Rebase to qemu 3.1.0-RC2

Danilo C. L. de Paula 2018-11-29 10:09:34 -02:00
parent ec15be19f0
commit a711492897
107 changed files with 1155 additions and 12453 deletions

.gitignore

@ -1 +1 @@
/qemu-3.0.0.tar.xz
/qemu-3.1.0.tar.xz


@ -0,0 +1,128 @@
From 5be2fefbc0999ada944c36a865b154d398e16e27 Mon Sep 17 00:00:00 2001
From: Zhang Chen <zhangckid@gmail.com>
Date: Thu, 1 Nov 2018 10:12:26 +0800
Subject: migration/colo.c: Fix compilation issue when disable replication
This compilation issue occurs when the user configures QEMU with --disable-replication.
Reported-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Zhang Chen <zhangckid@gmail.com>
---
migration/colo.c | 28 +++++++++++++++++++++++++++-
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/migration/colo.c b/migration/colo.c
index 956ac23..fcff04c 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -59,6 +59,8 @@ static bool colo_runstate_is_stopped(void)
static void secondary_vm_do_failover(void)
{
+/* COLO needs enable block-replication */
+#ifdef CONFIG_REPLICATION
int old_state;
MigrationIncomingState *mis = migration_incoming_get_current();
Error *local_err = NULL;
@@ -121,10 +123,14 @@ static void secondary_vm_do_failover(void)
if (mis->migration_incoming_co) {
qemu_coroutine_enter(mis->migration_incoming_co);
}
+#else
+ abort();
+#endif
}
static void primary_vm_do_failover(void)
{
+#ifdef CONFIG_REPLICATION
MigrationState *s = migrate_get_current();
int old_state;
Error *local_err = NULL;
@@ -165,6 +171,9 @@ static void primary_vm_do_failover(void)
/* Notify COLO thread that failover work is finished */
qemu_sem_post(&s->colo_exit_sem);
+#else
+ abort();
+#endif
}
COLOMode get_colo_mode(void)
@@ -415,11 +424,16 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
/* Disable block migration */
migrate_set_block_enabled(false, &local_err);
qemu_mutex_lock_iothread();
+
+#ifdef CONFIG_REPLICATION
replication_do_checkpoint_all(&local_err);
if (local_err) {
qemu_mutex_unlock_iothread();
goto out;
}
+#else
+ abort();
+#endif
colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err);
if (local_err) {
@@ -523,11 +537,15 @@ static void colo_process_checkpoint(MigrationState *s)
object_unref(OBJECT(bioc));
qemu_mutex_lock_iothread();
+#ifdef CONFIG_REPLICATION
replication_start_all(REPLICATION_MODE_PRIMARY, &local_err);
if (local_err) {
qemu_mutex_unlock_iothread();
goto out;
}
+#else
+ abort();
+#endif
vm_start();
qemu_mutex_unlock_iothread();
@@ -690,11 +708,15 @@ void *colo_process_incoming_thread(void *opaque)
object_unref(OBJECT(bioc));
qemu_mutex_lock_iothread();
+#ifdef CONFIG_REPLICATION
replication_start_all(REPLICATION_MODE_SECONDARY, &local_err);
if (local_err) {
qemu_mutex_unlock_iothread();
goto out;
}
+#else
+ abort();
+#endif
vm_start();
trace_colo_vm_state_change("stop", "run");
qemu_mutex_unlock_iothread();
@@ -785,18 +807,22 @@ void *colo_process_incoming_thread(void *opaque)
goto out;
}
+#ifdef CONFIG_REPLICATION
replication_get_error_all(&local_err);
if (local_err) {
qemu_mutex_unlock_iothread();
goto out;
}
+
/* discard colo disk buffer */
replication_do_checkpoint_all(&local_err);
if (local_err) {
qemu_mutex_unlock_iothread();
goto out;
}
-
+#else
+ abort();
+#endif
/* Notify all filters of all NIC to do checkpoint */
colo_notify_filters_event(COLO_EVENT_CHECKPOINT, &local_err);
--
1.8.3.1
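
The fix is mechanical: each COLO entry point keeps its real body under CONFIG_REPLICATION and falls back to abort() otherwise, because those paths cannot be reached when block replication is compiled out. A minimal standalone sketch of the pattern (the function name is illustrative, not the actual QEMU code):

    /* The build break this patch addresses showed up with:
     *     ./configure --disable-replication && make
     * Compile this sketch with -DCONFIG_REPLICATION to take the real branch.
     */
    #include <stdio.h>
    #include <stdlib.h>

    static void secondary_failover_sketch(void)
    {
    #ifdef CONFIG_REPLICATION
        printf("failover path compiled in\n");   /* real failover work goes here */
    #else
        /* COLO cannot run without block replication, so reaching this
         * function in a --disable-replication build is a hard error. */
        abort();
    #endif
    }

    int main(void)
    {
        secondary_failover_sketch();
        return 0;
    }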


@ -1,41 +1,39 @@
From a1f1313c0c96b2a159647aabc6a4b0f3a3f4424a Mon Sep 17 00:00:00 2001
From f0cd0ed26f3a3ae0610fad93c9dde26b54910abb Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Thu, 8 Nov 2018 11:17:08 +0100
Date: Fri, 12 Oct 2018 07:31:11 +0200
Subject: Initial redhat build
This patch introduces the Red Hat build structure in the redhat subdirectory. In addition, several issues are fixed in the QEMU tree:
- Change of app name for sasl_server_init in VNC code from qemu to qemu-kvm
- As we use qemu-kvm as name in all places, this is updated to be consistent
- Man page renamed from qemu to qemu-kvm
- man page is installed using make install so we have to fix it in qemu tree
- Use "/share/qemu-kvm" as SHARE_SUFFIX
- We reconfigured our share to qemu-kvm to be consistent with used name
This commit is synchronized with qemu-kvm-2.12.0-42.el8 build.
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
This rebase includes changes up to qemu-kvm-2.12.0-42.el8
---
Makefile | 3 +-
block/Makefile.objs | 2 +-
block/vxhs.c | 119 ++-
configure | 40 +-
configure | 33 +-
os-posix.c | 2 +-
redhat/Makefile | 82 ++
redhat/Makefile.common | 49 ++
redhat/qemu-kvm.spec.template | 1721 +++++++++++++++++++++++++++++++++++++++++
redhat/qemu-kvm.spec.template | 1723 +++++++++++++++++++++++++++++++++++++++++
ui/vnc.c | 2 +-
9 files changed, 1972 insertions(+), 48 deletions(-)
9 files changed, 1972 insertions(+), 43 deletions(-)
create mode 100644 redhat/Makefile
create mode 100644 redhat/Makefile.common
create mode 100644 redhat/qemu-kvm.spec.template
diff --git a/Makefile b/Makefile
index 2da686b..eb4c57a 100644
index f294718..152821a 100644
--- a/Makefile
+++ b/Makefile
@@ -501,6 +501,7 @@ CAP_CFLAGS += -DCAPSTONE_HAS_ARM
@@ -503,6 +503,7 @@ CAP_CFLAGS += -DCAPSTONE_HAS_ARM
CAP_CFLAGS += -DCAPSTONE_HAS_ARM64
CAP_CFLAGS += -DCAPSTONE_HAS_POWERPC
CAP_CFLAGS += -DCAPSTONE_HAS_X86
@ -43,7 +41,7 @@ index 2da686b..eb4c57a 100644
subdir-capstone: .git-submodule-status
$(call quiet-command,$(MAKE) -C $(SRC_PATH)/capstone CAPSTONE_SHARED=no BUILDDIR="$(BUILD_DIR)/capstone" CC="$(CC)" AR="$(AR)" LD="$(LD)" RANLIB="$(RANLIB)" CFLAGS="$(CAP_CFLAGS)" $(SUBDIR_MAKEFLAGS) $(BUILD_DIR)/capstone/$(LIBCAPSTONE))
@@ -819,7 +820,7 @@ install-doc: $(DOCS)
@@ -830,7 +831,7 @@ install-doc: $(DOCS)
$(INSTALL_DATA) docs/interop/qemu-qmp-ref.txt "$(DESTDIR)$(qemu_docdir)"
ifdef CONFIG_POSIX
$(INSTALL_DIR) "$(DESTDIR)$(mandir)/man1"
@ -53,10 +51,10 @@ index 2da686b..eb4c57a 100644
$(INSTALL_DATA) docs/interop/qemu-qmp-ref.7 "$(DESTDIR)$(mandir)/man7"
$(INSTALL_DATA) docs/qemu-block-drivers.7 "$(DESTDIR)$(mandir)/man7"
diff --git a/block/Makefile.objs b/block/Makefile.objs
index c8337bf..cd1e309 100644
index 46d585c..a244100 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -21,7 +21,7 @@ block-obj-$(CONFIG_LIBNFS) += nfs.o
@@ -30,7 +30,7 @@ block-obj-$(CONFIG_LIBNFS) += nfs.o
block-obj-$(CONFIG_CURL) += curl.o
block-obj-$(CONFIG_RBD) += rbd.o
block-obj-$(CONFIG_GLUSTERFS) += gluster.o
@ -257,26 +255,10 @@ index 0cb0a00..9164b3e 100644
trace_vxhs_get_vdisk_stat_err(s->vdisk_guid, ret, errno);
return -EIO;
diff --git a/configure b/configure
index 2a7796e..0a27137 100755
index 0a3c6a7..98b05c5 100755
--- a/configure
+++ b/configure
@@ -2216,13 +2216,10 @@ fi
##########################################
# libseccomp check
+libseccomp_minver="2.2.0"
if test "$seccomp" != "no" ; then
case "$cpu" in
- i386|x86_64)
- libseccomp_minver="2.1.0"
- ;;
- mips)
- libseccomp_minver="2.2.0"
+ i386|x86_64|mips)
;;
arm|aarch64)
libseccomp_minver="2.2.3"
@@ -3460,7 +3457,7 @@ fi
@@ -3459,7 +3459,7 @@ fi
glib_req_ver=2.40
glib_modules=gthread-2.0
@ -285,7 +267,7 @@ index 2a7796e..0a27137 100755
glib_modules="$glib_modules gmodule-export-2.0"
fi
@@ -5435,33 +5432,6 @@ if compile_prog "" "" ; then
@@ -5494,33 +5494,6 @@ if compile_prog "" "" ; then
fi
##########################################
@ -319,7 +301,7 @@ index 2a7796e..0a27137 100755
# check for _Static_assert()
have_static_assert=no
@@ -6759,8 +6729,8 @@ if test "$pthread_setname_np" = "yes" ; then
@@ -6854,8 +6827,8 @@ if test "$pthread_setname_np" = "yes" ; then
fi
if test "$vxhs" = "yes" ; then
@ -329,9 +311,9 @@ index 2a7796e..0a27137 100755
+ echo "VXHS_LIBS= -lssl" >> $config_host_mak
fi
if test "$tcg_interpreter" = "yes"; then
if test "$libpmem" = "yes" ; then
diff --git a/os-posix.c b/os-posix.c
index 9ce6f74..c4cfd0d 100644
index 4bd80e4..ca13206 100644
--- a/os-posix.c
+++ b/os-posix.c
@@ -82,7 +82,7 @@ void os_setup_signal_handling(void)
@ -344,18 +326,18 @@ index 9ce6f74..c4cfd0d 100644
char *os_find_datadir(void)
{
diff --git a/ui/vnc.c b/ui/vnc.c
index 3596932..050c421 100644
index 0c1b477..d7903a7 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -4054,7 +4054,7 @@ void vnc_display_open(const char *id, Error **errp)
trace_vnc_auth_init(vd, 1, vd->ws_auth, vd->ws_subauth);
@@ -3962,7 +3962,7 @@ void vnc_display_open(const char *id, Error **errp)
#ifdef CONFIG_VNC_SASL
- if ((saslErr = sasl_server_init(NULL, "qemu")) != SASL_OK) {
+ if ((saslErr = sasl_server_init(NULL, "qemu-kvm")) != SASL_OK) {
error_setg(errp, "Failed to initialize SASL auth: %s",
sasl_errstring(saslErr, NULL, NULL));
goto fail;
if (sasl) {
- int saslErr = sasl_server_init(NULL, "qemu");
+ int saslErr = sasl_server_init(NULL, "qemu-kvm");
if (saslErr != SASL_OK) {
error_setg(errp, "Failed to initialize SASL auth: %s",
--
1.8.3.1
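
Among the C changes above, the SASL rename deserves a note: the second argument of sasl_server_init() is the application name, which Cyrus SASL uses to locate the server configuration (conventionally <appname>.conf under /etc/sasl2/, depending on how libsasl2 was built), so the rename makes the VNC server read qemu-kvm.conf instead of qemu.conf. A standalone sketch of the call as it now appears in ui/vnc.c, with error handling condensed (link with -lsasl2):

    #include <stdio.h>
    #include <sasl/sasl.h>

    int main(void)
    {
        /* "qemu-kvm" selects which SASL config file is loaded; the exact
         * search path is a libsasl2 build-time setting. */
        int err = sasl_server_init(NULL, "qemu-kvm");
        if (err != SASL_OK) {
            fprintf(stderr, "Failed to initialize SASL auth: %s\n",
                    sasl_errstring(err, NULL, NULL));
            return 1;
        }
        sasl_done();
        return 0;
    }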


@ -1,88 +1,68 @@
From b4a5b95153ca86eba72ff4a368a24ac31b77bbe5 Mon Sep 17 00:00:00 2001
From 38eba79aaa865ffa3e85bfa56e644e0846731744 Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Mon, 11 Jan 2016 11:53:33 +0100
Subject: Enable/disable devices for RHEL 7
Subject: Enable/disable devices for RHEL
This commit adds all changes related to changes in supported devices
up to qemu-kvm-2.12.0-42.el8.
This commit adds all changes related to changes in supported devices.
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
Rebase notes (3.0.0):
- Added CONFIG_SCSI
- Enabled SMMUV3
- Using CONFIG_VIRTIO_CRYPTO to remove crypto
- Added CONFIG_VIRTIO_MMIO for aarch64
- Disabled arvm7v.c compile
- Introduced virtio.mak
- Disabled make check
- Removed test changes (moved to next patch)
Merged patches (3.0.0):
- a2721f6 Re-enable disabled Hyper-V enlightenments
- c670fa1 Disable aarch64 devices reappeared after 2.12 rebase
- 3ebdb95 Disable split-irq device
- d68f80c Disable AT24Cx i2c eeprom
- 3f953e9 Disable CAN bus devices
- 284c393 Disable new superio devices
- 747643c Disable new pvrdma device
- 0d4f38c s390x: Re-enable CONFIG_TERMINAL3270
- 0f725e9 AArch64: Enable CONFIG_FW_CFG_DMA for aarch64
- 67c5a8c Disable ivshmem
Rebase notes (qemu 3.1.0)
- spapr_rng disabled in default_config
- new hyperv.mak in default configs
- Move changes from x86_64-softmmu.mak to i386-softmmu.mak
- Added CONFIG_VIRTIO_MMIO to aarch64-softmmu.mak
---
default-configs/aarch64-softmmu.mak | 37 ++++++++++++++++++++++++++++--------
default-configs/pci.mak | 38 ++++++++++++++++++-------------------
default-configs/ppc64-softmmu.mak | 25 ++++++++++++++++++------
default-configs/aarch64-softmmu.mak | 40 +++++++++++++++++++++++++++----------
default-configs/hyperv.mak | 2 +-
default-configs/i386-softmmu.mak | 26 ++++++++++++------------
default-configs/pci.mak | 38 +++++++++++++++++------------------
default-configs/ppc64-softmmu.mak | 28 +++++++++++++++++++-------
default-configs/s390x-softmmu.mak | 5 +++--
default-configs/sound.mak | 8 ++++----
default-configs/usb.mak | 14 +++++++-------
default-configs/usb.mak | 14 ++++++-------
default-configs/virtio.mak | 5 ++---
default-configs/x86_64-softmmu.mak | 28 +++++++++++++--------------
hw/acpi/ich9.c | 4 ++--
hw/arm/Makefile.objs | 2 +-
hw/block/fdc.c | 1 +
hw/char/serial-pci.c | 4 ++++
hw/core/Makefile.objs | 9 +++++----
hw/display/cirrus_vga.c | 2 ++
hw/core/Makefile.objs | 10 ++++++----
hw/display/cirrus_vga_isa.c | 2 ++
hw/i386/pc.c | 2 ++
hw/ide/piix.c | 5 ++++-
hw/ide/via.c | 2 ++
hw/input/pckbd.c | 2 ++
hw/isa/Makefile.objs | 2 +-
hw/misc/Makefile.objs | 2 +-
hw/misc/ivshmem.c | 11 +++++++++++
hw/misc/ivshmem.c | 11 ++++++++++
hw/net/e1000.c | 2 ++
hw/nvram/Makefile.objs | 2 +-
hw/pci-host/piix.c | 4 ++++
hw/ppc/Makefile.objs | 2 +-
hw/ppc/spapr.c | 3 ++-
hw/ppc/spapr_cpu_core.c | 2 ++
hw/rdma/Makefile.objs | 3 ++-
hw/s390x/virtio-ccw.c | 8 ++++++++
hw/usb/ccid-card-emulated.c | 2 ++
hw/vfio/Makefile.objs | 1 -
hw/vfio/Makefile.objs | 3 ---
hw/vfio/pci-quirks.c | 5 +++++
hw/virtio/virtio-pci.c | 8 ++++----
qemu-options.hx | 5 -----
redhat/qemu-kvm.spec.template | 2 +-
stubs/Makefile.objs | 1 +
stubs/ide-isa.c | 13 +++++++++++++
stubs/ide-isa.c | 13 ++++++++++++
target/arm/cpu.c | 4 +++-
target/i386/cpu.c | 35 ++++++++++++++++++++++++++--------
target/ppc/cpu-models.c | 17 ++++++++++++++++-
target/i386/cpu.c | 35 ++++++++++++++++++++++++--------
target/ppc/cpu-models.c | 12 +++++++++++
target/s390x/cpu_models.c | 3 +++
target/s390x/kvm.c | 8 ++++++++
vl.c | 2 +-
43 files changed, 241 insertions(+), 99 deletions(-)
40 files changed, 229 insertions(+), 100 deletions(-)
create mode 100644 stubs/ide-isa.c
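
The diffstat above already shows the three techniques this patch relies on: unsupported devices are commented out of the default-configs/*.mak fragments so their objects are never built, devices whose object files must stay are fenced off with #if 0 /* Disabled in Red Hat Enterprise Linux */ around their QOM registration (see the spapr, virtio-ccw and target/ppc hunks below), and small stubs such as stubs/ide-isa.c keep the link working where other code still references a symbol. A condensed, hypothetical example of the registration fence (device name and file are invented for illustration):

    /* hw/misc/acme-dev.c (hypothetical): still compiled, but never
     * registered, so "-device acme-dev" no longer exists for the guest. */
    #include "qemu/osdep.h"
    #include "qemu/module.h"
    #include "hw/qdev-core.h"

    #if 0 /* Disabled in Red Hat Enterprise Linux */
    static const TypeInfo acme_dev_info = {
        .name          = "acme-dev",
        .parent        = TYPE_DEVICE,
        .instance_size = sizeof(DeviceState),
    };
    #endif

    static void acme_register_types(void)
    {
    #if 0 /* Disabled in Red Hat Enterprise Linux */
        type_register_static(&acme_dev_info);
    #endif
    }

    type_init(acme_register_types)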
diff --git a/default-configs/aarch64-softmmu.mak b/default-configs/aarch64-softmmu.mak
index 6f790f0..3f27540 100644
index 4ea9add..221e266 100644
--- a/default-configs/aarch64-softmmu.mak
+++ b/default-configs/aarch64-softmmu.mak
@@ -1,11 +1,32 @@
@@ -1,12 +1,32 @@
# Default configuration for aarch64-softmmu
-
-# We support all the 32 bit boards so need all their config
-include arm-softmmu.mak
-
@ -91,12 +71,14 @@ index 6f790f0..3f27540 100644
-CONFIG_DPCD=y
-CONFIG_XLNX_ZYNQMP=y
-CONFIG_XLNX_ZYNQMP_ARM=y
CONFIG_ARM_SMMUV3=y
-CONFIG_XLNX_VERSAL=y
+# CONFIG_AUX=y
+# CONFIG_DDC=y
+# CONFIG_DPCD=y
+# CONFIG_XLNX_ZYNQMP=y
+# CONFIG_XLNX_ZYNQMP_ARM=y
+# CONFIG_XLNX_VERSAL=y
CONFIG_ARM_SMMUV3=y
+CONFIG_PCI=y
+CONFIG_PCI_TESTDEV=y
+CONFIG_VIRTIO_PCI=y
@ -121,8 +103,71 @@ index 6f790f0..3f27540 100644
+CONFIG_USB=y
+CONFIG_I2C=y
+CONFIG_FW_CFG_DMA=y
diff --git a/default-configs/hyperv.mak b/default-configs/hyperv.mak
index 5d0d9fd..fce5d91 100644
--- a/default-configs/hyperv.mak
+++ b/default-configs/hyperv.mak
@@ -1,2 +1,2 @@
CONFIG_HYPERV=$(CONFIG_KVM)
-CONFIG_HYPERV_TESTDEV=y
+#CONFIG_HYPERV_TESTDEV=y
diff --git a/default-configs/i386-softmmu.mak b/default-configs/i386-softmmu.mak
index 64c998c..faea212 100644
--- a/default-configs/i386-softmmu.mak
+++ b/default-configs/i386-softmmu.mak
@@ -5,20 +5,20 @@ include sound.mak
include usb.mak
include hyperv.mak
CONFIG_QXL=$(CONFIG_SPICE)
-CONFIG_VGA_ISA=y
+#CONFIG_VGA_ISA=y
CONFIG_VGA_CIRRUS=y
-CONFIG_VMWARE_VGA=y
-CONFIG_VMXNET3_PCI=y
+#CONFIG_VMWARE_VGA=y
+#CONFIG_VMXNET3_PCI=y
CONFIG_VIRTIO_VGA=y
CONFIG_VMMOUSE=y
CONFIG_IPMI=y
-CONFIG_IPMI_LOCAL=y
-CONFIG_IPMI_EXTERN=y
-CONFIG_ISA_IPMI_KCS=y
-CONFIG_ISA_IPMI_BT=y
+#CONFIG_IPMI_LOCAL=y
+#CONFIG_IPMI_EXTERN=y
+#CONFIG_ISA_IPMI_KCS=y
+#CONFIG_ISA_IPMI_BT=y
CONFIG_SERIAL=y
CONFIG_SERIAL_ISA=y
-CONFIG_PARALLEL=y
+#CONFIG_PARALLEL=y
CONFIG_I8254=y
CONFIG_PCSPK=y
CONFIG_PCKBD=y
@@ -30,11 +30,11 @@ CONFIG_ACPI_MEMORY_HOTPLUG=y
CONFIG_ACPI_CPU_HOTPLUG=y
CONFIG_APM=y
CONFIG_I8257=y
-CONFIG_IDE_ISA=y
+#CONFIG_IDE_ISA=y
CONFIG_IDE_PIIX=y
-CONFIG_NE2000_ISA=y
-CONFIG_HPET=y
-CONFIG_APPLESMC=y
+#CONFIG_NE2000_ISA=y
+#CONFIG_HPET=y
+#CONFIG_APPLESMC=y
CONFIG_I8259=y
CONFIG_PFLASH_CFI01=y
CONFIG_TPM_TIS=$(CONFIG_TPM)
@@ -66,4 +66,4 @@ CONFIG_FW_CFG_DMA=y
CONFIG_I2C=y
CONFIG_SEV=$(CONFIG_KVM)
CONFIG_VTD=y
-CONFIG_AMD_IOMMU=y
+#CONFIG_AMD_IOMMU=y
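
Commenting a CONFIG_ switch out of a default-configs fragment keeps the source in the tree but drops the object from the build, so the corresponding QOM type is never registered in the resulting binary. Whether a given device survived can be checked from QMP with qom-list-types, or programmatically; a small QEMU-internal style sketch (the helper name is illustrative):

    #include "qemu/osdep.h"
    #include "qom/object.h"

    /* True if the named device type was registered in this binary; for
     * example "isa-vga" disappears once CONFIG_VGA_ISA above is commented
     * out, because hw/display/vga-isa.o is no longer built at all. */
    static bool device_type_available(const char *type_name)
    {
        return object_class_by_name(type_name) != NULL;
    }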
diff --git a/default-configs/pci.mak b/default-configs/pci.mak
index de53d20..70e40ad 100644
index 6c7be12..292b3f2 100644
--- a/default-configs/pci.mak
+++ b/default-configs/pci.mak
@@ -4,22 +4,22 @@ CONFIG_ISA_BUS=y
@ -158,7 +203,7 @@ index de53d20..70e40ad 100644
CONFIG_RTL8139_PCI=y
CONFIG_E1000_PCI=y
CONFIG_E1000E_PCI=y
@@ -27,22 +27,22 @@ CONFIG_IDE_CORE=y
@@ -27,23 +27,23 @@ CONFIG_IDE_CORE=y
CONFIG_IDE_QDEV=y
CONFIG_IDE_PCI=y
CONFIG_AHCI=y
@ -186,19 +231,21 @@ index de53d20..70e40ad 100644
CONFIG_EDU=y
CONFIG_VGA=y
CONFIG_VGA_PCI=y
CONFIG_BOCHS_DISPLAY=y
-CONFIG_IVSHMEM_DEVICE=$(CONFIG_IVSHMEM)
-CONFIG_ROCKER=y
+#CONFIG_IVSHMEM_DEVICE=$(CONFIG_IVSHMEM)
+#CONFIG_ROCKER=y
diff --git a/default-configs/ppc64-softmmu.mak b/default-configs/ppc64-softmmu.mak
index b94af6c..30ca76d 100644
index aec2855..a492986 100644
--- a/default-configs/ppc64-softmmu.mak
+++ b/default-configs/ppc64-softmmu.mak
@@ -1,14 +1,27 @@
@@ -1,14 +1,28 @@
# Default configuration for ppc64-softmmu
-# Include all 32-bit boards
-include ppc-softmmu.mak
+include sound.mak
+include usb.mak
+include virtio.mak
+
@ -228,11 +275,17 @@ index b94af6c..30ca76d 100644
# For pSeries
CONFIG_PSERIES=y
@@ -18,4 +32,4 @@ CONFIG_XICS_SPAPR=$(CONFIG_PSERIES)
CONFIG_XICS_KVM=$(call land,$(CONFIG_PSERIES),$(CONFIG_KVM))
CONFIG_MEM_DEVICE=y
CONFIG_DIMM=y
-CONFIG_SPAPR_RNG=y
+#CONFIG_SPAPR_RNG=y
diff --git a/default-configs/s390x-softmmu.mak b/default-configs/s390x-softmmu.mak
index d6b67d5..8b2db3e 100644
index 5eef375..49a59fc 100644
--- a/default-configs/s390x-softmmu.mak
+++ b/default-configs/s390x-softmmu.mak
@@ -1,9 +1,10 @@
@@ -1,10 +1,11 @@
CONFIG_PCI=y
-CONFIG_VIRTIO_PCI=$(CONFIG_PCI)
+#CONFIG_VIRTIO_PCI=$(CONFIG_PCI)
@ -245,6 +298,7 @@ index d6b67d5..8b2db3e 100644
+# Disabled for Red Hat Enterprise Linux:
+# CONFIG_VFIO_CCW=$(CONFIG_LINUX)
CONFIG_WDT_DIAG288=y
CONFIG_VFIO_AP=$(CONFIG_LINUX)
diff --git a/default-configs/sound.mak b/default-configs/sound.mak
index 4f22c34..1bead9b 100644
--- a/default-configs/sound.mak
@ -298,69 +352,6 @@ index 1304849..6330e6b 100644
CONFIG_VIRTIO_GPU=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_NET=y
diff --git a/default-configs/x86_64-softmmu.mak b/default-configs/x86_64-softmmu.mak
index 0390b43..613fc50 100644
--- a/default-configs/x86_64-softmmu.mak
+++ b/default-configs/x86_64-softmmu.mak
@@ -4,20 +4,20 @@ include pci.mak
include sound.mak
include usb.mak
CONFIG_QXL=$(CONFIG_SPICE)
-CONFIG_VGA_ISA=y
+#CONFIG_VGA_ISA=y
CONFIG_VGA_CIRRUS=y
-CONFIG_VMWARE_VGA=y
-CONFIG_VMXNET3_PCI=y
+#CONFIG_VMWARE_VGA=y
+#CONFIG_VMXNET3_PCI=y
CONFIG_VIRTIO_VGA=y
CONFIG_VMMOUSE=y
CONFIG_IPMI=y
-CONFIG_IPMI_LOCAL=y
-CONFIG_IPMI_EXTERN=y
-CONFIG_ISA_IPMI_KCS=y
-CONFIG_ISA_IPMI_BT=y
+#CONFIG_IPMI_LOCAL=y
+#CONFIG_IPMI_EXTERN=y
+#CONFIG_ISA_IPMI_KCS=y
+#CONFIG_ISA_IPMI_BT=y
CONFIG_SERIAL=y
CONFIG_SERIAL_ISA=y
-CONFIG_PARALLEL=y
+#CONFIG_PARALLEL=y
CONFIG_I8254=y
CONFIG_PCSPK=y
CONFIG_PCKBD=y
@@ -29,11 +29,11 @@ CONFIG_ACPI_MEMORY_HOTPLUG=y
CONFIG_ACPI_CPU_HOTPLUG=y
CONFIG_APM=y
CONFIG_I8257=y
-CONFIG_IDE_ISA=y
+#CONFIG_IDE_ISA=y
CONFIG_IDE_PIIX=y
-CONFIG_NE2000_ISA=y
-CONFIG_HPET=y
-CONFIG_APPLESMC=y
+#CONFIG_NE2000_ISA=y
+#CONFIG_HPET=y
+#CONFIG_APPLESMC=y
CONFIG_I8259=y
CONFIG_PFLASH_CFI01=y
CONFIG_TPM_TIS=$(CONFIG_TPM)
@@ -58,11 +58,11 @@ CONFIG_XIO3130=y
CONFIG_IOH3420=y
CONFIG_I82801B11=y
CONFIG_SMBIOS=y
-CONFIG_HYPERV_TESTDEV=$(CONFIG_KVM)
+#CONFIG_HYPERV_TESTDEV=$(CONFIG_KVM)
CONFIG_PXB=y
CONFIG_ACPI_VMGENID=y
CONFIG_FW_CFG_DMA=y
CONFIG_I2C=y
CONFIG_SEV=$(CONFIG_KVM)
CONFIG_VTD=y
-CONFIG_AMD_IOMMU=y
+#CONFIG_AMD_IOMMU=y
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index c5d8646..a4e87b8 100644
--- a/hw/acpi/ich9.c
@ -377,7 +368,7 @@ index c5d8646..a4e87b8 100644
object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE,
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
index d51fcec..52ec91b 100644
index 50c7b4a..149848e 100644
--- a/hw/arm/Makefile.objs
+++ b/hw/arm/Makefile.objs
@@ -16,7 +16,7 @@ obj-$(CONFIG_STRONGARM) += collie.o
@ -390,7 +381,7 @@ index d51fcec..52ec91b 100644
obj-$(CONFIG_PXA2XX) += pxa2xx.o pxa2xx_gpio.o pxa2xx_pic.o
obj-$(CONFIG_DIGIC) += digic.o
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
index 2e9c1e1..f284df7 100644
index 6f19f12..56b7aeb 100644
--- a/hw/block/fdc.c
+++ b/hw/block/fdc.c
@@ -599,6 +599,7 @@ static void floppy_drive_class_init(ObjectClass *klass, void *data)
@ -424,10 +415,10 @@ index cb0d04c..d426982 100644
static const TypeInfo serial_pci_info = {
diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs
index eb88ca9..e967fb2 100644
index a799c83..1c7ba0b 100644
--- a/hw/core/Makefile.objs
+++ b/hw/core/Makefile.objs
@@ -16,10 +16,11 @@ common-obj-$(CONFIG_SOFTMMU) += machine.o
@@ -16,9 +16,11 @@ common-obj-$(CONFIG_SOFTMMU) += machine.o
common-obj-$(CONFIG_SOFTMMU) += loader.o
common-obj-$(CONFIG_FITLOADER) += loader-fit.o
common-obj-$(CONFIG_SOFTMMU) += qdev-properties-system.o
@ -435,19 +426,19 @@ index eb88ca9..e967fb2 100644
-common-obj-$(CONFIG_SOFTMMU) += or-irq.o
-common-obj-$(CONFIG_SOFTMMU) += split-irq.o
+# Disabled in Red Hat Enterprise Linux
+# common-obj-$(CONFIG_SOFTMMU) += register.o
+# obj-$(CONFIG_SOFTMMU) += generic-loader.o
+# common-obj-$(CONFIG_SOFTMMU) += or-irq.o
+#common-obj-$(CONFIG_SOFTMMU) += register.o
+#obj-$(CONFIG_SOFTMMU) += generic-loader.o
+#common-obj-$(CONFIG_SOFTMMU) += or-irq.o
+#common-obj-$(CONFIG_SOFTMMU) += split-irq.o
common-obj-$(CONFIG_PLATFORM_BUS) += platform-bus.o
-obj-$(CONFIG_SOFTMMU) += generic-loader.o
obj-$(CONFIG_SOFTMMU) += null-machine.o
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 7583b18..9fd5665 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -3075,6 +3075,8 @@ static void isa_cirrus_vga_class_init(ObjectClass *klass, void *data)
-common-obj-$(CONFIG_SOFTMMU) += generic-loader.o
+#common-obj-$(CONFIG_SOFTMMU) += generic-loader.o
common-obj-$(CONFIG_SOFTMMU) += null-machine.o
diff --git a/hw/display/cirrus_vga_isa.c b/hw/display/cirrus_vga_isa.c
index fa10b74..1cb607d 100644
--- a/hw/display/cirrus_vga_isa.c
+++ b/hw/display/cirrus_vga_isa.c
@@ -81,6 +81,8 @@ static void isa_cirrus_vga_class_init(ObjectClass *klass, void *data)
dc->realize = isa_cirrus_vga_realizefn;
dc->props = isa_cirrus_vga_properties;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
@ -457,10 +448,10 @@ index 7583b18..9fd5665 100644
static const TypeInfo isa_cirrus_vga_info = {
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 83a4444..11c287e 100644
index f095725..567439e 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1528,7 +1528,9 @@ static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, bool no_vmport)
@@ -1533,7 +1533,9 @@ static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, bool no_vmport)
ISADevice *i8042, *port92, *vmmouse;
serial_hds_isa_init(isa_bus, 0, MAX_ISA_SERIAL_PORTS);
@ -520,18 +511,18 @@ index 07c8801..c27a0f8 100644
static const TypeInfo i8042_info = {
diff --git a/hw/isa/Makefile.objs b/hw/isa/Makefile.objs
index 83e06f6..7de4f44 100644
index 9e106df..0828964 100644
--- a/hw/isa/Makefile.objs
+++ b/hw/isa/Makefile.objs
@@ -1,5 +1,5 @@
common-obj-$(CONFIG_ISA_BUS) += isa-bus.o
-common-obj-$(CONFIG_ISA_BUS) += isa-superio.o smc37c669-superio.o
+#common-obj-$(CONFIG_ISA_BUS) += isa-superio.o smc37c669-superio.o
-common-obj-$(CONFIG_ISA_BUS) += isa-superio.o
+#common-obj-$(CONFIG_ISA_BUS) += isa-superio.o
common-obj-$(CONFIG_APM) += apm.o
common-obj-$(CONFIG_I82378) += i82378.o
common-obj-$(CONFIG_PC87312) += pc87312.o
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
index 9350900..9c2c404 100644
index 680350b..ed543a6 100644
--- a/hw/misc/Makefile.objs
+++ b/hw/misc/Makefile.objs
@@ -9,7 +9,7 @@ common-obj-$(CONFIG_PCI_TESTDEV) += pci-testdev.o
@ -544,7 +535,7 @@ index 9350900..9c2c404 100644
# ARM devices
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index 6febbab..0786fb9 100644
index ecfd10a..8059563 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -893,6 +893,13 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
@ -561,7 +552,7 @@ index 6febbab..0786fb9 100644
pci_conf = dev->config;
pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
@@ -1183,6 +1190,8 @@ static void ivshmem_doorbell_class_init(ObjectClass *klass, void *data)
@@ -1179,6 +1186,8 @@ static void ivshmem_doorbell_class_init(ObjectClass *klass, void *data)
k->realize = ivshmem_doorbell_realize;
dc->props = ivshmem_doorbell_properties;
dc->vmsd = &ivshmem_doorbell_vmsd;
@ -570,7 +561,7 @@ index 6febbab..0786fb9 100644
}
static const TypeInfo ivshmem_doorbell_info = {
@@ -1352,6 +1361,8 @@ static void ivshmem_class_init(ObjectClass *klass, void *data)
@@ -1349,6 +1358,8 @@ static void ivshmem_class_init(ObjectClass *klass, void *data)
dc->desc = "Inter-VM shared memory (legacy)";
dc->props = ivshmem_properties;
dc->vmsd = &ivshmem_vmsd;
@ -580,10 +571,10 @@ index 6febbab..0786fb9 100644
static const TypeInfo ivshmem_info = {
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index 13a9494..742cd0a 100644
index 5e144cb..2e07880 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -1768,6 +1768,7 @@ static const E1000Info e1000_devices[] = {
@@ -1778,6 +1778,7 @@ static const E1000Info e1000_devices[] = {
.revision = 0x03,
.phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT,
},
@ -591,7 +582,7 @@ index 13a9494..742cd0a 100644
{
.name = "e1000-82544gc",
.device_id = E1000_DEV_ID_82544GC_COPPER,
@@ -1780,6 +1781,7 @@ static const E1000Info e1000_devices[] = {
@@ -1790,6 +1791,7 @@ static const E1000Info e1000_devices[] = {
.revision = 0x03,
.phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT,
},
@ -599,23 +590,11 @@ index 13a9494..742cd0a 100644
};
static void e1000_register_types(void)
diff --git a/hw/nvram/Makefile.objs b/hw/nvram/Makefile.objs
index a912d25..cbc8bba 100644
--- a/hw/nvram/Makefile.objs
+++ b/hw/nvram/Makefile.objs
@@ -1,6 +1,6 @@
common-obj-$(CONFIG_DS1225Y) += ds1225y.o
common-obj-y += eeprom93xx.o
-common-obj-$(CONFIG_I2C) += eeprom_at24c.o
+#common-obj-$(CONFIG_I2C) += eeprom_at24c.o
common-obj-y += fw_cfg.o
common-obj-y += chrp_nvram.o
common-obj-$(CONFIG_MAC_NVRAM) += mac_nvram.o
diff --git a/hw/pci-host/piix.c b/hw/pci-host/piix.c
index 0e60834..3ce4b14 100644
index d9c70f7..f294fbc 100644
--- a/hw/pci-host/piix.c
+++ b/hw/pci-host/piix.c
@@ -787,6 +787,7 @@ static const TypeInfo i440fx_info = {
@@ -801,6 +801,7 @@ static const TypeInfo i440fx_info = {
},
};
@ -623,7 +602,7 @@ index 0e60834..3ce4b14 100644
/* IGD Passthrough Host Bridge. */
typedef struct {
uint8_t offset;
@@ -870,6 +871,7 @@ static const TypeInfo igd_passthrough_i440fx_info = {
@@ -884,6 +885,7 @@ static const TypeInfo igd_passthrough_i440fx_info = {
.instance_size = sizeof(PCII440FXState),
.class_init = igd_passthrough_i440fx_class_init,
};
@ -631,7 +610,7 @@ index 0e60834..3ce4b14 100644
static const char *i440fx_pcihost_root_bus_path(PCIHostState *host_bridge,
PCIBus *rootbus)
@@ -915,7 +917,9 @@ static const TypeInfo i440fx_pcihost_info = {
@@ -929,7 +931,9 @@ static const TypeInfo i440fx_pcihost_info = {
static void i440fx_register_types(void)
{
type_register_static(&i440fx_info);
@ -641,45 +620,11 @@ index 0e60834..3ce4b14 100644
type_register_static(&piix3_pci_type_info);
type_register_static(&piix3_info);
type_register_static(&piix3_xen_info);
diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs
index bcab632..70e8780 100644
--- a/hw/ppc/Makefile.objs
+++ b/hw/ppc/Makefile.objs
@@ -3,7 +3,7 @@ obj-y += ppc.o ppc_booke.o fdt.o
# IBM pSeries (sPAPR)
obj-$(CONFIG_PSERIES) += spapr.o spapr_caps.o spapr_vio.o spapr_events.o
obj-$(CONFIG_PSERIES) += spapr_hcall.o spapr_iommu.o spapr_rtas.o
-obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o spapr_drc.o spapr_rng.o
+obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o spapr_drc.o
obj-$(CONFIG_PSERIES) += spapr_cpu_core.o spapr_ovec.o
# IBM PowerNV
obj-$(CONFIG_POWERNV) += pnv.o pnv_xscom.o pnv_core.o pnv_lpc.o pnv_psi.o pnv_occ.o pnv_bmc.o
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 421b2dd..2f8c304 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1303,6 +1303,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
/* /vdevice */
spapr_dt_vdevice(spapr->vio_bus, fdt);
+#if 0 /* Disabled in Red Hat Enterprise Linux */
if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
ret = spapr_rng_populate_dt(fdt);
if (ret < 0) {
@@ -1310,7 +1311,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
exit(1);
}
}
-
+#endif
QLIST_FOREACH(phb, &spapr->phbs, list) {
ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
if (ret < 0) {
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index 993759d..fb29eec 100644
index 2398ce6..63a7bb6 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -378,10 +378,12 @@ static const TypeInfo spapr_cpu_core_type_infos[] = {
@@ -382,10 +382,12 @@ static const TypeInfo spapr_cpu_core_type_infos[] = {
.instance_size = sizeof(sPAPRCPUCore),
.class_size = sizeof(sPAPRCPUCoreClass),
},
@ -693,70 +638,22 @@ index 993759d..fb29eec 100644
DEFINE_SPAPR_CPU_CORE_TYPE("power7+_v2.1"),
DEFINE_SPAPR_CPU_CORE_TYPE("power8_v2.0"),
diff --git a/hw/rdma/Makefile.objs b/hw/rdma/Makefile.objs
index 3504c39..02ca2a9 100644
index bd36cbf..e87e7e5 100644
--- a/hw/rdma/Makefile.objs
+++ b/hw/rdma/Makefile.objs
@@ -1,5 +1,6 @@
ifeq ($(CONFIG_RDMA),y)
ifeq ($(CONFIG_PVRDMA),y)
obj-$(CONFIG_PCI) += rdma_utils.o rdma_backend.o rdma_rm.o
obj-$(CONFIG_PCI) += vmw/pvrdma_dev_ring.o vmw/pvrdma_cmd.o \
- vmw/pvrdma_qp_ops.o vmw/pvrdma_main.o
+ vmw/pvrdma_qp_ops.o
+#obj-$(CONFIG_PCI) += vmw/pvrdma_main.o
endif
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 7ddb378..b131781 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -925,6 +925,8 @@ static void virtio_ccw_rng_realize(VirtioCcwDevice *ccw_dev, Error **errp)
NULL);
}
+#if 0 /* Disabled in Red Hat Enterprise Linux */
+
static void virtio_ccw_crypto_realize(VirtioCcwDevice *ccw_dev, Error **errp)
{
VirtIOCryptoCcw *dev = VIRTIO_CRYPTO_CCW(ccw_dev);
@@ -942,6 +944,7 @@ static void virtio_ccw_crypto_realize(VirtioCcwDevice *ccw_dev, Error **errp)
OBJECT(dev->vdev.conf.cryptodev), "cryptodev",
NULL);
}
+#endif
static void virtio_ccw_gpu_realize(VirtioCcwDevice *ccw_dev, Error **errp)
{
@@ -1532,6 +1535,8 @@ static const TypeInfo virtio_ccw_rng = {
.class_init = virtio_ccw_rng_class_init,
};
+#if 0 /* Disabled in Red Hat Enterprise Linux */
+
static Property virtio_ccw_crypto_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
@@ -1568,6 +1573,7 @@ static const TypeInfo virtio_ccw_crypto = {
.instance_init = virtio_ccw_crypto_instance_init,
.class_init = virtio_ccw_crypto_class_init,
};
+#endif
static Property virtio_ccw_gpu_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
@@ -1888,7 +1894,9 @@ static void virtio_ccw_register(void)
#ifdef CONFIG_VHOST_VSOCK
type_register_static(&vhost_vsock_ccw_info);
#endif
+#if 0 /* Disabled in Red Hat Enterprise Linux */
type_register_static(&virtio_ccw_crypto);
+#endif
type_register_static(&virtio_ccw_gpu);
type_register_static(&virtio_ccw_input);
type_register_static(&virtio_ccw_input_hid);
diff --git a/hw/usb/ccid-card-emulated.c b/hw/usb/ccid-card-emulated.c
index 5c8b3c9..d1cbe54 100644
index 25976ed..a793584 100644
--- a/hw/usb/ccid-card-emulated.c
+++ b/hw/usb/ccid-card-emulated.c
@@ -585,6 +585,8 @@ static void emulated_class_initfn(ObjectClass *klass, void *data)
@@ -600,6 +600,8 @@ static void emulated_class_initfn(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
dc->desc = "emulated smartcard";
dc->props = emulated_card_properties;
@ -766,19 +663,21 @@ index 5c8b3c9..d1cbe54 100644
static const TypeInfo emulated_card_info = {
diff --git a/hw/vfio/Makefile.objs b/hw/vfio/Makefile.objs
index a2e7a0a..d38205b 100644
index 8b3f664..a6b6039 100644
--- a/hw/vfio/Makefile.objs
+++ b/hw/vfio/Makefile.objs
@@ -2,7 +2,6 @@ ifeq ($(CONFIG_LINUX), y)
@@ -2,9 +2,6 @@ ifeq ($(CONFIG_LINUX), y)
obj-$(CONFIG_SOFTMMU) += common.o
obj-$(CONFIG_PCI) += pci.o pci-quirks.o display.o
obj-$(CONFIG_VFIO_CCW) += ccw.o
-obj-$(CONFIG_SOFTMMU) += platform.o
obj-$(CONFIG_VFIO_XGMAC) += calxeda-xgmac.o
obj-$(CONFIG_VFIO_AMD_XGBE) += amd-xgbe.o
-obj-$(CONFIG_VFIO_XGMAC) += calxeda-xgmac.o
-obj-$(CONFIG_VFIO_AMD_XGBE) += amd-xgbe.o
obj-$(CONFIG_SOFTMMU) += spapr.o
obj-$(CONFIG_VFIO_AP) += ap.o
endif
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index 481fd08..1c588f5 100644
index eae31c7..4a6e98e 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -1387,6 +1387,8 @@ static void vfio_pci_igd_lpc_bridge_class_init(ObjectClass *klass, void *data)
@ -801,7 +700,7 @@ index 481fd08..1c588f5 100644
/*
* We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 3a01fe9..3567faf 100644
index a954799..9a987cb 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -2003,7 +2003,7 @@ static const TypeInfo virtio_blk_pci_info = {
@ -841,10 +740,10 @@ index 3a01fe9..3567faf 100644
#endif
#ifdef CONFIG_VHOST_VSOCK
diff --git a/qemu-options.hx b/qemu-options.hx
index b1bf0f4..37f2aa8 100644
index f7df472..cd2b25b 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -1811,11 +1811,6 @@ ETEXI
@@ -1741,11 +1741,6 @@ ETEXI
DEF("no-hpet", 0, QEMU_OPTION_no_hpet,
"-no-hpet disable HPET\n", QEMU_ARCH_I386)
@ -857,13 +756,13 @@ index b1bf0f4..37f2aa8 100644
DEF("acpitable", HAS_ARG, QEMU_OPTION_acpitable,
"-acpitable [sig=str][,rev=n][,oem_id=str][,oem_table_id=str][,oem_rev=n][,asl_compiler_id=str][,asl_compiler_rev=n][,{data|file}=file1[:file2]...]\n"
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
index 53d3f32..b1360c4 100644
index 5dd0aee..f27250e 100644
--- a/stubs/Makefile.objs
+++ b/stubs/Makefile.objs
@@ -43,3 +43,4 @@ stub-obj-y += xen-common.o
stub-obj-y += xen-hvm.o
@@ -44,3 +44,4 @@ stub-obj-y += xen-hvm.o
stub-obj-y += pci-host-piix.o
stub-obj-y += ram-block.o
stub-obj-y += ramfb.o
+stub-obj-y += ide-isa.o
diff --git a/stubs/ide-isa.c b/stubs/ide-isa.c
new file mode 100644
@ -885,10 +784,10 @@ index 0000000..9fd50ef
+ abort();
+}
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 64a8005..bc8d09d 100644
index 60411f6..d2ac5bb 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2029,7 +2029,9 @@ static void arm_cpu_register_types(void)
@@ -2149,7 +2149,9 @@ static void arm_cpu_register_types(void)
type_register_static(&idau_interface_type_info);
while (info->name) {
@ -900,10 +799,10 @@ index 64a8005..bc8d09d 100644
}
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 723e022..338ee37 100644
index f81d35e..e9b9183 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1366,14 +1366,14 @@ static X86CPUDefinition builtin_x86_defs[] = {
@@ -1443,14 +1443,14 @@ static X86CPUDefinition builtin_x86_defs[] = {
.family = 6,
.model = 6,
.stepping = 3,
@ -926,7 +825,7 @@ index 723e022..338ee37 100644
.features[FEAT_8000_0001_ECX] =
CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
.xlevel = 0x8000000A,
@@ -1603,6 +1603,25 @@ static X86CPUDefinition builtin_x86_defs[] = {
@@ -1680,6 +1680,25 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
},
{
@ -953,7 +852,7 @@ index 723e022..338ee37 100644
.level = 10,
.vendor = CPUID_VENDOR_INTEL,
diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c
index 6c9bfde..77cb298 100644
index 7c75963..7f179ff 100644
--- a/target/ppc/cpu-models.c
+++ b/target/ppc/cpu-models.c
@@ -65,6 +65,7 @@
@ -964,13 +863,22 @@ index 6c9bfde..77cb298 100644
/* Embedded PowerPC */
/* PowerPC 401 family */
POWERPC_DEF("401", CPU_POWERPC_401, 401,
@@ -739,10 +740,13 @@
@@ -739,8 +740,10 @@
"PowerPC 7447A v1.2 (G4)")
POWERPC_DEF("7457a_v1.2", CPU_POWERPC_74x7A_v12, 7455,
"PowerPC 7457A v1.2 (G4)")
+#endif
/* 64 bits PowerPC */
#if defined (TARGET_PPC64)
+#if 0 /* Disabled for Red Hat Enterprise Linux */
POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970,
"PowerPC 970 v2.2")
POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,
@@ -757,8 +760,11 @@
"PowerPC 970MP v1.0")
POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970,
"PowerPC 970MP v1.1")
+#endif
+#if 0 /* Disabled for Red Hat Enterprise Linux */
POWERPC_DEF("power5+_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P,
"POWER5+ v2.1")
@ -978,32 +886,7 @@ index 6c9bfde..77cb298 100644
POWERPC_DEF("power7_v2.3", CPU_POWERPC_POWER7_v23, POWER7,
"POWER7 v2.3")
POWERPC_DEF("power7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7,
@@ -753,14 +757,17 @@
"POWER8 v2.0")
POWERPC_DEF("power8nvl_v1.0", CPU_POWERPC_POWER8NVL_v10, POWER8,
"POWER8NVL v1.0")
+#if 0 /* Disabled for Red Hat Enterprise Linux */
POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970,
"PowerPC 970 v2.2")
+#endif
POWERPC_DEF("power9_v1.0", CPU_POWERPC_POWER9_DD1, POWER9,
"POWER9 v1.0")
POWERPC_DEF("power9_v2.0", CPU_POWERPC_POWER9_DD20, POWER9,
"POWER9 v2.0")
+#if 0 /* Disabled for Red Hat Enterprise Linux */
POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,
"PowerPC 970FX v1.0 (G5)")
POWERPC_DEF("970fx_v2.0", CPU_POWERPC_970FX_v20, 970,
@@ -775,12 +782,14 @@
"PowerPC 970MP v1.0")
POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970,
"PowerPC 970MP v1.1")
+#endif
#endif /* defined (TARGET_PPC64) */
/***************************************************************************/
@@ -779,6 +785,7 @@
/* PowerPC CPU aliases */
PowerPCCPUAlias ppc_cpu_aliases[] = {
@ -1011,34 +894,31 @@ index 6c9bfde..77cb298 100644
{ "403", "403gc" },
{ "405", "405d4" },
{ "405cr", "405crc" },
@@ -939,20 +948,25 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
@@ -937,12 +944,15 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "7447a", "7447a_v1.2" },
{ "7457a", "7457a_v1.2" },
{ "apollo7pm", "7457a_v1.0" },
+#endif
#if defined(TARGET_PPC64)
+#if 0 /* Disabled for Red Hat Enterprise Linux */
{ "970", "970_v2.2" },
{ "970fx", "970fx_v3.1" },
{ "970mp", "970mp_v1.1" },
{ "power5+", "power5+_v2.1" },
{ "power5gs", "power5+_v2.1" },
+#endif
{ "power7", "power7_v2.3" },
{ "power7+", "power7+_v2.1" },
{ "power8e", "power8e_v2.1" },
{ "power8", "power8_v2.0" },
{ "power8nvl", "power8nvl_v1.0" },
@@ -951,6 +961,7 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "power9", "power9_v2.0" },
+#if 0 /* Disabled for Red Hat Enterprise Linux */
{ "970", "970_v2.2" },
{ "970fx", "970fx_v3.1" },
{ "970mp", "970mp_v1.1" },
#endif
-
+#endif
+#if 0 /* Disabled for Red Hat Enterprise Linux */
/* Generic PowerPCs */
#if defined(TARGET_PPC64)
{ "ppc64", "970fx_v3.1" },
@@ -960,5 +974,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
@@ -958,5 +969,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "ppc32", "604" },
{ "ppc", "604" },
{ "default", "604" },
@ -1046,10 +926,10 @@ index 6c9bfde..77cb298 100644
{ NULL, NULL }
};
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index 604898a..9c469ff 100644
index 7c253ff..e73f812 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -373,6 +373,9 @@ static void check_unavailable_features(const S390CPUModel *max_model,
@@ -380,6 +380,9 @@ static void check_unavailable_features(const S390CPUModel *max_model,
(max_model->def->gen == model->def->gen &&
max_model->def->ec_ga < model->def->ec_ga)) {
list_add_feat("type", unavailable);
@ -1060,10 +940,10 @@ index 604898a..9c469ff 100644
/* detect missing features if any to properly report them */
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index d923cf4..bbcbeed 100644
index 2ebf26a..3a0337a 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -2277,6 +2277,14 @@ void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
@@ -2342,6 +2342,14 @@ void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
error_setg(errp, "KVM doesn't support CPU models");
return;
}
@ -1079,10 +959,10 @@ index d923cf4..bbcbeed 100644
prop.ibc = s390_ibc_from_cpu_model(model);
/* configure cpu features indicated via STFL(e) */
diff --git a/vl.c b/vl.c
index 16b913f..4f96203 100644
index fa25d1a..39d152a 100644
--- a/vl.c
+++ b/vl.c
@@ -164,7 +164,7 @@ unsigned int max_cpus;
@@ -171,7 +171,7 @@ unsigned int max_cpus;
int smp_cores = 1;
int smp_threads = 1;
int acpi_enabled = 1;

View File

@ -1,6 +1,6 @@
From e34179d713443601a16936e2e80b8fbd044429be Mon Sep 17 00:00:00 2001
From c59789ec7d5213bda9aeb48aacef2e3e897fdf7e Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Thu, 8 Nov 2018 11:59:55 +0100
Date: Fri, 19 Oct 2018 12:36:59 +0200
Subject: Machine type related general changes
This patch is first part of original "Add RHEL machine types" patch we
@ -12,7 +12,7 @@ Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
hw/acpi/ich9.c | 16 +++
hw/acpi/piix4.c | 6 +-
hw/char/serial.c | 16 +++
hw/display/cirrus_vga.c | 2 +-
hw/display/cirrus_vga_isa.c | 2 +-
hw/display/vga-isa.c | 2 +-
hw/net/e1000.c | 18 ++-
hw/net/e1000e.c | 21 ++++
@ -67,7 +67,7 @@ index a4e87b8..23a7baa 100644
ich9_pm_get_disable_s3,
ich9_pm_set_disable_s3,
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index 6404af5..0f1f9e2 100644
index e330f24..b213f65 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -310,7 +310,7 @@ static const VMStateDescription vmstate_cpuhp_state = {
@ -91,7 +91,7 @@ index 6404af5..0f1f9e2 100644
DEFINE_PROP_BOOL("acpi-pci-hotplug-with-bridge-support", PIIX4PMState,
use_acpi_pci_hotplug, true),
diff --git a/hw/char/serial.c b/hw/char/serial.c
index 251f40f..8e3520c 100644
index 02463e3..a591387 100644
--- a/hw/char/serial.c
+++ b/hw/char/serial.c
@@ -30,6 +30,7 @@
@ -145,11 +145,11 @@ index 251f40f..8e3520c 100644
return s->poll_msl >= 0;
}
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 9fd5665..6910014 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -3061,7 +3061,7 @@ static void isa_cirrus_vga_realizefn(DeviceState *dev, Error **errp)
diff --git a/hw/display/cirrus_vga_isa.c b/hw/display/cirrus_vga_isa.c
index 1cb607d..22678a3 100644
--- a/hw/display/cirrus_vga_isa.c
+++ b/hw/display/cirrus_vga_isa.c
@@ -67,7 +67,7 @@ static void isa_cirrus_vga_realizefn(DeviceState *dev, Error **errp)
static Property isa_cirrus_vga_properties[] = {
DEFINE_PROP_UINT32("vgamem_mb", struct ISACirrusVGAState,
@ -172,10 +172,10 @@ index fa44242..7835c83 100644
};
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index 742cd0a..7d568da 100644
index 2e07880..e886e7c 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -1663,6 +1663,16 @@ static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
@@ -1673,6 +1673,16 @@ static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
pci_conf = pci_dev->config;
@ -192,7 +192,7 @@ index 742cd0a..7d568da 100644
/* TODO: RST# value should be 0, PCI spec 6.2.4 */
pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
@@ -1763,7 +1773,7 @@ static const TypeInfo e1000_base_info = {
@@ -1773,7 +1783,7 @@ static const TypeInfo e1000_base_info = {
static const E1000Info e1000_devices[] = {
{
@ -201,7 +201,7 @@ index 742cd0a..7d568da 100644
.device_id = E1000_DEV_ID_82540EM,
.revision = 0x03,
.phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT,
@@ -1784,6 +1794,11 @@ static const E1000Info e1000_devices[] = {
@@ -1794,6 +1804,11 @@ static const E1000Info e1000_devices[] = {
#endif
};
@ -213,7 +213,7 @@ index 742cd0a..7d568da 100644
static void e1000_register_types(void)
{
int i;
@@ -1801,6 +1816,7 @@ static void e1000_register_types(void)
@@ -1811,6 +1826,7 @@ static void e1000_register_types(void)
type_register(&type_info);
}
@ -296,7 +296,7 @@ index 510ddb3..f1de9e5 100644
e1000e_prop_disable_vnet, bool),
DEFINE_PROP_SIGNED("subsys_ven", E1000EState, subsys_ven,
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c
index 46daa16..05453e7 100644
index 2342a09..0c916b7 100644
--- a/hw/net/rtl8139.c
+++ b/hw/net/rtl8139.c
@@ -3174,7 +3174,7 @@ static int rtl8139_pre_save(void *opaque)
@ -319,7 +319,7 @@ index 46daa16..05453e7 100644
VMSTATE_UINT16(tally_counters.TxUndrn, RTL8139State),
diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c
index a27e54b..144e6e9 100644
index 9209394..43cf057 100644
--- a/hw/smbios/smbios.c
+++ b/hw/smbios/smbios.c
@@ -775,6 +775,7 @@ void smbios_set_defaults(const char *manufacturer, const char *product,
@ -344,18 +344,18 @@ index 6190b6f..ad2ad2d 100644
vmstate_pit_channel, PITChannelState),
VMSTATE_INT64(channels[0].next_transition_time,
diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c
index 6f1f723..68c353f 100644
index e4e4de8..3eced9c 100644
--- a/hw/timer/mc146818rtc.c
+++ b/hw/timer/mc146818rtc.c
@@ -34,6 +34,7 @@
#include "qapi/qapi-commands-misc.h"
@@ -35,6 +35,7 @@
#include "qapi/qapi-events-misc.h"
#include "qapi/visitor.h"
#include "exec/address-spaces.h"
+#include "migration/migration.h"
#ifdef TARGET_I386
#include "hw/i386/apic.h"
@@ -839,6 +840,11 @@ static int rtc_post_load(void *opaque, int version_id)
@@ -841,6 +842,11 @@ static int rtc_post_load(void *opaque, int version_id)
static bool rtc_irq_reinject_on_ack_count_needed(void *opaque)
{
RTCState *s = (RTCState *)opaque;
@ -456,10 +456,10 @@ index 59aeb06..7b5cc25 100644
#define ACPI_PM_PROP_TCO_ENABLED "enable_tco"
diff --git a/include/hw/compat.h b/include/hw/compat.h
index c08f404..22262c7 100644
index 6f4d5fc..f08cc7c 100644
--- a/include/hw/compat.h
+++ b/include/hw/compat.h
@@ -282,4 +282,233 @@
@@ -285,4 +285,233 @@
.value = "on",\
},
@ -707,10 +707,10 @@ index a5080ad..b943ec9 100644
+
#endif
diff --git a/migration/migration.c b/migration/migration.c
index b7d9854..381039c 100644
index b261c1e..fb425b5 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -106,6 +106,8 @@ enum mig_rp_message_type {
@@ -105,6 +105,8 @@ enum mig_rp_message_type {
MIG_RP_MSG_MAX
};
@ -720,10 +720,10 @@ index b7d9854..381039c 100644
migrations at once. For now we don't need to add
dynamic creation of migration */
diff --git a/migration/migration.h b/migration/migration.h
index 64a7b33..405d984 100644
index e413d4d..795238c 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -288,6 +288,11 @@ void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value);
@@ -292,6 +292,11 @@ void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value);
void dirty_bitmap_mig_before_vm_start(void);
void init_dirty_bitmap_incoming_migration(void);
@ -736,7 +736,7 @@ index 64a7b33..405d984 100644
#define qemu_ram_foreach_block \
#warning "Use qemu_ram_foreach_block_migratable in migration code"
diff --git a/qdev-monitor.c b/qdev-monitor.c
index 61e0300..f439b83 100644
index 07147c6..47ea051 100644
--- a/qdev-monitor.c
+++ b/qdev-monitor.c
@@ -47,7 +47,6 @@ typedef struct QDevAlias

View File

@ -1,6 +1,6 @@
From 2c0d79871ccb5383b1a91e5fc9139b6f8e8ed8e0 Mon Sep 17 00:00:00 2001
From 6df04926524e1a9f1178b53bf2b7b8978a6d5935 Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Thu, 8 Nov 2018 12:00:54 +0100
Date: Fri, 19 Oct 2018 12:53:31 +0200
Subject: Add aarch64 machine types
Adding changes to add RHEL machine types for aarch64 architecture.
@ -12,7 +12,7 @@ Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
2 files changed, 147 insertions(+), 1 deletion(-)
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 281ddcd..b02e4a0 100644
index a2b8d8f..703f0dd 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -60,6 +60,7 @@
@ -61,7 +61,7 @@ index 281ddcd..b02e4a0 100644
/* Number of external interrupt lines to configure the GIC with */
#define NUM_IRQS 256
@@ -1539,6 +1569,7 @@ static void machvirt_init(MachineState *machine)
@@ -1577,6 +1607,7 @@ static void machvirt_init(MachineState *machine)
qemu_add_machine_init_done_notifier(&vms->machine_done);
}
@ -69,7 +69,7 @@ index 281ddcd..b02e4a0 100644
static bool virt_get_secure(Object *obj, Error **errp)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
@@ -1567,6 +1598,7 @@ static void virt_set_virt(Object *obj, bool value, Error **errp)
@@ -1605,6 +1636,7 @@ static void virt_set_virt(Object *obj, bool value, Error **errp)
vms->virt = value;
}
@ -77,7 +77,7 @@ index 281ddcd..b02e4a0 100644
static bool virt_get_highmem(Object *obj, Error **errp)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
@@ -1621,6 +1653,7 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
@@ -1659,6 +1691,7 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
}
}
@ -85,7 +85,7 @@ index 281ddcd..b02e4a0 100644
static char *virt_get_iommu(Object *obj, Error **errp)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
@@ -1648,6 +1681,7 @@ static void virt_set_iommu(Object *obj, const char *value, Error **errp)
@@ -1686,6 +1719,7 @@ static void virt_set_iommu(Object *obj, const char *value, Error **errp)
error_append_hint(errp, "Valid values are none, smmuv3.\n");
}
}
@ -93,7 +93,7 @@ index 281ddcd..b02e4a0 100644
static CpuInstanceProperties
virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
@@ -1687,6 +1721,7 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
@@ -1725,6 +1759,7 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
return ms->possible_cpus;
}
@ -101,9 +101,9 @@ index 281ddcd..b02e4a0 100644
static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@@ -1835,6 +1870,9 @@ static void virt_machine_3_0_options(MachineClass *mc)
}
DEFINE_VIRT_MACHINE_AS_LATEST(3, 0)
@@ -1889,6 +1924,9 @@ DEFINE_VIRT_MACHINE(3, 0)
#define VIRT_COMPAT_2_12 \
HW_COMPAT_2_12
+#define VIRT_COMPAT_2_12 \
+ HW_COMPAT_2_12
@ -111,7 +111,7 @@ index 281ddcd..b02e4a0 100644
static void virt_2_12_instance_init(Object *obj)
{
virt_3_0_instance_init(obj);
@@ -1960,3 +1998,89 @@ static void virt_machine_2_6_options(MachineClass *mc)
@@ -2016,3 +2054,89 @@ static void virt_machine_2_6_options(MachineClass *mc)
vmc->no_pmu = true;
}
DEFINE_VIRT_MACHINE(2, 6)
@ -202,10 +202,10 @@ index 281ddcd..b02e4a0 100644
+}
+DEFINE_RHEL_MACHINE_AS_LATEST(7, 6, 0)
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 9a870cc..2293315 100644
index 4cc57a7..3237e97 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -128,6 +128,7 @@ typedef struct {
@@ -130,6 +130,7 @@ typedef struct {
#define VIRT_ECAM_ID(high) (high ? VIRT_PCIE_ECAM_HIGH : VIRT_PCIE_ECAM)
@ -213,7 +213,7 @@ index 9a870cc..2293315 100644
#define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
#define VIRT_MACHINE(obj) \
OBJECT_CHECK(VirtMachineState, (obj), TYPE_VIRT_MACHINE)
@@ -136,6 +137,27 @@ typedef struct {
@@ -138,6 +139,27 @@ typedef struct {
#define VIRT_MACHINE_CLASS(klass) \
OBJECT_CLASS_CHECK(VirtMachineClass, klass, TYPE_VIRT_MACHINE)

View File

@ -1,6 +1,6 @@
From b6c41d9cfe7ae58455737c967f2e47d6bc99d21e Mon Sep 17 00:00:00 2001
From 0f1a361c4bd8fc0874cc5d05e611fadb67524a1e Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Thu, 8 Nov 2018 12:01:38 +0100
Date: Fri, 19 Oct 2018 13:27:13 +0200
Subject: Add ppc64 machine types
Adding changes to add RHEL machine types for ppc64 architecture.
@ -15,26 +15,26 @@ Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
5 files changed, 279 insertions(+), 1 deletion(-)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 2f8c304..b8bdb69 100644
index 7afd1a1..76a4e83 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -4009,6 +4009,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */
@@ -3906,6 +3906,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
spapr_caps_add_properties(smc, &error_abort);
smc->irq = &spapr_irq_xics;
+ smc->has_power9_support = true;
}
static const TypeInfo spapr_machine_info = {
@@ -4059,6 +4060,7 @@ static const TypeInfo spapr_machine_info = {
@@ -3956,6 +3957,7 @@ static const TypeInfo spapr_machine_info = {
} \
type_init(spapr_machine_register_##suffix)
+#if 0 /* Disabled for Red Hat Enterprise Linux */
/*
* pseries-3.0
/*
* pseries-3.1
*/
@@ -4248,6 +4250,7 @@ DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
@@ -4169,6 +4171,7 @@ DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
.property = "pre-2.8-migration", \
.value = "on", \
},
@ -42,7 +42,7 @@ index 2f8c304..b8bdb69 100644
static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
@@ -4298,6 +4301,7 @@ static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
@@ -4219,6 +4222,7 @@ static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
*/
}
@ -50,7 +50,7 @@ index 2f8c304..b8bdb69 100644
static void spapr_machine_2_7_instance_options(MachineState *machine)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
@@ -4457,6 +4461,254 @@ static void spapr_machine_2_1_class_options(MachineClass *mc)
@@ -4378,6 +4382,254 @@ static void spapr_machine_2_1_class_options(MachineClass *mc)
SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_1);
}
DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
@ -306,10 +306,10 @@ index 2f8c304..b8bdb69 100644
static void spapr_machine_register_types(void)
{
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index fb29eec..a081b01 100644
index 63a7bb6..fcf6174 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -21,6 +21,7 @@
@@ -22,6 +22,7 @@
#include "sysemu/numa.h"
#include "sysemu/hw_accel.h"
#include "qemu/error-report.h"
@ -317,15 +317,15 @@ index fb29eec..a081b01 100644
static void spapr_cpu_reset(void *opaque)
{
@@ -212,6 +213,7 @@ static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr,
{
@@ -218,6 +219,7 @@ static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr,
CPUPPCState *env = &cpu->env;
CPUState *cs = CPU(cpu);
Error *local_err = NULL;
+ sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
object_property_set_bool(OBJECT(cpu), true, "realized", &local_err);
if (local_err) {
@@ -224,6 +226,17 @@ static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr,
@@ -230,6 +232,17 @@ static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr,
cpu_ppc_set_vhyp(cpu, PPC_VIRTUAL_HYPERVISOR(spapr));
kvmppc_set_papr(cpu);
@ -344,13 +344,13 @@ index fb29eec..a081b01 100644
spapr_cpu_reset(cpu);
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 7e5de1a..330c370 100644
index 6279711..d2370e5 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -101,6 +101,7 @@ struct sPAPRMachineClass {
bool dr_lmb_enabled; /* enable dynamic-reconfig/hotplug of LMBs */
bool use_ohci_by_default; /* use USB-OHCI instead of XHCI */
@@ -106,6 +106,7 @@ struct sPAPRMachineClass {
bool pre_2_10_has_unused_icps;
bool legacy_irq_allocation;
+ bool has_power9_support;
void (*phb_placement)(sPAPRMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
@ -381,10 +381,10 @@ index 7de4bf3..3e2e353 100644
const CompatInfo *compat = compat_by_pvr(compat_pvr);
const CompatInfo *min = compat_by_pvr(min_compat_pvr);
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 4edcf62..532f0d5 100644
index ab68abe..c559740 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1365,6 +1365,7 @@ static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
@@ -1376,6 +1376,7 @@ static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
/* Compatibility modes */
#if defined(TARGET_PPC64)

View File

@ -1,20 +1,23 @@
From 05b950dccdf9e8f58f3358730aa4705642d0196f Mon Sep 17 00:00:00 2001
From a47c6d2b9d75dcb15810fcfedcddf5eadf0ec227 Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Thu, 8 Nov 2018 12:02:37 +0100
Date: Fri, 19 Oct 2018 13:47:32 +0200
Subject: Add s390x machine types
Adding changes to add RHEL machine types for s390x architecture.
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
Merged patches (3.1.0):
- 29df663 s390x/cpumodel: default enable bpb and ppa15 for z196 and later
---
hw/s390x/s390-virtio-ccw.c | 46 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 45 insertions(+), 1 deletion(-)
hw/s390x/s390-virtio-ccw.c | 50 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 49 insertions(+), 1 deletion(-)
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 7983185..0f135c9 100644
index a0615a8..04f4c1a 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -649,7 +649,7 @@ bool css_migration_enabled(void)
@@ -627,7 +627,7 @@ bool css_migration_enabled(void)
{ \
MachineClass *mc = MACHINE_CLASS(oc); \
ccw_machine_##suffix##_class_options(mc); \
@ -23,7 +26,7 @@ index 7983185..0f135c9 100644
if (latest) { \
mc->alias = "s390-ccw-virtio"; \
mc->is_default = 1; \
@@ -676,6 +676,8 @@ bool css_migration_enabled(void)
@@ -657,6 +657,8 @@ bool css_migration_enabled(void)
#define CCW_COMPAT_2_12 \
HW_COMPAT_2_12
@ -32,7 +35,7 @@ index 7983185..0f135c9 100644
#define CCW_COMPAT_2_11 \
HW_COMPAT_2_11 \
{\
@@ -898,6 +900,48 @@ static void ccw_machine_2_4_class_options(MachineClass *mc)
@@ -894,6 +896,52 @@ static void ccw_machine_2_4_class_options(MachineClass *mc)
}
DEFINE_CCW_MACHINE(2_4, "2.4", false);
@ -67,6 +70,10 @@ index 7983185..0f135c9 100644
+ /* before 2.12 we emulated the very first z900, and RHEL 7.5 is
+ based on 2.10 */
+ s390_set_qemu_cpu_model(0x2064, 7, 1, qemu_cpu_feat);
+
+ /* bpb and ppa15 were only in the full model in RHEL 7.5 */
+ s390_cpudef_featoff_greater(11, 1, S390_FEAT_PPA15);
+ s390_cpudef_featoff_greater(11, 1, S390_FEAT_BPB);
+}
+
+static void ccw_machine_rhel750_class_options(MachineClass *mc)

View File

@ -1,6 +1,6 @@
From b95483e9a18050c7dac0e6c17b049f0733a409cd Mon Sep 17 00:00:00 2001
From edae60c4f30697c3c859cc9c88f80c0ed3dc0f0e Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Thu, 8 Nov 2018 12:03:11 +0100
Date: Fri, 19 Oct 2018 13:10:31 +0200
Subject: Add x86_64 machine types
Adding changes to add RHEL machine types for the x86_64 architecture.
@ -17,7 +17,7 @@ Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
7 files changed, 884 insertions(+), 7 deletions(-)
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index e1ee8ae..be9bdb5 100644
index 236a20e..3360da9 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -184,6 +184,9 @@ static void acpi_get_pm_info(AcpiPmInfo *pm)
@ -31,10 +31,10 @@ index e1ee8ae..be9bdb5 100644
}
assert(obj);
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 11c287e..253d48d 100644
index 567439e..a609332 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1419,7 +1419,8 @@ void pc_memory_init(PCMachineState *pcms,
@@ -1424,7 +1424,8 @@ void pc_memory_init(PCMachineState *pcms,
option_rom_mr = g_malloc(sizeof(*option_rom_mr));
memory_region_init_ram(option_rom_mr, NULL, "pc.rom", PC_ROM_SIZE,
&error_fatal);
@ -44,7 +44,7 @@ index 11c287e..253d48d 100644
memory_region_set_readonly(option_rom_mr, true);
}
memory_region_add_subregion_overlap(rom_memory,
@@ -2387,6 +2388,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
@@ -2389,6 +2390,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
pcmc->save_tsc_khz = true;
pcmc->linuxboot_dma_enabled = true;
assert(!mc->get_hotplug_handler);
@ -52,7 +52,7 @@ index 11c287e..253d48d 100644
mc->get_hotplug_handler = pc_get_hotpug_handler;
mc->cpu_index_to_instance_props = pc_cpu_index_to_props;
mc->get_default_cpu_node_id = pc_get_default_cpu_node_id;
@@ -2396,7 +2398,8 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
@@ -2398,7 +2400,8 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
mc->default_boot_order = "cad";
mc->hot_add_cpu = pc_hot_add_cpu;
mc->block_default_type = IF_IDE;
@ -63,7 +63,7 @@ index 11c287e..253d48d 100644
hc->pre_plug = pc_machine_device_pre_plug_cb;
hc->plug = pc_machine_device_plug_cb;
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index dc09466..f0484ec 100644
index 7092d6d..83c22ae 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -50,6 +50,7 @@
@ -93,15 +93,15 @@ index dc09466..f0484ec 100644
static void pc_compat_2_3(MachineState *machine)
{
PCMachineState *pcms = PC_MACHINE(machine);
@@ -433,6 +435,7 @@ static void pc_i440fx_3_0_machine_options(MachineClass *m)
@@ -433,6 +435,7 @@ static void pc_i440fx_3_1_machine_options(MachineClass *m)
pc_i440fx_machine_options(m);
m->alias = "pc";
m->is_default = 1;
+ SET_MACHINE_COMPAT(m, PC_COMPAT_2_12);
}
DEFINE_I440FX_MACHINE(v3_0, "pc-i440fx-3.0", NULL,
@@ -1148,3 +1151,190 @@ static void xenfv_machine_options(MachineClass *m)
DEFINE_I440FX_MACHINE(v3_1, "pc-i440fx-3.1", NULL,
@@ -1157,3 +1160,190 @@ static void xenfv_machine_options(MachineClass *m)
DEFINE_PC_MACHINE(xenfv, "xenfv", pc_xen_hvm_init,
xenfv_machine_options);
#endif
@ -293,7 +293,7 @@ index dc09466..f0484ec 100644
+DEFINE_PC_MACHINE(rhel700, "pc-i440fx-rhel7.0.0", pc_init_rhel700,
+ pc_machine_rhel700_options);
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 532241e..c1024c5 100644
index 4702bb1..163546e 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -145,8 +145,8 @@ static void pc_q35_init(MachineState *machine)
@ -315,15 +315,15 @@ index 532241e..c1024c5 100644
static void pc_q35_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
@@ -315,6 +316,7 @@ static void pc_q35_3_0_machine_options(MachineClass *m)
@@ -315,6 +316,7 @@ static void pc_q35_3_1_machine_options(MachineClass *m)
{
pc_q35_machine_options(m);
m->alias = "q35";
+ SET_MACHINE_COMPAT(m, PC_COMPAT_2_12);
}
DEFINE_Q35_MACHINE(v3_0, "pc-q35-3.0", NULL,
@@ -416,3 +418,90 @@ static void pc_q35_2_4_machine_options(MachineClass *m)
DEFINE_Q35_MACHINE(v3_1, "pc-q35-3.1", NULL,
@@ -425,3 +427,90 @@ static void pc_q35_2_4_machine_options(MachineClass *m)
DEFINE_Q35_MACHINE(v2_4, "pc-q35-2.4", NULL,
pc_q35_2_4_machine_options);
@ -415,7 +415,7 @@ index 532241e..c1024c5 100644
+DEFINE_PC_MACHINE(q35_rhel730, "pc-q35-rhel7.3.0", pc_q35_init_rhel730,
+ pc_q35_machine_rhel730_options);
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 6894f37..ef82513 100644
index 136fe49..f8f35af 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -134,6 +134,9 @@ struct PCMachineClass {
@ -428,7 +428,7 @@ index 6894f37..ef82513 100644
};
#define TYPE_PC_MACHINE "generic-pc-machine"
@@ -960,4 +963,565 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
@@ -976,4 +979,565 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
type_init(pc_machine_init_##suffix)
extern void igd_passthrough_isa_bridge_create(PCIBus *bus, uint16_t gpu_dev_id);
@ -995,10 +995,10 @@ index 6894f37..ef82513 100644
+ },
#endif
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 338ee37..051018a 100644
index e9b9183..573de14 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1360,11 +1360,17 @@ static CPUCaches epyc_cache_info = {
@@ -1437,11 +1437,17 @@ static CPUCaches epyc_cache_info = {
static X86CPUDefinition builtin_x86_defs[] = {
{
@ -1017,7 +1017,7 @@ index 338ee37..051018a 100644
.stepping = 3,
.features[FEAT_1_EDX] = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
@@ -2684,6 +2690,7 @@ static PropValue kvm_default_props[] = {
@@ -2934,6 +2940,7 @@ static PropValue kvm_default_props[] = {
{ "acpi", "off" },
{ "monitor", "off" },
{ "svm", "off" },
@ -1026,10 +1026,10 @@ index 338ee37..051018a 100644
};
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 084c2c7..0c57c26 100644
index 225b5d4..c60e1b8 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -955,6 +955,26 @@ static const VMStateDescription vmstate_svm_npt = {
@@ -964,6 +964,26 @@ static const VMStateDescription vmstate_svm_npt = {
}
};
@ -1056,7 +1056,7 @@ index 084c2c7..0c57c26 100644
VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
@@ -1080,6 +1100,7 @@ VMStateDescription vmstate_x86_cpu = {
@@ -1089,6 +1109,7 @@ VMStateDescription vmstate_x86_cpu = {
&vmstate_msr_intel_pt,
&vmstate_msr_virt_ssbd,
&vmstate_svm_npt,

View File

@ -1,498 +0,0 @@
From f4e3d697cb6a18301b1279c0b07896eb5b228aa9 Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Thu, 8 Nov 2018 12:03:48 +0100
Subject: Enable make check
Fixing tests after device disabling and machine type changes, and enabling
the make check run during the build.
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
redhat/qemu-kvm.spec.template | 2 +-
tests/Makefile.include | 123 +++++++++++++++++++++---------------------
tests/boot-serial-test.c | 6 ++-
tests/cpu-plug-test.c | 3 +-
tests/e1000-test.c | 2 +
tests/endianness-test.c | 2 +
tests/prom-env-test.c | 2 +
tests/qemu-iotests/051 | 12 ++---
tests/qemu-iotests/group | 4 +-
tests/qom-test.c | 2 +-
tests/test-x86-cpuid-compat.c | 2 +
tests/usb-hcd-xhci-test.c | 4 ++
12 files changed, 91 insertions(+), 73 deletions(-)
diff --git a/tests/Makefile.include b/tests/Makefile.include
index a492827..4b78396 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -184,8 +184,8 @@ gcov-files-generic-y = qdev-monitor.c qmp.c
check-qtest-generic-y += tests/cdrom-test$(EXESUF)
gcov-files-ipack-y += hw/ipack/ipack.c
-check-qtest-ipack-y += tests/ipoctal232-test$(EXESUF)
-gcov-files-ipack-y += hw/char/ipoctal232.c
+#check-qtest-ipack-y += tests/ipoctal232-test$(EXESUF)
+#gcov-files-ipack-y += hw/char/ipoctal232.c
check-qtest-virtioserial-y += tests/virtio-console-test$(EXESUF)
gcov-files-virtioserial-y += hw/char/virtio-console.c
@@ -217,23 +217,23 @@ check-qtest-pci-y += tests/e1000e-test$(EXESUF)
gcov-files-pci-y += hw/net/e1000e.c hw/net/e1000e_core.c
check-qtest-pci-y += tests/rtl8139-test$(EXESUF)
gcov-files-pci-y += hw/net/rtl8139.c
-check-qtest-pci-y += tests/pcnet-test$(EXESUF)
-gcov-files-pci-y += hw/net/pcnet.c
-gcov-files-pci-y += hw/net/pcnet-pci.c
-check-qtest-pci-y += tests/eepro100-test$(EXESUF)
-gcov-files-pci-y += hw/net/eepro100.c
-check-qtest-pci-y += tests/ne2000-test$(EXESUF)
-gcov-files-pci-y += hw/net/ne2000.c
-check-qtest-pci-y += tests/nvme-test$(EXESUF)
-gcov-files-pci-y += hw/block/nvme.c
+#check-qtest-pci-y += tests/pcnet-test$(EXESUF)
+#gcov-files-pci-y += hw/net/pcnet.c
+#gcov-files-pci-y += hw/net/pcnet-pci.c
+#check-qtest-pci-y += tests/eepro100-test$(EXESUF)
+#gcov-files-pci-y += hw/net/eepro100.c
+#check-qtest-pci-y += tests/ne2000-test$(EXESUF)
+#gcov-files-pci-y += hw/net/ne2000.c
+#check-qtest-pci-y += tests/nvme-test$(EXESUF)
+#gcov-files-pci-y += hw/block/nvme.c
check-qtest-pci-y += tests/ac97-test$(EXESUF)
gcov-files-pci-y += hw/audio/ac97.c
-check-qtest-pci-y += tests/es1370-test$(EXESUF)
-gcov-files-pci-y += hw/audio/es1370.c
+#check-qtest-pci-y += tests/es1370-test$(EXESUF)
+#gcov-files-pci-y += hw/audio/es1370.c
check-qtest-pci-y += $(check-qtest-virtio-y)
gcov-files-pci-y += $(gcov-files-virtio-y) hw/virtio/virtio-pci.c
-check-qtest-pci-y += tests/tpci200-test$(EXESUF)
-gcov-files-pci-y += hw/ipack/tpci200.c
+#check-qtest-pci-y += tests/tpci200-test$(EXESUF)
+#gcov-files-pci-y += hw/ipack/tpci200.c
check-qtest-pci-y += $(check-qtest-ipack-y)
gcov-files-pci-y += $(gcov-files-ipack-y)
check-qtest-pci-y += tests/display-vga-test$(EXESUF)
@@ -245,25 +245,25 @@ gcov-files-pci-y += hw/display/virtio-gpu-pci.c
gcov-files-pci-$(CONFIG_VIRTIO_VGA) += hw/display/virtio-vga.c
check-qtest-pci-y += tests/intel-hda-test$(EXESUF)
gcov-files-pci-y += hw/audio/intel-hda.c hw/audio/hda-codec.c
-check-qtest-pci-$(CONFIG_IVSHMEM) += tests/ivshmem-test$(EXESUF)
-gcov-files-pci-y += hw/misc/ivshmem.c
-check-qtest-pci-y += tests/megasas-test$(EXESUF)
-gcov-files-pci-y += hw/scsi/megasas.c
+check-qtest-pci-$(CONFIG_IVSHMEM_DEVICE) += tests/ivshmem-test$(EXESUF)
+gcov-files-pci-$(CONFIG_IVSHMEM_DEVICE) += hw/misc/ivshmem.c
+#check-qtest-pci-y += tests/megasas-test$(EXESUF)
+#gcov-files-pci-y += hw/scsi/megasas.c
check-qtest-i386-y = tests/endianness-test$(EXESUF)
-check-qtest-i386-y += tests/fdc-test$(EXESUF)
-gcov-files-i386-y = hw/block/fdc.c
+#check-qtest-i386-y += tests/fdc-test$(EXESUF)
+#gcov-files-i386-y = hw/block/fdc.c
check-qtest-i386-y += tests/ide-test$(EXESUF)
check-qtest-i386-y += tests/ahci-test$(EXESUF)
check-qtest-i386-y += tests/hd-geo-test$(EXESUF)
gcov-files-i386-y += hw/block/hd-geometry.c
check-qtest-i386-y += tests/boot-order-test$(EXESUF)
-check-qtest-i386-y += tests/bios-tables-test$(EXESUF)
+#check-qtest-i386-y += tests/bios-tables-test$(EXESUF)
check-qtest-i386-y += tests/boot-serial-test$(EXESUF)
check-qtest-i386-$(CONFIG_SLIRP) += tests/pxe-test$(EXESUF)
check-qtest-i386-y += tests/rtc-test$(EXESUF)
-check-qtest-i386-y += tests/ipmi-kcs-test$(EXESUF)
-check-qtest-i386-y += tests/ipmi-bt-test$(EXESUF)
+#check-qtest-i386-y += tests/ipmi-kcs-test$(EXESUF)
+#check-qtest-i386-y += tests/ipmi-bt-test$(EXESUF)
check-qtest-i386-y += tests/i440fx-test$(EXESUF)
check-qtest-i386-y += tests/fw_cfg-test$(EXESUF)
check-qtest-i386-y += tests/drive_del-test$(EXESUF)
@@ -272,8 +272,8 @@ check-qtest-i386-y += tests/tco-test$(EXESUF)
gcov-files-i386-y += hw/watchdog/watchdog.c hw/watchdog/wdt_ib700.c
check-qtest-i386-y += $(check-qtest-pci-y)
gcov-files-i386-y += $(gcov-files-pci-y)
-check-qtest-i386-y += tests/vmxnet3-test$(EXESUF)
-gcov-files-i386-y += hw/net/vmxnet3.c
+#check-qtest-i386-y += tests/vmxnet3-test$(EXESUF)
+#gcov-files-i386-y += hw/net/vmxnet3.c
gcov-files-i386-y += hw/net/net_rx_pkt.c
gcov-files-i386-y += hw/net/net_tx_pkt.c
check-qtest-i386-y += tests/pvpanic-test$(EXESUF)
@@ -282,8 +282,8 @@ check-qtest-i386-y += tests/i82801b11-test$(EXESUF)
gcov-files-i386-y += hw/pci-bridge/i82801b11.c
check-qtest-i386-y += tests/ioh3420-test$(EXESUF)
gcov-files-i386-y += hw/pci-bridge/ioh3420.c
-check-qtest-i386-y += tests/usb-hcd-ohci-test$(EXESUF)
-gcov-files-i386-y += hw/usb/hcd-ohci.c
+#check-qtest-i386-y += tests/usb-hcd-ohci-test$(EXESUF)
+#gcov-files-i386-y += hw/usb/hcd-ohci.c
check-qtest-i386-y += tests/usb-hcd-uhci-test$(EXESUF)
gcov-files-i386-y += hw/usb/hcd-uhci.c
check-qtest-i386-y += tests/usb-hcd-ehci-test$(EXESUF)
@@ -311,7 +311,7 @@ check-qtest-i386-y += tests/migration-test$(EXESUF)
check-qtest-i386-y += tests/test-x86-cpuid-compat$(EXESUF)
check-qtest-i386-y += tests/numa-test$(EXESUF)
check-qtest-x86_64-y += $(check-qtest-i386-y)
-check-qtest-x86_64-y += tests/sdhci-test$(EXESUF)
+#check-qtest-x86_64-y += tests/sdhci-test$(EXESUF)
gcov-files-i386-y += i386-softmmu/hw/timer/mc146818rtc.c
gcov-files-x86_64-y = $(subst i386-softmmu/,x86_64-softmmu/,$(gcov-files-i386-y))
@@ -332,34 +332,35 @@ check-qtest-mips64el-y = tests/endianness-test$(EXESUF)
check-qtest-moxie-y = tests/boot-serial-test$(EXESUF)
check-qtest-ppc-y = tests/endianness-test$(EXESUF)
-check-qtest-ppc-y += tests/boot-order-test$(EXESUF)
+#check-qtest-ppc-y += tests/boot-order-test$(EXESUF)
check-qtest-ppc-y += tests/prom-env-test$(EXESUF)
check-qtest-ppc-y += tests/drive_del-test$(EXESUF)
check-qtest-ppc-y += tests/boot-serial-test$(EXESUF)
-check-qtest-ppc-y += tests/m48t59-test$(EXESUF)
-gcov-files-ppc-y += hw/timer/m48t59.c
+#check-qtest-ppc-y += tests/m48t59-test$(EXESUF)
+#gcov-files-ppc-y += hw/timer/m48t59.c
check-qtest-ppc64-y = $(check-qtest-ppc-y)
gcov-files-ppc64-y = $(subst ppc-softmmu/,ppc64-softmmu/,$(gcov-files-ppc-y))
check-qtest-ppc64-y += tests/spapr-phb-test$(EXESUF)
gcov-files-ppc64-y += ppc64-softmmu/hw/ppc/spapr_pci.c
-check-qtest-ppc64-y += tests/pnv-xscom-test$(EXESUF)
+#check-qtest-ppc64-y += tests/pnv-xscom-test$(EXESUF)
check-qtest-ppc64-y += tests/migration-test$(EXESUF)
check-qtest-ppc64-y += tests/rtas-test$(EXESUF)
check-qtest-ppc64-$(CONFIG_SLIRP) += tests/pxe-test$(EXESUF)
-check-qtest-ppc64-y += tests/usb-hcd-ohci-test$(EXESUF)
-gcov-files-ppc64-y += hw/usb/hcd-ohci.c
-check-qtest-ppc64-y += tests/usb-hcd-uhci-test$(EXESUF)
-gcov-files-ppc64-y += hw/usb/hcd-uhci.c
+#check-qtest-ppc64-y += tests/usb-hcd-ohci-test$(EXESUF)
+#gcov-files-ppc64-y += hw/usb/hcd-ohci.c
+#check-qtest-ppc64-y += tests/usb-hcd-uhci-test$(EXESUF)
+#gcov-files-ppc64-y += hw/usb/hcd-uhci.c
check-qtest-ppc64-y += tests/usb-hcd-xhci-test$(EXESUF)
gcov-files-ppc64-y += hw/usb/hcd-xhci.c
check-qtest-ppc64-y += $(check-qtest-virtio-y)
-check-qtest-ppc64-$(CONFIG_SLIRP) += tests/test-netfilter$(EXESUF)
-check-qtest-ppc64-$(CONFIG_POSIX) += tests/test-filter-mirror$(EXESUF)
-check-qtest-ppc64-$(CONFIG_POSIX) += tests/test-filter-redirector$(EXESUF)
+#check-qtest-ppc64-$(CONFIG_SLIRP) += tests/test-netfilter$(EXESUF)
+#check-qtest-ppc64-$(CONFIG_POSIX) += tests/test-filter-mirror$(EXESUF)
+#check-qtest-ppc64-$(CONFIG_POSIX) += tests/test-filter-redirector$(EXESUF)
check-qtest-ppc64-y += tests/display-vga-test$(EXESUF)
check-qtest-ppc64-y += tests/numa-test$(EXESUF)
-check-qtest-ppc64-$(CONFIG_IVSHMEM) += tests/ivshmem-test$(EXESUF)
+check-qtest-ppc64-$(CONFIG_IVSHMEM_DEVICE) += tests/ivshmem-test$(EXESUF)
+gcov-files-ppc64-$(CONFIG_IVSHMEM_DEVICE) += hw/misc/ivshmem.c
check-qtest-ppc64-y += tests/cpu-plug-test$(EXESUF)
check-qtest-sh4-y = tests/endianness-test$(EXESUF)
@@ -388,7 +389,7 @@ check-qtest-arm-y += tests/boot-serial-test$(EXESUF)
check-qtest-arm-y += tests/sdhci-test$(EXESUF)
check-qtest-aarch64-y = tests/numa-test$(EXESUF)
-check-qtest-aarch64-y += tests/sdhci-test$(EXESUF)
+#check-qtest-aarch64-y += tests/sdhci-test$(EXESUF)
check-qtest-aarch64-y += tests/boot-serial-test$(EXESUF)
check-qtest-microblazeel-y = $(check-qtest-microblaze-y)
@@ -777,15 +778,15 @@ tests/endianness-test$(EXESUF): tests/endianness-test.o
tests/spapr-phb-test$(EXESUF): tests/spapr-phb-test.o $(libqos-obj-y)
tests/prom-env-test$(EXESUF): tests/prom-env-test.o $(libqos-obj-y)
tests/rtas-test$(EXESUF): tests/rtas-test.o $(libqos-spapr-obj-y)
-tests/fdc-test$(EXESUF): tests/fdc-test.o
+#tests/fdc-test$(EXESUF): tests/fdc-test.o
tests/ide-test$(EXESUF): tests/ide-test.o $(libqos-pc-obj-y)
tests/ahci-test$(EXESUF): tests/ahci-test.o $(libqos-pc-obj-y)
-tests/ipmi-kcs-test$(EXESUF): tests/ipmi-kcs-test.o
-tests/ipmi-bt-test$(EXESUF): tests/ipmi-bt-test.o
+#tests/ipmi-kcs-test$(EXESUF): tests/ipmi-kcs-test.o
+#tests/ipmi-bt-test$(EXESUF): tests/ipmi-bt-test.o
tests/hd-geo-test$(EXESUF): tests/hd-geo-test.o
tests/boot-order-test$(EXESUF): tests/boot-order-test.o $(libqos-obj-y)
tests/boot-serial-test$(EXESUF): tests/boot-serial-test.o $(libqos-obj-y)
-tests/bios-tables-test$(EXESUF): tests/bios-tables-test.o \
+#tests/bios-tables-test$(EXESUF): tests/bios-tables-test.o \
tests/boot-sector.o tests/acpi-utils.o $(libqos-obj-y)
tests/pxe-test$(EXESUF): tests/pxe-test.o tests/boot-sector.o $(libqos-obj-y)
tests/tmp105-test$(EXESUF): tests/tmp105-test.o $(libqos-omap-obj-y)
@@ -798,11 +799,11 @@ tests/fw_cfg-test$(EXESUF): tests/fw_cfg-test.o $(libqos-pc-obj-y)
tests/e1000-test$(EXESUF): tests/e1000-test.o
tests/e1000e-test$(EXESUF): tests/e1000e-test.o $(libqos-pc-obj-y)
tests/rtl8139-test$(EXESUF): tests/rtl8139-test.o $(libqos-pc-obj-y)
-tests/pcnet-test$(EXESUF): tests/pcnet-test.o
-tests/pnv-xscom-test$(EXESUF): tests/pnv-xscom-test.o
-tests/eepro100-test$(EXESUF): tests/eepro100-test.o
-tests/vmxnet3-test$(EXESUF): tests/vmxnet3-test.o
-tests/ne2000-test$(EXESUF): tests/ne2000-test.o
+#tests/pcnet-test$(EXESUF): tests/pcnet-test.o
+#tests/pnv-xscom-test$(EXESUF): tests/pnv-xscom-test.o
+#tests/eepro100-test$(EXESUF): tests/eepro100-test.o
+#tests/vmxnet3-test$(EXESUF): tests/vmxnet3-test.o
+#tests/ne2000-test$(EXESUF): tests/ne2000-test.o
tests/wdt_ib700-test$(EXESUF): tests/wdt_ib700-test.o
tests/tco-test$(EXESUF): tests/tco-test.o $(libqos-pc-obj-y)
tests/virtio-balloon-test$(EXESUF): tests/virtio-balloon-test.o $(libqos-virtio-obj-y)
@@ -813,22 +814,22 @@ tests/virtio-scsi-test$(EXESUF): tests/virtio-scsi-test.o $(libqos-virtio-obj-y)
tests/virtio-9p-test$(EXESUF): tests/virtio-9p-test.o $(libqos-virtio-obj-y)
tests/virtio-serial-test$(EXESUF): tests/virtio-serial-test.o $(libqos-virtio-obj-y)
tests/virtio-console-test$(EXESUF): tests/virtio-console-test.o $(libqos-virtio-obj-y)
-tests/tpci200-test$(EXESUF): tests/tpci200-test.o
+#tests/tpci200-test$(EXESUF): tests/tpci200-test.o
tests/display-vga-test$(EXESUF): tests/display-vga-test.o
-tests/ipoctal232-test$(EXESUF): tests/ipoctal232-test.o
+#tests/ipoctal232-test$(EXESUF): tests/ipoctal232-test.o
tests/qom-test$(EXESUF): tests/qom-test.o
tests/test-hmp$(EXESUF): tests/test-hmp.o
tests/machine-none-test$(EXESUF): tests/machine-none-test.o
tests/drive_del-test$(EXESUF): tests/drive_del-test.o $(libqos-virtio-obj-y)
tests/qdev-monitor-test$(EXESUF): tests/qdev-monitor-test.o $(libqos-pc-obj-y)
-tests/nvme-test$(EXESUF): tests/nvme-test.o
+#tests/nvme-test$(EXESUF): tests/nvme-test.o
tests/pvpanic-test$(EXESUF): tests/pvpanic-test.o
tests/i82801b11-test$(EXESUF): tests/i82801b11-test.o
tests/ac97-test$(EXESUF): tests/ac97-test.o
-tests/es1370-test$(EXESUF): tests/es1370-test.o
+#tests/es1370-test$(EXESUF): tests/es1370-test.o
tests/intel-hda-test$(EXESUF): tests/intel-hda-test.o
tests/ioh3420-test$(EXESUF): tests/ioh3420-test.o
-tests/usb-hcd-ohci-test$(EXESUF): tests/usb-hcd-ohci-test.o $(libqos-usb-obj-y)
+#tests/usb-hcd-ohci-test$(EXESUF): tests/usb-hcd-ohci-test.o $(libqos-usb-obj-y)
tests/usb-hcd-uhci-test$(EXESUF): tests/usb-hcd-uhci-test.o $(libqos-usb-obj-y)
tests/usb-hcd-ehci-test$(EXESUF): tests/usb-hcd-ehci-test.o $(libqos-usb-obj-y)
tests/usb-hcd-xhci-test$(EXESUF): tests/usb-hcd-xhci-test.o $(libqos-usb-obj-y)
@@ -841,19 +842,19 @@ tests/qemu-iotests/socket_scm_helper$(EXESUF): tests/qemu-iotests/socket_scm_hel
tests/test-qemu-opts$(EXESUF): tests/test-qemu-opts.o $(test-util-obj-y)
tests/test-keyval$(EXESUF): tests/test-keyval.o $(test-util-obj-y) $(test-qapi-obj-y)
tests/test-write-threshold$(EXESUF): tests/test-write-threshold.o $(test-block-obj-y)
-tests/test-netfilter$(EXESUF): tests/test-netfilter.o $(qtest-obj-y)
-tests/test-filter-mirror$(EXESUF): tests/test-filter-mirror.o $(qtest-obj-y)
-tests/test-filter-redirector$(EXESUF): tests/test-filter-redirector.o $(qtest-obj-y)
+#tests/test-netfilter$(EXESUF): tests/test-netfilter.o $(qtest-obj-y)
+#tests/test-filter-mirror$(EXESUF): tests/test-filter-mirror.o $(qtest-obj-y)
+#tests/test-filter-redirector$(EXESUF): tests/test-filter-redirector.o $(qtest-obj-y)
tests/test-x86-cpuid-compat$(EXESUF): tests/test-x86-cpuid-compat.o $(qtest-obj-y)
tests/ivshmem-test$(EXESUF): tests/ivshmem-test.o contrib/ivshmem-server/ivshmem-server.o $(libqos-pc-obj-y) $(libqos-spapr-obj-y)
-tests/megasas-test$(EXESUF): tests/megasas-test.o $(libqos-spapr-obj-y) $(libqos-pc-obj-y)
+#tests/megasas-test$(EXESUF): tests/megasas-test.o $(libqos-spapr-obj-y) $(libqos-pc-obj-y)
tests/vhost-user-bridge$(EXESUF): tests/vhost-user-bridge.o $(test-util-obj-y) libvhost-user.a
tests/test-uuid$(EXESUF): tests/test-uuid.o $(test-util-obj-y)
tests/test-arm-mptimer$(EXESUF): tests/test-arm-mptimer.o
tests/test-qapi-util$(EXESUF): tests/test-qapi-util.o $(test-util-obj-y)
tests/numa-test$(EXESUF): tests/numa-test.o
tests/vmgenid-test$(EXESUF): tests/vmgenid-test.o tests/boot-sector.o tests/acpi-utils.o
-tests/sdhci-test$(EXESUF): tests/sdhci-test.o $(libqos-pc-obj-y)
+#tests/sdhci-test$(EXESUF): tests/sdhci-test.o $(libqos-pc-obj-y)
tests/cdrom-test$(EXESUF): tests/cdrom-test.o tests/boot-sector.o $(libqos-obj-y)
tests/migration/stress$(EXESUF): tests/migration/stress.o
diff --git a/tests/boot-serial-test.c b/tests/boot-serial-test.c
index 952a2e7..5217a39 100644
--- a/tests/boot-serial-test.c
+++ b/tests/boot-serial-test.c
@@ -80,17 +80,21 @@ static testdef_t tests[] = {
{ "ppc", "g3beige", "", "PowerPC,750" },
{ "ppc", "mac99", "", "PowerPC,G4" },
{ "ppc", "sam460ex", "-m 256", "DRAM: 256 MiB" },
+#if 0 /* Disabled for Red Hat Enterprise Linux */
{ "ppc64", "ppce500", "", "U-Boot" },
{ "ppc64", "prep", "-boot e", "Booting from device e" },
{ "ppc64", "40p", "-m 192", "Memory size: 192 MB" },
{ "ppc64", "mac99", "", "PowerPC,970FX" },
+#endif
{ "ppc64", "pseries", "", "Open Firmware" },
+#if 0 /* Disabled for Red Hat Enterprise Linux */
{ "ppc64", "powernv", "-cpu POWER8", "OPAL" },
{ "ppc64", "sam460ex", "-device e1000", "8086 100e" },
+#endif
{ "i386", "isapc", "-cpu qemu32 -device sga", "SGABIOS" },
{ "i386", "pc", "-device sga", "SGABIOS" },
{ "i386", "q35", "-device sga", "SGABIOS" },
- { "x86_64", "isapc", "-cpu qemu32 -device sga", "SGABIOS" },
+ { "x86_64", "pc", "-cpu qemu32 -device sga", "SGABIOS" },
{ "x86_64", "q35", "-device sga", "SGABIOS" },
{ "sparc", "LX", "", "TMS390S10" },
{ "sparc", "SS-4", "", "MB86904" },
diff --git a/tests/cpu-plug-test.c b/tests/cpu-plug-test.c
index 5f39ba0..48b8d09 100644
--- a/tests/cpu-plug-test.c
+++ b/tests/cpu-plug-test.c
@@ -192,7 +192,8 @@ static void add_pseries_test_case(const char *mname)
PlugTestData *data;
if (!g_str_has_prefix(mname, "pseries-") ||
- (g_str_has_prefix(mname, "pseries-2.") && atoi(&mname[10]) < 7)) {
+ (g_str_has_prefix(mname, "pseries-2.") && atoi(&mname[10]) < 7) ||
+ strcmp(mname,"pseries-rhel7.2.0") == 0) {
return;
}
data = g_new(PlugTestData, 1);
diff --git a/tests/e1000-test.c b/tests/e1000-test.c
index 0c5fcdc..b830432 100644
--- a/tests/e1000-test.c
+++ b/tests/e1000-test.c
@@ -29,8 +29,10 @@ static void test_device(gconstpointer data)
static const char *models[] = {
"e1000",
"e1000-82540em",
+#if 0 /* Disabled for Red Hat Enterprise Linux */
"e1000-82544gc",
"e1000-82545em",
+#endif
};
int main(int argc, char **argv)
diff --git a/tests/endianness-test.c b/tests/endianness-test.c
index 546e096..440353d 100644
--- a/tests/endianness-test.c
+++ b/tests/endianness-test.c
@@ -37,10 +37,12 @@ static const TestCase test_cases[] = {
{ "ppc", "g3beige", 0xfe000000, .bswap = true, .superio = "i82378" },
{ "ppc", "prep", 0x80000000, .bswap = true },
{ "ppc", "bamboo", 0xe8000000, .bswap = true, .superio = "i82378" },
+#if 0 /* Disabled for RHEL, since ISA is not enabled */
{ "ppc64", "mac99", 0xf2000000, .bswap = true, .superio = "i82378" },
{ "ppc64", "pseries", (1ULL << 45), .bswap = true, .superio = "i82378" },
{ "ppc64", "pseries-2.7", 0x10080000000ULL,
.bswap = true, .superio = "i82378" },
+#endif /* Disabled for RHEL, since ISA is not enabled */
{ "sh4", "r2d", 0xfe240000, .superio = "i82378" },
{ "sh4eb", "r2d", 0xfe240000, .bswap = true, .superio = "i82378" },
{ "sparc64", "sun4u", 0x1fe02000000LL, .bswap = true },
diff --git a/tests/prom-env-test.c b/tests/prom-env-test.c
index 8c867e6..cc9b6ec 100644
--- a/tests/prom-env-test.c
+++ b/tests/prom-env-test.c
@@ -82,7 +82,9 @@ int main(int argc, char *argv[])
if (!strcmp(arch, "ppc")) {
add_tests(ppc_machines);
} else if (!strcmp(arch, "ppc64")) {
+#if 0 /* Disabled for Red Hat Enterprise Linux */
add_tests(ppc_machines);
+#endif
if (g_test_slow()) {
qtest_add_data_func("prom-env/pseries", "pseries", test_machine);
}
diff --git a/tests/qemu-iotests/051 b/tests/qemu-iotests/051
index ee9c820..c5cc0ee 100755
--- a/tests/qemu-iotests/051
+++ b/tests/qemu-iotests/051
@@ -183,11 +183,11 @@ run_qemu -drive if=virtio
case "$QEMU_DEFAULT_MACHINE" in
pc)
run_qemu -drive if=none,id=disk -device ide-cd,drive=disk
- run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-cd,drive=disk
+# run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-cd,drive=disk
run_qemu -drive if=none,id=disk -device ide-drive,drive=disk
run_qemu -drive if=none,id=disk -device ide-hd,drive=disk
- run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-disk,drive=disk
- run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-hd,drive=disk
+# run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-disk,drive=disk
+# run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-hd,drive=disk
;;
*)
;;
@@ -212,11 +212,11 @@ run_qemu -drive file="$TEST_IMG",if=virtio,readonly=on
case "$QEMU_DEFAULT_MACHINE" in
pc)
run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device ide-cd,drive=disk
- run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-cd,drive=disk
+# run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-cd,drive=disk
run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device ide-drive,drive=disk
run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device ide-hd,drive=disk
- run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-disk,drive=disk
- run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-hd,drive=disk
+# run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-disk,drive=disk
+# run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-hd,drive=disk
;;
*)
;;
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index b973dc8..f1059f6 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -77,7 +77,7 @@
068 rw auto quick
069 rw auto quick
070 rw auto quick
-071 rw auto quick
+# 071 rw auto quick -- requires whitelisted blkverify
072 rw auto quick
073 rw auto quick
074 rw auto quick
@@ -105,7 +105,7 @@
096 rw auto quick
097 rw auto backing
098 rw auto backing quick
-099 rw auto quick
+# 099 rw auto quick -- requires whitelisted blkverify
# 100 was removed, do not reuse
101 rw auto quick
102 rw auto quick
diff --git a/tests/qom-test.c b/tests/qom-test.c
index e6f712c..ebd15fd 100644
--- a/tests/qom-test.c
+++ b/tests/qom-test.c
@@ -16,7 +16,7 @@
#include "libqtest.h"
static const char *blacklist_x86[] = {
- "xenfv", "xenpv", NULL
+ "xenfv", "xenpv", "isapc", NULL
};
static const struct {
diff --git a/tests/test-x86-cpuid-compat.c b/tests/test-x86-cpuid-compat.c
index 84ce9c7..c1ee197 100644
--- a/tests/test-x86-cpuid-compat.c
+++ b/tests/test-x86-cpuid-compat.c
@@ -306,6 +306,7 @@ int main(int argc, char **argv)
"-cpu 486,xlevel2=0xC0000002,+xstore",
"xlevel2", 0xC0000002);
+#if 0 /* Disabled in Red Hat Enterprise Linux */
/* Check compatibility of old machine-types that didn't
* auto-increase level/xlevel/xlevel2: */
@@ -356,6 +357,7 @@ int main(int argc, char **argv)
add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on",
"-machine pc-i440fx-2.4 -cpu SandyBridge,+npt",
"xlevel", 0x80000008);
+#endif
/* Test feature parsing */
add_feature_test("x86/cpuid/features/plus",
diff --git a/tests/usb-hcd-xhci-test.c b/tests/usb-hcd-xhci-test.c
index 5b1b681..85fa150 100644
--- a/tests/usb-hcd-xhci-test.c
+++ b/tests/usb-hcd-xhci-test.c
@@ -21,6 +21,7 @@ static void test_xhci_hotplug(void)
usb_test_hotplug("xhci", 1, NULL);
}
+#if 0 /* Disabled for Red Hat Enterprise Linux */
static void test_usb_uas_hotplug(void)
{
qtest_qmp_device_add("usb-uas", "uas", NULL);
@@ -34,6 +35,7 @@ static void test_usb_uas_hotplug(void)
qtest_qmp_device_del("scsihd");
qtest_qmp_device_del("uas");
}
+#endif
static void test_usb_ccid_hotplug(void)
{
@@ -52,7 +54,9 @@ int main(int argc, char **argv)
qtest_add_func("/xhci/pci/init", test_xhci_init);
qtest_add_func("/xhci/pci/hotplug", test_xhci_hotplug);
+#if 0 /* Disabled for Red Hat Enterprise Linux */
qtest_add_func("/xhci/pci/hotplug/usb-uas", test_usb_uas_hotplug);
+#endif
qtest_add_func("/xhci/pci/hotplug/usb-ccid", test_usb_ccid_hotplug);
qtest_start("-device nec-usb-xhci,id=xhci"
--
1.8.3.1

View File

@ -0,0 +1,298 @@
From 51a0ce09fb01c87cb9bd7f1fca850e8d5d573f5f Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Fri, 19 Oct 2018 13:48:41 +0200
Subject: Enable make check
Fixing tests after device disabling and machine type changes, and enabling
the make check run during the build.
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
redhat/qemu-kvm.spec.template | 2 +-
tests/Makefile.include | 32 ++++++++++++++++----------------
tests/boot-serial-test.c | 6 +++++-
tests/cpu-plug-test.c | 3 ++-
tests/e1000-test.c | 2 ++
tests/prom-env-test.c | 2 ++
tests/qemu-iotests/051 | 12 ++++++------
tests/qemu-iotests/group | 4 ++--
tests/test-x86-cpuid-compat.c | 2 ++
tests/usb-hcd-xhci-test.c | 4 ++++
10 files changed, 42 insertions(+), 27 deletions(-)
diff --git a/tests/Makefile.include b/tests/Makefile.include
index 613242b..baeb608 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -171,20 +171,20 @@ check-qtest-pci-$(CONFIG_IPACK) += $(check-qtest-ipack-y)
check-qtest-pci-y += tests/display-vga-test$(EXESUF)
check-qtest-pci-$(CONFIG_HDA) += tests/intel-hda-test$(EXESUF)
check-qtest-pci-$(CONFIG_IVSHMEM_DEVICE) += tests/ivshmem-test$(EXESUF)
-check-qtest-pci-y += tests/megasas-test$(EXESUF)
+#check-qtest-pci-y += tests/megasas-test$(EXESUF)
check-qtest-i386-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
-check-qtest-i386-y += tests/fdc-test$(EXESUF)
+#check-qtest-i386-y += tests/fdc-test$(EXESUF)
check-qtest-i386-y += tests/ide-test$(EXESUF)
check-qtest-i386-y += tests/ahci-test$(EXESUF)
check-qtest-i386-y += tests/hd-geo-test$(EXESUF)
check-qtest-i386-y += tests/boot-order-test$(EXESUF)
-check-qtest-i386-y += tests/bios-tables-test$(EXESUF)
+#check-qtest-i386-y += tests/bios-tables-test$(EXESUF)
check-qtest-i386-$(CONFIG_SGA) += tests/boot-serial-test$(EXESUF)
check-qtest-i386-$(CONFIG_SLIRP) += tests/pxe-test$(EXESUF)
check-qtest-i386-y += tests/rtc-test$(EXESUF)
-check-qtest-i386-y += tests/ipmi-kcs-test$(EXESUF)
-check-qtest-i386-y += tests/ipmi-bt-test$(EXESUF)
+#check-qtest-i386-y += tests/ipmi-kcs-test$(EXESUF)
+#check-qtest-i386-y += tests/ipmi-bt-test$(EXESUF)
check-qtest-i386-y += tests/i440fx-test$(EXESUF)
check-qtest-i386-y += tests/fw_cfg-test$(EXESUF)
check-qtest-i386-y += tests/drive_del-test$(EXESUF)
@@ -238,15 +238,15 @@ check-qtest-mips64el-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
check-qtest-moxie-y += tests/boot-serial-test$(EXESUF)
check-qtest-ppc-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
-check-qtest-ppc-y += tests/boot-order-test$(EXESUF)
+#check-qtest-ppc-y += tests/boot-order-test$(EXESUF)
check-qtest-ppc-y += tests/prom-env-test$(EXESUF)
check-qtest-ppc-y += tests/drive_del-test$(EXESUF)
check-qtest-ppc-y += tests/boot-serial-test$(EXESUF)
-check-qtest-ppc-y += tests/m48t59-test$(EXESUF)
+#check-qtest-ppc-y += tests/m48t59-test$(EXESUF)
check-qtest-ppc64-y += $(check-qtest-ppc-y)
check-qtest-ppc64-y += tests/spapr-phb-test$(EXESUF)
-check-qtest-ppc64-y += tests/pnv-xscom-test$(EXESUF)
+#check-qtest-ppc64-y += tests/pnv-xscom-test$(EXESUF)
check-qtest-ppc64-y += tests/migration-test$(EXESUF)
check-qtest-ppc64-y += tests/rtas-test$(EXESUF)
check-qtest-ppc64-$(CONFIG_SLIRP) += tests/pxe-test$(EXESUF)
@@ -254,8 +254,8 @@ check-qtest-ppc64-$(CONFIG_USB_OHCI) += tests/usb-hcd-ohci-test$(EXESUF)
check-qtest-ppc64-$(CONFIG_USB_UHCI) += tests/usb-hcd-uhci-test$(EXESUF)
check-qtest-ppc64-$(CONFIG_USB_XHCI_NEC) += tests/usb-hcd-xhci-test$(EXESUF)
check-qtest-ppc64-y += $(check-qtest-virtio-y)
-check-qtest-ppc64-$(CONFIG_SLIRP) += tests/test-netfilter$(EXESUF)
-check-qtest-ppc64-$(CONFIG_POSIX) += tests/test-filter-mirror$(EXESUF)
+#check-qtest-ppc64-$(CONFIG_SLIRP) += tests/test-netfilter$(EXESUF)
+#check-qtest-ppc64-$(CONFIG_POSIX) += tests/test-filter-mirror$(EXESUF)
check-qtest-ppc64-$(CONFIG_RTL8139_PCI) += tests/test-filter-redirector$(EXESUF)
check-qtest-ppc64-y += tests/display-vga-test$(EXESUF)
check-qtest-ppc64-y += tests/numa-test$(EXESUF)
@@ -685,15 +685,15 @@ tests/endianness-test$(EXESUF): tests/endianness-test.o
tests/spapr-phb-test$(EXESUF): tests/spapr-phb-test.o $(libqos-obj-y)
tests/prom-env-test$(EXESUF): tests/prom-env-test.o $(libqos-obj-y)
tests/rtas-test$(EXESUF): tests/rtas-test.o $(libqos-spapr-obj-y)
-tests/fdc-test$(EXESUF): tests/fdc-test.o
+#tests/fdc-test$(EXESUF): tests/fdc-test.o
tests/ide-test$(EXESUF): tests/ide-test.o $(libqos-pc-obj-y)
tests/ahci-test$(EXESUF): tests/ahci-test.o $(libqos-pc-obj-y)
-tests/ipmi-kcs-test$(EXESUF): tests/ipmi-kcs-test.o
-tests/ipmi-bt-test$(EXESUF): tests/ipmi-bt-test.o
+#tests/ipmi-kcs-test$(EXESUF): tests/ipmi-kcs-test.o
+#tests/ipmi-bt-test$(EXESUF): tests/ipmi-bt-test.o
tests/hd-geo-test$(EXESUF): tests/hd-geo-test.o
tests/boot-order-test$(EXESUF): tests/boot-order-test.o $(libqos-obj-y)
tests/boot-serial-test$(EXESUF): tests/boot-serial-test.o $(libqos-obj-y)
-tests/bios-tables-test$(EXESUF): tests/bios-tables-test.o \
+#tests/bios-tables-test$(EXESUF): tests/bios-tables-test.o \
tests/boot-sector.o tests/acpi-utils.o $(libqos-obj-y)
tests/pxe-test$(EXESUF): tests/pxe-test.o tests/boot-sector.o $(libqos-obj-y)
tests/tmp105-test$(EXESUF): tests/tmp105-test.o $(libqos-omap-obj-y)
@@ -707,7 +707,7 @@ tests/e1000-test$(EXESUF): tests/e1000-test.o
tests/e1000e-test$(EXESUF): tests/e1000e-test.o $(libqos-pc-obj-y)
tests/rtl8139-test$(EXESUF): tests/rtl8139-test.o $(libqos-pc-obj-y)
tests/pcnet-test$(EXESUF): tests/pcnet-test.o
-tests/pnv-xscom-test$(EXESUF): tests/pnv-xscom-test.o
+#tests/pnv-xscom-test$(EXESUF): tests/pnv-xscom-test.o
tests/eepro100-test$(EXESUF): tests/eepro100-test.o
tests/vmxnet3-test$(EXESUF): tests/vmxnet3-test.o
tests/ne2000-test$(EXESUF): tests/ne2000-test.o
@@ -755,7 +755,7 @@ tests/test-filter-mirror$(EXESUF): tests/test-filter-mirror.o $(qtest-obj-y)
tests/test-filter-redirector$(EXESUF): tests/test-filter-redirector.o $(qtest-obj-y)
tests/test-x86-cpuid-compat$(EXESUF): tests/test-x86-cpuid-compat.o $(qtest-obj-y)
tests/ivshmem-test$(EXESUF): tests/ivshmem-test.o contrib/ivshmem-server/ivshmem-server.o $(libqos-pc-obj-y) $(libqos-spapr-obj-y)
-tests/megasas-test$(EXESUF): tests/megasas-test.o $(libqos-spapr-obj-y) $(libqos-pc-obj-y)
+#tests/megasas-test$(EXESUF): tests/megasas-test.o $(libqos-spapr-obj-y) $(libqos-pc-obj-y)
tests/vhost-user-bridge$(EXESUF): tests/vhost-user-bridge.o $(test-util-obj-y) libvhost-user.a
tests/test-uuid$(EXESUF): tests/test-uuid.o $(test-util-obj-y)
tests/test-arm-mptimer$(EXESUF): tests/test-arm-mptimer.o
diff --git a/tests/boot-serial-test.c b/tests/boot-serial-test.c
index 8ec6aed..6a533b9 100644
--- a/tests/boot-serial-test.c
+++ b/tests/boot-serial-test.c
@@ -97,16 +97,20 @@ static testdef_t tests[] = {
{ "ppc", "g3beige", "", "PowerPC,750" },
{ "ppc", "mac99", "", "PowerPC,G4" },
{ "ppc", "sam460ex", "-m 256", "DRAM: 256 MiB" },
+#if 0 /* Disabled for Red Hat Enterprise Linux */
{ "ppc64", "ppce500", "", "U-Boot" },
{ "ppc64", "40p", "-m 192", "Memory: 192M" },
{ "ppc64", "mac99", "", "PowerPC,970FX" },
+#endif
{ "ppc64", "pseries", "", "Open Firmware" },
+#if 0 /* Disabled for Red Hat Enterprise Linux */
{ "ppc64", "powernv", "-cpu POWER8", "OPAL" },
{ "ppc64", "sam460ex", "-device e1000", "8086 100e" },
+#endif
{ "i386", "isapc", "-cpu qemu32 -device sga", "SGABIOS" },
{ "i386", "pc", "-device sga", "SGABIOS" },
{ "i386", "q35", "-device sga", "SGABIOS" },
- { "x86_64", "isapc", "-cpu qemu32 -device sga", "SGABIOS" },
+ { "x86_64", "pc", "-cpu qemu32 -device sga", "SGABIOS" },
{ "x86_64", "q35", "-device sga", "SGABIOS" },
{ "sparc", "LX", "", "TMS390S10" },
{ "sparc", "SS-4", "", "MB86904" },
diff --git a/tests/cpu-plug-test.c b/tests/cpu-plug-test.c
index f4a677d..70a3d1d 100644
--- a/tests/cpu-plug-test.c
+++ b/tests/cpu-plug-test.c
@@ -193,7 +193,8 @@ static void add_pseries_test_case(const char *mname)
PlugTestData *data;
if (!g_str_has_prefix(mname, "pseries-") ||
- (g_str_has_prefix(mname, "pseries-2.") && atoi(&mname[10]) < 7)) {
+ (g_str_has_prefix(mname, "pseries-2.") && atoi(&mname[10]) < 7) ||
+ strcmp(mname,"pseries-rhel7.2.0") == 0) {
return;
}
data = g_new(PlugTestData, 1);
diff --git a/tests/e1000-test.c b/tests/e1000-test.c
index 0c5fcdc..b830432 100644
--- a/tests/e1000-test.c
+++ b/tests/e1000-test.c
@@ -29,8 +29,10 @@ static void test_device(gconstpointer data)
static const char *models[] = {
"e1000",
"e1000-82540em",
+#if 0 /* Disabled for Red Hat Enterprise Linux */
"e1000-82544gc",
"e1000-82545em",
+#endif
};
int main(int argc, char **argv)
diff --git a/tests/prom-env-test.c b/tests/prom-env-test.c
index 198d007..4bea07f 100644
--- a/tests/prom-env-test.c
+++ b/tests/prom-env-test.c
@@ -82,7 +82,9 @@ int main(int argc, char *argv[])
if (!strcmp(arch, "ppc")) {
add_tests(ppc_machines);
} else if (!strcmp(arch, "ppc64")) {
+#if 0 /* Disabled for Red Hat Enterprise Linux */
add_tests(ppc_machines);
+#endif
if (g_test_slow()) {
qtest_add_data_func("prom-env/pseries", "pseries", test_machine);
}
diff --git a/tests/qemu-iotests/051 b/tests/qemu-iotests/051
index 32741d7..85ef52e 100755
--- a/tests/qemu-iotests/051
+++ b/tests/qemu-iotests/051
@@ -182,11 +182,11 @@ run_qemu -drive if=virtio
case "$QEMU_DEFAULT_MACHINE" in
pc)
run_qemu -drive if=none,id=disk -device ide-cd,drive=disk
- run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-cd,drive=disk
+# run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-cd,drive=disk
run_qemu -drive if=none,id=disk -device ide-drive,drive=disk
run_qemu -drive if=none,id=disk -device ide-hd,drive=disk
- run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-disk,drive=disk
- run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-hd,drive=disk
+# run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-disk,drive=disk
+# run_qemu -drive if=none,id=disk -device lsi53c895a -device scsi-hd,drive=disk
;;
*)
;;
@@ -211,11 +211,11 @@ run_qemu -drive file="$TEST_IMG",if=virtio,readonly=on
case "$QEMU_DEFAULT_MACHINE" in
pc)
run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device ide-cd,drive=disk
- run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-cd,drive=disk
+# run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-cd,drive=disk
run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device ide-drive,drive=disk
run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device ide-hd,drive=disk
- run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-disk,drive=disk
- run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-hd,drive=disk
+# run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-disk,drive=disk
+# run_qemu -drive file="$TEST_IMG",if=none,id=disk,readonly=on -device lsi53c895a -device scsi-hd,drive=disk
;;
*)
;;
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index 2722103..ede8887 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -77,7 +77,7 @@
068 rw auto quick
069 rw auto quick
070 rw auto quick
-071 rw auto quick
+# 071 rw auto quick -- requires whitelisted blkverify
072 rw auto quick
073 rw auto quick
074 rw auto quick
@@ -105,7 +105,7 @@
096 rw auto quick
097 rw auto backing
098 rw auto backing quick
-099 rw auto quick
+# 099 rw auto quick -- requires whitelisted blkverify
# 100 was removed, do not reuse
101 rw auto quick
102 rw auto quick
diff --git a/tests/test-x86-cpuid-compat.c b/tests/test-x86-cpuid-compat.c
index e75b959..6b46b73 100644
--- a/tests/test-x86-cpuid-compat.c
+++ b/tests/test-x86-cpuid-compat.c
@@ -300,6 +300,7 @@ int main(int argc, char **argv)
"-cpu 486,xlevel2=0xC0000002,+xstore",
"xlevel2", 0xC0000002);
+#if 0 /* Disabled in Red Hat Enterprise Linux */
/* Check compatibility of old machine-types that didn't
* auto-increase level/xlevel/xlevel2: */
@@ -350,6 +351,7 @@ int main(int argc, char **argv)
add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on",
"-machine pc-i440fx-2.4 -cpu SandyBridge,+npt",
"xlevel", 0x80000008);
+#endif
/* Test feature parsing */
add_feature_test("x86/cpuid/features/plus",
diff --git a/tests/usb-hcd-xhci-test.c b/tests/usb-hcd-xhci-test.c
index 9eb24b0..465ed26 100644
--- a/tests/usb-hcd-xhci-test.c
+++ b/tests/usb-hcd-xhci-test.c
@@ -21,6 +21,7 @@ static void test_xhci_hotplug(void)
usb_test_hotplug("xhci", "1", NULL);
}
+#if 0 /* Disabled for Red Hat Enterprise Linux */
static void test_usb_uas_hotplug(void)
{
qtest_qmp_device_add("usb-uas", "uas", "{}");
@@ -34,6 +35,7 @@ static void test_usb_uas_hotplug(void)
qtest_qmp_device_del("scsihd");
qtest_qmp_device_del("uas");
}
+#endif
static void test_usb_ccid_hotplug(void)
{
@@ -52,7 +54,9 @@ int main(int argc, char **argv)
qtest_add_func("/xhci/pci/init", test_xhci_init);
qtest_add_func("/xhci/pci/hotplug", test_xhci_hotplug);
+#if 0 /* Disabled for Red Hat Enterprise Linux */
qtest_add_func("/xhci/pci/hotplug/usb-uas", test_usb_uas_hotplug);
+#endif
qtest_add_func("/xhci/pci/hotplug/usb-ccid", test_usb_ccid_hotplug);
qtest_start("-device nec-usb-xhci,id=xhci"
--
1.8.3.1

View File

@ -1,4 +1,4 @@
From ce4cd21e28e1511e056877e3cc8dcf6f0b8c7baa Mon Sep 17 00:00:00 2001
From b91ee13e30cef65d02e3e0f9324931f1e2589426 Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Thu, 18 Dec 2014 06:27:49 +0100
Subject: Use kvm by default
@ -8,6 +8,15 @@ Bugzilla: 906185
RHEL uses the kvm accelerator by default, if available.
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
Rebase notes (2.10.0):
- variable rename (upstream)
Rebase notes (2.2.0):
- Move code from vl.c to accel.c
(cherry picked from commit abcd662eb8e516ebe4a6b401e83a62f749491a15)
(cherry picked from commit eca6d5766d956c37e3f7f28d70903d357308c846)
---
accel/accel.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

View File

@ -1,4 +1,4 @@
From 43a09e06e76cba94c6ecd448f51912362b42f94d Mon Sep 17 00:00:00 2001
From 3094b4ac400f54f26b837226f44fc0a18f0726e6 Mon Sep 17 00:00:00 2001
From: Bandan Das <bsd@redhat.com>
Date: Tue, 3 Dec 2013 20:05:13 +0100
Subject: vfio: cap number of devices that can be assigned
@ -23,12 +23,21 @@ matches the number of slots on a PCI bus and is also a nice power
of two.
Signed-off-by: Bandan Das <bsd@redhat.com>
Rebase notes (2.8.0):
- removed return value for vfio_realize (commit 1a22aca)
Merged patches (2.9.0):
- 17eb774 vfio: Use error_setg when reporting max assigned device overshoot
(cherry picked from commit 9fa3c9fc6dfcde76d80db1aa601b2d577f72ceec)
(cherry picked from commit 3cb35556dc7d994f203d732fe952f95fcdb03c0a)
---
hw/vfio/pci.c | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 6cbb8fa..59b3c0f 100644
index 5c7bd96..598f771 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -36,6 +36,7 @@
@ -37,15 +46,16 @@ index 6cbb8fa..59b3c0f 100644
#define MSIX_CAP_LENGTH 12
+#define MAX_DEV_ASSIGN_CMDLINE 32
static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
@@ -2809,7 +2810,19 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
#define TYPE_VFIO_PCI "vfio-pci"
#define PCI_VFIO(obj) OBJECT_CHECK(VFIOPCIDevice, obj, TYPE_VFIO_PCI)
@@ -2811,9 +2812,21 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
ssize_t len;
struct stat st;
int groupid;
- int i, ret;
+ int ret, i = 0;
+
bool is_mdev;
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ i++;
@ -57,9 +67,10 @@ index 6cbb8fa..59b3c0f 100644
+ "already attached", MAX_DEV_ASSIGN_CMDLINE);
+ return;
+ }
+
if (!vdev->vbasedev.sysfsdev) {
if (!(~vdev->host.domain || ~vdev->host.bus ||
~vdev->host.slot || ~vdev->host.function)) {
--
1.8.3.1

View File

@ -1,4 +1,4 @@
From f8e7911bb97eb942a4eadad1731b7c59c43fd2eb Mon Sep 17 00:00:00 2001
From 2bfcbb3ece3cda4cf977cb3983df84830bde90a3 Mon Sep 17 00:00:00 2001
From: Eduardo Habkost <ehabkost@redhat.com>
Date: Wed, 4 Dec 2013 18:53:17 +0100
Subject: Add support statement to -help output
@ -16,15 +16,17 @@ Add support statement to -help output, reporting direct qemu-kvm usage
as unsupported by Red Hat, and advising users to use libvirt instead.
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit 2a07700936e39856cc9f149c6a6517f0715536a6)
(cherry picked from commit 5dd2f4706e2fef945771949e59a8fcc1b5452de9)
---
vl.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/vl.c b/vl.c
index 4f96203..43c4b78 100644
index 39d152a..db628b8 100644
--- a/vl.c
+++ b/vl.c
@@ -1876,9 +1876,17 @@ static void version(void)
@@ -1904,9 +1904,17 @@ static void version(void)
QEMU_COPYRIGHT "\n");
}
@ -42,7 +44,7 @@ index 4f96203..43c4b78 100644
printf("usage: %s [options] [disk_image]\n\n"
"'disk_image' is a raw hard disk image for IDE hard disk 0\n\n",
error_get_progname());
@@ -1895,6 +1903,7 @@ static void help(int exitcode)
@@ -1923,6 +1931,7 @@ static void help(int exitcode)
"\n"
QEMU_HELP_BOTTOM "\n");

View File

@ -1,89 +0,0 @@
From 8413778453742aeb3ad6b38d5f4440a0dbabca7d Mon Sep 17 00:00:00 2001
From: Andrew Jones <drjones@redhat.com>
Date: Tue, 21 Jan 2014 10:46:52 +0100
Subject: globally limit the maximum number of CPUs
We now globally limit the number of VCPUs.
In particular, there is no way to specify more than
max_cpus VCPUs for a VM.
This allows us to restore the ppc max_cpus limitation to the upstream
default and minimize the ppc hack in kvm-all.c.
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
Signed-off-by: Danilo Cesar Lemes de Paula <ddepaula@redhat.com>
---
accel/kvm/kvm-all.c | 12 ++++++++++++
vl.c | 18 ++++++++++++++++++
2 files changed, 30 insertions(+)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index eb7db92..c2e7095 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -1586,6 +1586,18 @@ static int kvm_init(MachineState *ms)
soft_vcpus_limit = kvm_recommended_vcpus(s);
hard_vcpus_limit = kvm_max_vcpus(s);
+#ifdef HOST_PPC64
+ /*
+ * On POWER, the kernel advertises a soft limit based on the
+ * number of CPU threads on the host. We want to allow exceeding
+ * this for testing purposes, so we don't want to set hard limit
+ * to soft limit as on x86.
+ */
+#else
+ /* RHEL doesn't support nr_vcpus > soft_vcpus_limit */
+ hard_vcpus_limit = soft_vcpus_limit;
+#endif
+
while (nc->name) {
if (nc->num > soft_vcpus_limit) {
warn_report("Number of %s cpus requested (%d) exceeds "
diff --git a/vl.c b/vl.c
index 43c4b78..b50dbe4 100644
--- a/vl.c
+++ b/vl.c
@@ -133,6 +133,8 @@ int main(int argc, char **argv)
#define MAX_VIRTIO_CONSOLES 1
+#define RHEL_MAX_CPUS 384
+
static const char *data_dir[16];
static int data_dir_idx;
const char *bios_name = NULL;
@@ -1430,6 +1432,20 @@ MachineClass *find_default_machine(void)
return mc;
}
+/* Maximum number of CPUs limited for Red Hat Enterprise Linux */
+static void limit_max_cpus_in_machines(void)
+{
+ GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false);
+
+ for (el = machines; el; el = el->next) {
+ MachineClass *mc = el->data;
+
+ if (mc->max_cpus > RHEL_MAX_CPUS) {
+ mc->max_cpus = RHEL_MAX_CPUS;
+ }
+ }
+}
+
MachineInfoList *qmp_query_machines(Error **errp)
{
GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false);
@@ -3993,6 +4009,8 @@ int main(int argc, char **argv, char **envp)
"mutually exclusive");
exit(EXIT_FAILURE);
}
+ /* Maximum number of CPUs limited for Red Hat Enterprise Linux */
+ limit_max_cpus_in_machines();
machine_class = select_machine();
--
1.8.3.1

View File

@ -0,0 +1,153 @@
From c214bfc318a3128dc92fe5017ca0dd54fc50ffed Mon Sep 17 00:00:00 2001
From: Andrew Jones <drjones@redhat.com>
Date: Tue, 21 Jan 2014 10:46:52 +0100
Subject: globally limit the maximum number of CPUs
We now globally limit the number of VCPUs.
In particular, there is no way to specify more than
max_cpus VCPUs for a VM.
This allows us to restore the ppc max_cpus limitation to the upstream
default and minimize the ppc hack in kvm-all.c.
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
Signed-off-by: Danilo Cesar Lemes de Paula <ddepaula@redhat.com>
Rebase notes (2.11.0):
- Removed CONFIG_RHV reference
- Update commit log
Merged patches (2.11.0):
- 92fef14623 redhat: remove manual max_cpus limitations for ppc
- bb722e9eff redhat: globally limit the maximum number of CPUs
- fdeef3c1c7 RHEL: Set vcpus hard limit to 240 for Power
- 0584216921 Match POWER max cpus to x86
Signed-off-by: Andrew Jones <drjones@redhat.com>
(cherry picked from commit a4ceb63bdc5cbac19f5f633ec761b9de0dedb55e)
(cherry picked from commit a1f26d85171b4d554225150053700e93ba6eba10)
redhat: globally limit the maximum number of CPUs
RH-Author: David Hildenbrand <david@redhat.com>
Message-id: <20180109103253.24517-2-david@redhat.com>
Patchwork-id: 78531
O-Subject: [RHEL-7.5 qemu-kvm-ma PATCH v2 1/2] redhat: globally limit the maximum number of CPUs
Bugzilla: 1527449
RH-Acked-by: David Gibson <dgibson@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
Upstream-status: n/a
For RHEL we support 240 VCPUs; for RHV, up to 384. Let's limit this
globally instead of fixing up all machines. This way, we can easily
change (increase) the product-specific levels later.
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
redhat: remove manual max_cpus limitations for ppc
RH-Author: David Hildenbrand <david@redhat.com>
Message-id: <20180109103253.24517-3-david@redhat.com>
Patchwork-id: 78532
O-Subject: [RHEL-7.5 qemu-kvm-ma PATCH v2 2/2] redhat: remove manual max_cpus limitations for ppc
Bugzilla: 1527449
RH-Acked-by: David Gibson <dgibson@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
Upstream-status: n/a
RH-Author: Andrew Jones <drjones@redhat.com>
Message-id: <1390301212-15344-1-git-send-email-drjones@redhat.com>
Patchwork-id: 56862
O-Subject: [RHEL7.0 qemu-kvm PATCH v6] use recommended max vcpu count
Bugzilla: 998708
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
RH-Acked-by: Marcelo Tosatti <mtosatti@redhat.com>
The recommended vcpu max limit (KVM_CAP_NR_VCPUS) should be used instead
of the actual max vcpu limit (KVM_CAP_MAX_VCPUS) when deciding whether to
give an error. This commit matches the limit to the current KVM_CAP_NR_VCPUS value.
Conflicts:
vl.c
---
accel/kvm/kvm-all.c | 12 ++++++++++++
vl.c | 18 ++++++++++++++++++
2 files changed, 30 insertions(+)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 4880a05..a8f5d47 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -1627,6 +1627,18 @@ static int kvm_init(MachineState *ms)
soft_vcpus_limit = kvm_recommended_vcpus(s);
hard_vcpus_limit = kvm_max_vcpus(s);
+#ifdef HOST_PPC64
+ /*
+ * On POWER, the kernel advertises a soft limit based on the
+ * number of CPU threads on the host. We want to allow exceeding
+ * this for testing purposes, so we don't want to set hard limit
+ * to soft limit as on x86.
+ */
+#else
+ /* RHEL doesn't support nr_vcpus > soft_vcpus_limit */
+ hard_vcpus_limit = soft_vcpus_limit;
+#endif
+
while (nc->name) {
if (nc->num > soft_vcpus_limit) {
warn_report("Number of %s cpus requested (%d) exceeds "
diff --git a/vl.c b/vl.c
index db628b8..7b0f19a 100644
--- a/vl.c
+++ b/vl.c
@@ -133,6 +133,8 @@ int main(int argc, char **argv)
#define MAX_VIRTIO_CONSOLES 1
+#define RHEL_MAX_CPUS 384
+
static const char *data_dir[16];
static int data_dir_idx;
const char *bios_name = NULL;
@@ -1460,6 +1462,20 @@ MachineClass *find_default_machine(void)
return mc;
}
+/* Maximum number of CPUs limited for Red Hat Enterprise Linux */
+static void limit_max_cpus_in_machines(void)
+{
+ GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false);
+
+ for (el = machines; el; el = el->next) {
+ MachineClass *mc = el->data;
+
+ if (mc->max_cpus > RHEL_MAX_CPUS) {
+ mc->max_cpus = RHEL_MAX_CPUS;
+ }
+ }
+}
+
MachineInfoList *qmp_query_machines(Error **errp)
{
GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false);
@@ -4012,6 +4028,8 @@ int main(int argc, char **argv, char **envp)
"mutually exclusive");
exit(EXIT_FAILURE);
}
+ /* Maximum number of CPUs limited for Red Hat Enterprise Linux */
+ limit_max_cpus_in_machines();
configure_rtc(qemu_find_opts_singleton("rtc"));
--
1.8.3.1

View File

@ -1,4 +1,4 @@
From f262acdee88f36b625fcbd5eb1cd66739428cca3 Mon Sep 17 00:00:00 2001
From 30887ffc7e908ebed5381c08181cd6a2a6bc5e98 Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Thu, 8 Oct 2015 09:50:17 +0200
Subject: Add support for simpletrace
@ -6,6 +6,23 @@ Subject: Add support for simpletrace
As simpletrace is upstream, we just need to properly handle it during rpmbuild.
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
Rebase notes (weekly-180727):
- Fixed Python 2 to Python 3 switch
Rebase notes (2.9.0):
- Added group argument for tracetool.py (upstream)
Rebase notes (2.8.0):
- Changed tracetool.py parameters
Merged patches (2.3.0):
- db959d6 redhat/qemu-kvm.spec.template: Install qemu-kvm-simpletrace.stp
- 5292fc3 trace: add SystemTap init scripts for simpletrace bridge
- eda9e5e simpletrace: install simpletrace.py
- 85c4c8f trace: add systemtap-initscript README file to RPM
(cherry picked from commit bfc1d7f3628f2ffbabbae71d57a506cea6663ddf)
---
.gitignore | 2 ++
Makefile | 4 +++
@ -19,10 +36,10 @@ Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
create mode 100644 scripts/systemtap/script.d/qemu_kvm.stp
diff --git a/Makefile b/Makefile
index eb4c57a..6b6d3f6 100644
index 152821a..8710720 100644
--- a/Makefile
+++ b/Makefile
@@ -880,6 +880,10 @@ endif
@@ -892,6 +892,10 @@ endif
$(INSTALL_DATA) $(SRC_PATH)/pc-bios/keymaps/$$x "$(DESTDIR)$(qemu_datadir)/keymaps"; \
done
$(INSTALL_DATA) $(BUILD_DIR)/trace-events-all "$(DESTDIR)$(qemu_datadir)/trace-events-all"

View File

@ -1,4 +1,4 @@
From 33e2c01c1b0b64a76d5193b60378d2329a86626b Mon Sep 17 00:00:00 2001
From d0656d8b2e0de42d04c224db36fe9c1ec015a9cc Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina <mrezanin@redhat.com>
Date: Fri, 14 Nov 2014 08:51:50 +0100
Subject: Use qemu-kvm in documentation instead of qemu-system-<arch>
@ -17,15 +17,39 @@ to reflect this change. Only architectures available in RHEL are updated.
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
docs/COLO-FT.txt | 4 +-
docs/can.txt | 8 +--
docs/pr-manager.rst | 4 +-
docs/qemu-block-drivers.texi | 70 ++++++++++-----------
docs/qemu-block-drivers.texi | 70 +++++++++++-----------
docs/qemu-cpu-models.texi | 8 +--
docs/replay.txt | 4 +-
docs/specs/tpm.txt | 8 +--
qemu-doc.texi | 70 ++++++++++-----------
qemu-options.hx | 144 ++++++++++++++++++++++---------------------
7 files changed, 156 insertions(+), 152 deletions(-)
qemu-doc.texi | 70 +++++++++++-----------
qemu-options.hx | 140 ++++++++++++++++++++++---------------------
9 files changed, 160 insertions(+), 156 deletions(-)
diff --git a/docs/COLO-FT.txt b/docs/COLO-FT.txt
index e2686bb..8c48f86 100644
--- a/docs/COLO-FT.txt
+++ b/docs/COLO-FT.txt
@@ -147,7 +147,7 @@ in test procedure.
== Test procedure ==
1. Startup qemu
Primary:
-# qemu-system-x86_64 -accel kvm -m 2048 -smp 2 -qmp stdio -name primary \
+# qemu-kvm -accel kvm -m 2048 -smp 2 -qmp stdio -name primary \
-device piix3-usb-uhci -vnc :7 \
-device usb-tablet -netdev tap,id=hn0,vhost=off \
-device virtio-net-pci,id=net-pci0,netdev=hn0 \
@@ -155,7 +155,7 @@ Primary:
children.0.file.filename=1.raw,\
children.0.driver=raw -S
Secondary:
-# qemu-system-x86_64 -accel kvm -m 2048 -smp 2 -qmp stdio -name secondary \
+# qemu-kvm -accel kvm -m 2048 -smp 2 -qmp stdio -name secondary \
-device piix3-usb-uhci -vnc :7 \
-device usb-tablet -netdev tap,id=hn0,vhost=off \
-device virtio-net-pci,id=net-pci0,netdev=hn0 \
diff --git a/docs/can.txt b/docs/can.txt
index 7ba23b2..4ae5690 100644
--- a/docs/can.txt
@@ -287,8 +311,42 @@ index 38e9f34..2e71ec9 100644
@end example
@var{host}:@var{bus}:@var{slot}.@var{func} is the NVMe controller's PCI device
diff --git a/docs/qemu-cpu-models.texi b/docs/qemu-cpu-models.texi
index 1935f98..93672de 100644
--- a/docs/qemu-cpu-models.texi
+++ b/docs/qemu-cpu-models.texi
@@ -387,25 +387,25 @@ CPU models / features in QEMU and libvirt
@item Host passthrough
@example
- $ qemu-system-x86_64 -cpu host
+ $ qemu-kvm -cpu host
@end example
With feature customization:
@example
- $ qemu-system-x86_64 -cpu host,-vmx,...
+ $ qemu-kvm -cpu host,-vmx,...
@end example
@item Named CPU models
@example
- $ qemu-system-x86_64 -cpu Westmere
+ $ qemu-kvm -cpu Westmere
@end example
With feature customization:
@example
- $ qemu-system-x86_64 -cpu Westmere,+pcid,...
+ $ qemu-kvm -cpu Westmere,+pcid,...
@end example
@end table
diff --git a/docs/replay.txt b/docs/replay.txt
index 2e21e9c..f1923e8 100644
index 3497585..8d67ea7 100644
--- a/docs/replay.txt
+++ b/docs/replay.txt
@@ -25,7 +25,7 @@ Deterministic replay has the following features:
@ -310,10 +368,10 @@ index 2e21e9c..f1923e8 100644
-drive file=disk.qcow2,if=none,id=img-direct \
-drive driver=blkreplay,if=none,image=img-direct,id=img-blkreplay \
diff --git a/docs/specs/tpm.txt b/docs/specs/tpm.txt
index 0e9bbeb..9320fbd 100644
index 1af82bb..b2741ac 100644
--- a/docs/specs/tpm.txt
+++ b/docs/specs/tpm.txt
@@ -98,7 +98,7 @@ QEMU files related to the TPM passthrough device:
@@ -113,7 +113,7 @@ QEMU files related to the TPM passthrough device:
Command line to start QEMU with the TPM passthrough device using the host's
hardware TPM /dev/tpm0:
@ -322,7 +380,7 @@ index 0e9bbeb..9320fbd 100644
-m 1024 -boot d -bios bios-256k.bin -boot menu=on \
-tpmdev passthrough,id=tpm0,path=/dev/tpm0 \
-device tpm-tis,tpmdev=tpm0 test.img
@@ -164,7 +164,7 @@ swtpm socket --tpmstate dir=/tmp/mytpm1 \
@@ -179,7 +179,7 @@ swtpm socket --tpmstate dir=/tmp/mytpm1 \
Command line to start QEMU with the TPM emulator device communicating with
the swtpm:
@ -331,7 +389,7 @@ index 0e9bbeb..9320fbd 100644
-m 1024 -boot d -bios bios-256k.bin -boot menu=on \
-chardev socket,id=chrtpm,path=/tmp/mytpm1/swtpm-sock \
-tpmdev emulator,id=tpm0,chardev=chrtpm \
@@ -222,7 +222,7 @@ swtpm socket --tpmstate dir=/tmp/mytpm1 \
@@ -237,7 +237,7 @@ swtpm socket --tpmstate dir=/tmp/mytpm1 \
In a 2nd terminal start the VM:
@ -340,7 +398,7 @@ index 0e9bbeb..9320fbd 100644
-m 1024 -boot d -bios bios-256k.bin -boot menu=on \
-chardev socket,id=chrtpm,path=/tmp/mytpm1/swtpm-sock \
-tpmdev emulator,id=tpm0,chardev=chrtpm \
@@ -255,7 +255,7 @@ swtpm socket --tpmstate dir=/tmp/mytpm1 \
@@ -270,7 +270,7 @@ swtpm socket --tpmstate dir=/tmp/mytpm1 \
In the 2nd terminal restore the state of the VM using the additional
'-incoming' option.
@ -350,10 +408,10 @@ index 0e9bbeb..9320fbd 100644
-chardev socket,id=chrtpm,path=/tmp/mytpm1/swtpm-sock \
-tpmdev emulator,id=tpm0,chardev=chrtpm \
diff --git a/qemu-doc.texi b/qemu-doc.texi
index abfd2db..5827bed 100644
index f7ad1df..0e4b34a 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -204,12 +204,12 @@ Note that, by default, GUS shares IRQ(7) with parallel ports and so
@@ -205,12 +205,12 @@ Note that, by default, GUS shares IRQ(7) with parallel ports and so
QEMU must be told to not have parallel ports to have working GUS.
@example
@ -368,7 +426,7 @@ index abfd2db..5827bed 100644
@end example
Or some other unclaimed IRQ.
@@ -225,7 +225,7 @@ CS4231A is the chip used in Windows Sound System and GUSMAX products
@@ -226,7 +226,7 @@ CS4231A is the chip used in Windows Sound System and GUSMAX products
Download and uncompress the linux image (@file{linux.img}) and type:
@example
@ -377,7 +435,7 @@ index abfd2db..5827bed 100644
@end example
Linux should boot and give you a prompt.
@@ -235,7 +235,7 @@ Linux should boot and give you a prompt.
@@ -236,7 +236,7 @@ Linux should boot and give you a prompt.
@example
@c man begin SYNOPSIS
@ -386,7 +444,7 @@ index abfd2db..5827bed 100644
@c man end
@end example
@@ -275,21 +275,21 @@ is specified in seconds. The default is 0 which means no timeout. Libiscsi
@@ -276,21 +276,21 @@ is specified in seconds. The default is 0 which means no timeout. Libiscsi
Example (without authentication):
@example
@ -411,7 +469,7 @@ index abfd2db..5827bed 100644
@end example
@item NBD
@@ -304,12 +304,12 @@ Syntax for specifying a NBD device using Unix Domain Sockets
@@ -305,12 +305,12 @@ Syntax for specifying a NBD device using Unix Domain Sockets
Example for TCP
@example
@ -426,7 +484,7 @@ index abfd2db..5827bed 100644
@end example
@item SSH
@@ -317,8 +317,8 @@ QEMU supports SSH (Secure Shell) access to remote disks.
@@ -318,8 +318,8 @@ QEMU supports SSH (Secure Shell) access to remote disks.
Examples:
@example
@ -437,7 +495,7 @@ index abfd2db..5827bed 100644
@end example
Currently authentication must be done using ssh-agent. Other
@@ -336,7 +336,7 @@ sheepdog[+tcp|+unix]://[host:port]/vdiname[?socket=path][#snapid|#tag]
@@ -337,7 +337,7 @@ sheepdog[+tcp|+unix]://[host:port]/vdiname[?socket=path][#snapid|#tag]
Example
@example
@ -446,7 +504,7 @@ index abfd2db..5827bed 100644
@end example
See also @url{https://sheepdog.github.io/sheepdog/}.
@@ -362,17 +362,17 @@ JSON:
@@ -363,17 +363,17 @@ JSON:
Example
@example
URI:
@ -467,7 +525,7 @@ index abfd2db..5827bed 100644
@ file.debug=9,file.logfile=/var/log/qemu-gluster.log,
@ file.server.0.type=tcp,file.server.0.host=1.2.3.4,file.server.0.port=24007,
@ file.server.1.type=unix,file.server.1.socket=/var/run/glusterd.socket
@@ -437,9 +437,9 @@ of <protocol>.
@@ -438,9 +438,9 @@ of <protocol>.
Example: boot from a remote Fedora 20 live ISO image
@example
@ -479,7 +537,7 @@ index abfd2db..5827bed 100644
@end example
Example: boot from a remote Fedora 20 cloud image using a local overlay for
@@ -447,7 +447,7 @@ writes, copy-on-read, and a readahead of 64k
@@ -448,7 +448,7 @@ writes, copy-on-read, and a readahead of 64k
@example
qemu-img create -f qcow2 -o backing_file='json:@{"file.driver":"http",, "file.url":"https://dl.fedoraproject.org/pub/fedora/linux/releases/20/Images/x86_64/Fedora-x86_64-20-20131211.1-sda.qcow2",, "file.readahead":"64k"@}' /tmp/Fedora-x86_64-20-20131211.1-sda.qcow2
@ -488,7 +546,7 @@ index abfd2db..5827bed 100644
@end example
Example: boot from an image stored on a VMware vSphere server with a self-signed
@@ -456,7 +456,7 @@ of 10 seconds.
@@ -457,7 +457,7 @@ of 10 seconds.
@example
qemu-img create -f qcow2 -o backing_file='json:@{"file.driver":"https",, "file.url":"https://user:password@@vsphere.example.com/folder/test/test-flat.vmdk?dcPath=Datacenter&dsName=datastore1",, "file.sslverify":"off",, "file.readahead":"64k",, "file.timeout":10@}' /tmp/test.qcow2
@ -497,7 +555,7 @@ index abfd2db..5827bed 100644
@end example
@end table
@@ -818,7 +818,7 @@ On Linux hosts, a shared memory device is available. The basic syntax
@@ -824,7 +824,7 @@ On Linux hosts, a shared memory device is available. The basic syntax
is:
@example
@ -506,7 +564,7 @@ index abfd2db..5827bed 100644
@end example
where @var{hostmem} names a host memory backend. For a POSIX shared
@@ -839,7 +839,7 @@ memory server is:
@@ -845,7 +845,7 @@ memory server is:
ivshmem-server -p @var{pidfile} -S @var{path} -m @var{shm-name} -l @var{shm-size} -n @var{vectors}
# Then start your qemu instances with matching arguments
@ -515,7 +573,7 @@ index abfd2db..5827bed 100644
-chardev socket,path=@var{path},id=@var{id}
@end example
@@ -864,7 +864,7 @@ Instead of specifying the <shm size> using POSIX shm, you may specify
@@ -870,7 +870,7 @@ Instead of specifying the <shm size> using POSIX shm, you may specify
a memory backend that has hugepage support:
@example
@ -524,7 +582,7 @@ index abfd2db..5827bed 100644
-device ivshmem-plain,memdev=mb1
@end example
@@ -880,7 +880,7 @@ kernel testing.
@@ -886,7 +886,7 @@ kernel testing.
The syntax is:
@example
@ -533,7 +591,7 @@ index abfd2db..5827bed 100644
@end example
Use @option{-kernel} to provide the Linux kernel image and
@@ -895,7 +895,7 @@ If you do not need graphical output, you can disable it and redirect
@@ -901,7 +901,7 @@ If you do not need graphical output, you can disable it and redirect
the virtual serial port and the QEMU monitor to the console with the
@option{-nographic} option. The typical command line is:
@example
@ -542,7 +600,7 @@ index abfd2db..5827bed 100644
-append "root=/dev/hda console=ttyS0" -nographic
@end example
@@ -961,7 +961,7 @@ Network adapter that supports CDC ethernet and RNDIS protocols. @var{id}
@@ -967,7 +967,7 @@ Network adapter that supports CDC ethernet and RNDIS protocols. @var{id}
specifies a netdev defined with @code{-netdev @dots{},id=@var{id}}.
For instance, user-mode networking can be used with
@example
@ -551,7 +609,7 @@ index abfd2db..5827bed 100644
@end example
@item usb-ccid
Smartcard reader device
@@ -980,7 +980,7 @@ no type is given, the HCI logic corresponds to @code{-bt hci,vlan=0}.
@@ -986,7 +986,7 @@ no type is given, the HCI logic corresponds to @code{-bt hci,vlan=0}.
This USB device implements the USB Transport Layer of HCI. Example
usage:
@example
@ -560,7 +618,7 @@ index abfd2db..5827bed 100644
@end example
@end table
@@ -1057,7 +1057,7 @@ For this setup it is recommended to restrict it to listen on a UNIX domain
@@ -1063,7 +1063,7 @@ For this setup it is recommended to restrict it to listen on a UNIX domain
socket only. For example
@example
@ -569,7 +627,7 @@ index abfd2db..5827bed 100644
@end example
This ensures that only users on local box with read/write access to that
@@ -1080,7 +1080,7 @@ is running the password is set with the monitor. Until the monitor is used to
@@ -1086,7 +1086,7 @@ is running the password is set with the monitor. Until the monitor is used to
set the password all clients will be rejected.
@example
@ -578,34 +636,34 @@ index abfd2db..5827bed 100644
(qemu) change vnc password
Password: ********
(qemu)
@@ -1097,7 +1097,7 @@ support provides a secure session, but no authentication. This allows any
@@ -1103,7 +1103,7 @@ support provides a secure session, but no authentication. This allows any
client to connect, and provides an encrypted session.
@example
-qemu-system-i386 [...OPTIONS...] -vnc :1,tls,x509=/etc/pki/qemu -monitor stdio
+qemu-kvm [...OPTIONS...] -vnc :1,tls,x509=/etc/pki/qemu -monitor stdio
-qemu-system-i386 [...OPTIONS...] \
+qemu-kvm [...OPTIONS...] \
-object tls-creds-x509,id=tls0,dir=/etc/pki/qemu,endpoint=server,verify-peer=no \
-vnc :1,tls-creds=tls0 -monitor stdio
@end example
In the above example @code{/etc/pki/qemu} should contain at least three files,
@@ -1115,7 +1115,7 @@ then validate against the CA certificate. This is a good choice if deploying
in an environment with a private internal certificate authority.
@@ -1125,7 +1125,7 @@ same syntax as previously, but with @code{verify-peer} set to @code{yes}
instead.
@example
-qemu-system-i386 [...OPTIONS...] -vnc :1,tls,x509verify=/etc/pki/qemu -monitor stdio
+qemu-kvm [...OPTIONS...] -vnc :1,tls,x509verify=/etc/pki/qemu -monitor stdio
-qemu-system-i386 [...OPTIONS...] \
+qemu-kvm [...OPTIONS...] \
-object tls-creds-x509,id=tls0,dir=/etc/pki/qemu,endpoint=server,verify-peer=yes \
-vnc :1,tls-creds=tls0 -monitor stdio
@end example
@@ -1126,7 +1126,7 @@ Finally, the previous method can be combined with VNC password authentication
@@ -1138,7 +1138,7 @@ Finally, the previous method can be combined with VNC password authentication
to provide two layers of authentication for clients.
@example
-qemu-system-i386 [...OPTIONS...] -vnc :1,password,tls,x509verify=/etc/pki/qemu -monitor stdio
+qemu-kvm [...OPTIONS...] -vnc :1,password,tls,x509verify=/etc/pki/qemu -monitor stdio
-qemu-system-i386 [...OPTIONS...] \
+qemu-kvm [...OPTIONS...] \
-object tls-creds-x509,id=tls0,dir=/etc/pki/qemu,endpoint=server,verify-peer=yes \
-vnc :1,tls-creds=tls0,password -monitor stdio
(qemu) change vnc password
Password: ********
(qemu)
@@ -1149,7 +1149,7 @@ used for authentication, but assuming use of one supporting SSF,
@@ -1163,7 +1163,7 @@ used for authentication, but assuming use of one supporting SSF,
then QEMU can be launched with:
@example
@ -614,16 +672,16 @@ index abfd2db..5827bed 100644
@end example
@node vnc_sec_certificate_sasl
@@ -1163,7 +1163,7 @@ credentials. This can be enabled, by combining the 'sasl' option
@@ -1177,7 +1177,7 @@ credentials. This can be enabled, by combining the 'sasl' option
with the aforementioned TLS + x509 options:
@example
-qemu-system-i386 [...OPTIONS...] -vnc :1,tls,x509,sasl -monitor stdio
+qemu-kvm [...OPTIONS...] -vnc :1,tls,x509,sasl -monitor stdio
-qemu-system-i386 [...OPTIONS...] \
+qemu-kvm [...OPTIONS...] \
-object tls-creds-x509,id=tls0,dir=/etc/pki/qemu,endpoint=server,verify-peer=yes \
-vnc :1,tls-creds=tls0,sasl -monitor stdio
@end example
@node vnc_setup_sasl
@@ -1556,7 +1556,7 @@ QEMU has a primitive support to work with gdb, so that you can do
@@ -1572,7 +1572,7 @@ QEMU has a primitive support to work with gdb, so that you can do
In order to use gdb, launch QEMU with the '-s' option. It will wait for a
gdb connection:
@example
@ -632,7 +690,7 @@ index abfd2db..5827bed 100644
-append "root=/dev/hda"
Connected to host network interface: tun0
Waiting gdb connection on port 1234
@@ -1802,7 +1802,7 @@ Set the initial VGA graphic mode. The default is 800x600x32.
@@ -1818,7 +1818,7 @@ Set the initial VGA graphic mode. The default is 800x600x32.
Set OpenBIOS variables in NVRAM, for example:
@example
@ -642,10 +700,10 @@ index abfd2db..5827bed 100644
-prom-env 'boot-args=conf=hd:2,\yaboot.conf'
@end example
diff --git a/qemu-options.hx b/qemu-options.hx
index 37f2aa8..41cb1f3 100644
index cd2b25b..a65c63b 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -263,7 +263,7 @@ This option defines a free-form string that can be used to describe @var{fd}.
@@ -253,7 +253,7 @@ This option defines a free-form string that can be used to describe @var{fd}.
You can open an image using pre-opened file descriptors from an fd set:
@example
@ -654,7 +712,7 @@ index 37f2aa8..41cb1f3 100644
-add-fd fd=3,set=2,opaque="rdwr:/path/to/file"
-add-fd fd=4,set=2,opaque="rdonly:/path/to/file"
-drive file=/dev/fdset/2,index=0,media=disk
@@ -292,7 +292,7 @@ STEXI
@@ -282,7 +282,7 @@ STEXI
Set default value of @var{driver}'s property @var{prop} to @var{value}, e.g.:
@example
@ -663,7 +721,7 @@ index 37f2aa8..41cb1f3 100644
@end example
In particular, you can use this to set driver properties for devices which are
@@ -346,11 +346,11 @@ bootindex options. The default is non-strict boot.
@@ -336,11 +336,11 @@ bootindex options. The default is non-strict boot.
@example
# try to boot from network first, then from hard disk
@ -678,7 +736,7 @@ index 37f2aa8..41cb1f3 100644
@end example
Note: The legacy format '-boot @var{drives}' is still supported but its
@@ -379,7 +379,7 @@ For example, the following command-line sets the guest startup RAM size to
@@ -369,7 +369,7 @@ For example, the following command-line sets the guest startup RAM size to
memory the guest can reach to 4GB:
@example
@ -687,7 +745,7 @@ index 37f2aa8..41cb1f3 100644
@end example
If @var{slots} and @var{maxmem} are not specified, memory hotplug won't
@@ -448,12 +448,12 @@ Enable audio and selected sound hardware. Use 'help' to print all
@@ -438,12 +438,12 @@ Enable audio and selected sound hardware. Use 'help' to print all
available sound hardware.
@example
@ -706,7 +764,7 @@ index 37f2aa8..41cb1f3 100644
@end example
Note that Linux's i810_audio OSS kernel (for AC97) module might
@@ -946,21 +946,21 @@ is off.
@@ -918,21 +918,21 @@ is off.
Instead of @option{-cdrom} you can use:
@example
@ -734,7 +792,7 @@ index 37f2aa8..41cb1f3 100644
-add-fd fd=3,set=2,opaque="rdwr:/path/to/file"
-add-fd fd=4,set=2,opaque="rdonly:/path/to/file"
-drive file=/dev/fdset/2,index=0,media=disk
@@ -968,28 +968,28 @@ qemu-system-i386
@@ -940,28 +940,28 @@ qemu-system-i386
You can connect a CDROM to the slave of ide0:
@example
@ -769,7 +827,7 @@ index 37f2aa8..41cb1f3 100644
@end example
ETEXI
@@ -2055,8 +2055,8 @@ The following two example do exactly the same, to show how @option{-nic} can
@@ -1975,8 +1975,8 @@ The following two example do exactly the same, to show how @option{-nic} can
be used to shorten the command line length (note that the e1000 is the default
on i386, so the @option{model=e1000} parameter could even be omitted here, too):
@example
@ -780,7 +838,7 @@ index 37f2aa8..41cb1f3 100644
@end example
@item -nic none
@@ -2127,7 +2127,7 @@ can not be resolved.
@@ -2047,7 +2047,7 @@ can not be resolved.
Example:
@example
@ -789,7 +847,7 @@ index 37f2aa8..41cb1f3 100644
@end example
@item domainname=@var{domain}
@@ -2146,7 +2146,7 @@ a guest from a local directory.
@@ -2071,7 +2071,7 @@ a guest from a local directory.
Example (using pxelinux):
@example
@ -798,7 +856,7 @@ index 37f2aa8..41cb1f3 100644
-netdev user,id=n1,tftp=/path/to/tftp/files,bootfile=/pxelinux.0
@end example
@@ -2180,7 +2180,7 @@ screen 0, use the following:
@@ -2105,7 +2105,7 @@ screen 0, use the following:
@example
# on the host
@ -807,7 +865,7 @@ index 37f2aa8..41cb1f3 100644
# this host xterm should open in the guest X11 server
xterm -display :1
@end example
@@ -2190,7 +2190,7 @@ the guest, use the following:
@@ -2115,7 +2115,7 @@ the guest, use the following:
@example
# on the host
@ -816,7 +874,7 @@ index 37f2aa8..41cb1f3 100644
telnet localhost 5555
@end example
@@ -2209,7 +2209,7 @@ lifetime, like in the following example:
@@ -2134,7 +2134,7 @@ lifetime, like in the following example:
@example
# open 10.10.1.1:4321 on bootup, connect 10.0.2.100:1234 to it whenever
# the guest accesses it
@ -825,7 +883,7 @@ index 37f2aa8..41cb1f3 100644
@end example
Or you can execute a command on every TCP connection established by the guest,
@@ -2218,7 +2218,7 @@ so that QEMU behaves similar to an inetd process for that virtual server:
@@ -2143,7 +2143,7 @@ so that QEMU behaves similar to an inetd process for that virtual server:
@example
# call "netcat 10.10.1.1 4321" on every TCP connection to 10.0.2.100:1234
# and connect the TCP stream to its stdin/stdout
@ -834,7 +892,7 @@ index 37f2aa8..41cb1f3 100644
@end example
@end table
@@ -2250,21 +2250,22 @@ Examples:
@@ -2170,21 +2170,22 @@ Examples:
@example
#launch a QEMU instance with the default network script
@ -860,7 +918,7 @@ index 37f2aa8..41cb1f3 100644
-netdev tap,id=n1,"helper=/path/to/qemu-bridge-helper"
@end example
@@ -2281,13 +2282,13 @@ Examples:
@@ -2201,13 +2202,13 @@ Examples:
@example
#launch a QEMU instance with the default network helper to
#connect a TAP device to bridge br0
@ -876,7 +934,7 @@ index 37f2aa8..41cb1f3 100644
@end example
@item -netdev socket,id=@var{id}[,fd=@var{h}][,listen=[@var{host}]:@var{port}][,connect=@var{host}:@var{port}]
@@ -2302,13 +2303,13 @@ specifies an already opened TCP socket.
@@ -2222,13 +2223,13 @@ specifies an already opened TCP socket.
Example:
@example
# launch a first QEMU instance
@ -896,7 +954,7 @@ index 37f2aa8..41cb1f3 100644
@end example
@item -netdev socket,id=@var{id}[,fd=@var{h}][,mcast=@var{maddr}:@var{port}[,localaddr=@var{addr}]]
@@ -2331,23 +2332,23 @@ Use @option{fd=h} to specify an already opened UDP multicast socket.
@@ -2251,15 +2252,15 @@ Use @option{fd=h} to specify an already opened UDP multicast socket.
Example:
@example
# launch one QEMU instance
@ -915,13 +973,11 @@ index 37f2aa8..41cb1f3 100644
+ -netdev socket,id=n2,mcast=230.0.0.1:1234
# launch yet another QEMU instance on same "bus"
-qemu-system-i386 linux.img \
- -device e1000,netdev=n3,macaddr=52:54:00:12:34:58 \
- -netdev socket,id=n3,mcast=230.0.0.1:1234
+qemu-kvm linux.img \
+ -device e1000,netdev=n3,macaddr=52:54:00:12:34:58 \
+ -netdev socket,id=n3,mcast=230.0.0.1:1234
-device e1000,netdev=n3,mac=52:54:00:12:34:58 \
-netdev socket,id=n3,mcast=230.0.0.1:1234
@end example
@@ -2267,7 +2268,7 @@ qemu-system-i386 linux.img \
Example (User Mode Linux compat.):
@example
# launch QEMU instance (note mcast address selected is UML's default)
@ -930,7 +986,7 @@ index 37f2aa8..41cb1f3 100644
-device e1000,netdev=n1,mac=52:54:00:12:34:56 \
-netdev socket,id=n1,mcast=239.192.168.1:1102
# launch UML
@@ -2356,9 +2357,12 @@ qemu-system-i386 linux.img \
@@ -2276,9 +2277,12 @@ qemu-system-i386 linux.img \
Example (send packets from host's 1.2.3.4):
@example
@ -946,7 +1002,7 @@ index 37f2aa8..41cb1f3 100644
@end example
@item -netdev l2tpv3,id=@var{id},src=@var{srcaddr},dst=@var{dstaddr}[,srcport=@var{srcport}][,dstport=@var{dstport}],txsession=@var{txsession}[,rxsession=@var{rxsession}][,ipv6][,udp][,cookie64][,counter][,pincounter][,txcookie=@var{txcookie}][,rxcookie=@var{rxcookie}][,offset=@var{offset}]
@@ -2416,7 +2420,7 @@ brctl addif br-lan vmtunnel0
@@ -2336,7 +2340,7 @@ brctl addif br-lan vmtunnel0
# on 4.3.2.1
# launch QEMU instance - if your network has reorder or is very lossy add ,pincounter
@ -955,7 +1011,7 @@ index 37f2aa8..41cb1f3 100644
-netdev l2tpv3,id=n1,src=4.2.3.1,dst=1.2.3.4,udp,srcport=16384,dstport=16384,rxsession=0xffffffff,txsession=0xffffffff,counter
@end example
@@ -2433,7 +2437,7 @@ Example:
@@ -2353,7 +2357,7 @@ Example:
# launch vde switch
vde_switch -F -sock /tmp/myswitch
# launch QEMU instance
@ -964,7 +1020,7 @@ index 37f2aa8..41cb1f3 100644
@end example
@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
@@ -2447,11 +2451,11 @@ be created for multiqueue vhost-user.
@@ -2367,11 +2371,11 @@ be created for multiqueue vhost-user.
Example:
@example
@ -981,7 +1037,7 @@ index 37f2aa8..41cb1f3 100644
@end example
@item -netdev hubport,id=@var{id},hubid=@var{hubid}[,netdev=@var{nd}]
@@ -2877,7 +2881,7 @@ and communicate. Requires the Linux @code{vhci} driver installed. Can
@@ -2804,7 +2808,7 @@ and communicate. Requires the Linux @code{vhci} driver installed. Can
be used as following:
@example
@ -990,7 +1046,7 @@ index 37f2aa8..41cb1f3 100644
@end example
@item -bt device:@var{dev}[,vlan=@var{n}]
@@ -3322,14 +3326,14 @@ ETEXI
@@ -3253,14 +3257,14 @@ ETEXI
DEF("realtime", HAS_ARG, QEMU_OPTION_realtime,
"-realtime [mlock=on|off]\n"
@ -1008,7 +1064,7 @@ index 37f2aa8..41cb1f3 100644
(enabled by default).
ETEXI
@@ -3367,7 +3371,7 @@ connections will likely be TCP-based, but also UDP, pseudo TTY, or even
@@ -3298,7 +3302,7 @@ connections will likely be TCP-based, but also UDP, pseudo TTY, or even
stdio are reasonable use case. The latter is allowing to start QEMU from
within gdb and establish the connection via a pipe:
@example
@ -1017,7 +1073,7 @@ index 37f2aa8..41cb1f3 100644
@end example
ETEXI
@@ -4299,7 +4303,7 @@ which specify the queue number of cryptodev backend, the default of
@@ -4247,7 +4251,7 @@ which specify the queue number of cryptodev backend, the default of
@example
@ -1026,7 +1082,7 @@ index 37f2aa8..41cb1f3 100644
[...] \
-object cryptodev-backend-builtin,id=cryptodev0 \
-device virtio-crypto-pci,id=crypto0,cryptodev=cryptodev0 \
@@ -4319,7 +4323,7 @@ of cryptodev backend for multiqueue vhost-user, the default of @var{queues} is 1
@@ -4267,7 +4271,7 @@ of cryptodev backend for multiqueue vhost-user, the default of @var{queues} is 1
@example

View File

@@ -1,4 +1,4 @@
From 69912b533a88bda6377292231fb94475a674a90d Mon Sep 17 00:00:00 2001
From 0c8a71a5751106013f9ecfdc20f308cc1e44045b Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Fri, 5 May 2017 19:06:14 +0200
Subject: usb-xhci: Fix PCI capability order
@@ -43,6 +43,19 @@ qemu-kvm: load of migration failed: Invalid argument
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
--
Rebase notes (2.9.0):
- Change in assert condition (upstream)
(cherry picked from commit aad727a5ecde1ad4935eb8427604d4df5a1f1f35)
(cherry picked from commit 2dd7402227e77d748a7375233ac9e7feab244bda)
Conflicts:
hw/usb/hcd-xhci.c
(cherry picked from commit a42f86dc906cc7d2c16d02bf125ed76847b469cb)
(cherry picked from commit 992ab2e4f6e15d3e51bc716763aa8d6f45c6d29d)
---
hw/usb/hcd-xhci.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
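The body of this diff is not reproduced above, but the reasoning behind it can be shown in isolation: PCI capabilities form a linked list in configuration space, so the order in which a device registers them fixes the offsets that the guest and the migration stream see. A self-contained toy with made-up capability sizes and a made-up starting offset, not the actual xhci layout:

#include <stdio.h>

/* Toy model only: made-up capability names, sizes and starting offset.
 * It shows why registering the same capabilities in a different order
 * produces a different config-space layout, which is what breaks
 * migration between the two builds. */
struct cap {
    const char *name;
    int size;
};

static void layout(const char *label, const struct cap *caps, int n)
{
    int off = 0x40;   /* assumed first free config-space offset */

    printf("%s:\n", label);
    for (int i = 0; i < n; i++) {
        printf("  %-5s at 0x%02x\n", caps[i].name, off);
        off += caps[i].size;
    }
}

int main(void)
{
    const struct cap source[] = { { "PM", 8 }, { "MSI", 16 }, { "MSI-X", 12 } };
    const struct cap dest[]   = { { "MSI", 16 }, { "PM", 8 }, { "MSI-X", 12 } };

    layout("source order", source, 3);
    layout("destination order", dest, 3);
    return 0;
}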

View File

@@ -1,72 +0,0 @@
From f3d0b355f946ab87b281ef75ebfb52f7b7592f2a Mon Sep 17 00:00:00 2001
From: Thomas Huth <thuth@redhat.com>
Date: Thu, 9 Aug 2018 10:15:08 +0000
Subject: linux-headers: asm-s390/kvm.h header sync
RH-Author: Thomas Huth <thuth@redhat.com>
Message-id: <1533813309-9643-2-git-send-email-thuth@redhat.com>
Patchwork-id: 81688
O-Subject: [RHEL-8.0 qemu-kvm PATCH 1/2] linux-headers: asm-s390/kvm.h header sync
Bugzilla: 1612938
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Jens Freimann <jfreiman@redhat.com>
This is a header sync with the linux uapi header. The corresponding
kernel commit id is a3da7b4a3be51f37f434f14e11e60491f098b6ea (in
the kvm/next branch)
Signed-off-by: Thomas Huth <thuth@redhat.com>
Merged patches (3.0.0):
- 57332f1 linux-headers: Update to include KVM_CAP_S390_HPAGE_1M
---
linux-headers/asm-s390/kvm.h | 5 ++++-
linux-headers/linux/kvm.h | 1 +
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h
index 11def14..1ab9901 100644
--- a/linux-headers/asm-s390/kvm.h
+++ b/linux-headers/asm-s390/kvm.h
@@ -4,7 +4,7 @@
/*
* KVM s390 specific structures and definitions
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2018
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -225,6 +225,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_FPRS (1UL << 8)
#define KVM_SYNC_GSCB (1UL << 9)
#define KVM_SYNC_BPBC (1UL << 10)
+#define KVM_SYNC_ETOKEN (1UL << 11)
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
#define SDNXL (1UL << SDNXC)
@@ -258,6 +259,8 @@ struct kvm_sync_regs {
struct {
__u64 reserved1[2];
__u64 gscb[4];
+ __u64 etoken;
+ __u64 etoken_extension;
};
};
};
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index 98f389a..2aae948 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -949,6 +949,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
#define KVM_CAP_HYPERV_TLBFLUSH 155
+#define KVM_CAP_S390_HPAGE_1M 156
#ifdef KVM_CAP_IRQ_ROUTING
--
1.8.3.1

View File

@@ -1,4 +1,4 @@
From a883dbcc1c55cab189ff4a48cbdd12c4b4246b9c Mon Sep 17 00:00:00 2001
From 7d316f7aebb9634c3ac5b79f5b1198e27726a0fd Mon Sep 17 00:00:00 2001
From: Fam Zheng <famz@redhat.com>
Date: Wed, 14 Jun 2017 15:37:01 +0200
Subject: virtio-scsi: Reject scsi-cd if data plane enabled [RHEL only]
@@ -36,13 +36,16 @@ crashing.
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
(cherry picked from commit b0caf00bbc35c7d89e02999bdce86e1f867728e8)
(cherry picked from commit c9c4f117d8b507c2f86035c282d537c0a327364f)
(cherry picked from commit 5d586bb2543337f0ff172c6ce942dba3acbcedff)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
hw/scsi/virtio-scsi.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 5a3057d..52a3c1d 100644
index 3aa9971..9f754c4 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -790,6 +790,15 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
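The added hunk itself is not reproduced above. A self-contained toy of the kind of check the subject describes, rejecting a CD-ROM hotplug while a dedicated iothread (data plane) is active; the names and structure are illustrative, not the RHEL-only hunk:

#include <stdio.h>
#include <string.h>

/* Toy stand-in for the controller state: a non-NULL iothread context means
 * the data plane is enabled.  This mirrors the idea of the check, not the
 * actual virtio-scsi code. */
struct scsi_controller {
    void *iothread_ctx;
};

static int hotplug_allowed(const struct scsi_controller *c, const char *device_type)
{
    if (c->iothread_ctx && strcmp(device_type, "scsi-cd") == 0) {
        fprintf(stderr, "scsi-cd is not supported when the data plane is enabled\n");
        return 0;
    }
    return 1;
}

int main(void)
{
    struct scsi_controller dataplane_on  = { .iothread_ctx = (void *)1 };
    struct scsi_controller dataplane_off = { .iothread_ctx = NULL };

    printf("data plane on:  scsi-cd %s\n",
           hotplug_allowed(&dataplane_on, "scsi-cd") ? "accepted" : "rejected");
    printf("data plane off: scsi-cd %s\n",
           hotplug_allowed(&dataplane_off, "scsi-cd") ? "accepted" : "rejected");
    return 0;
}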

View File

@@ -1,114 +0,0 @@
From 3b4526245dcb2daad3a6393b6b129f85f9e2c7a2 Mon Sep 17 00:00:00 2001
From: David Hildenbrand <david@redhat.com>
Date: Mon, 6 Aug 2018 14:18:41 +0100
Subject: s390x: Enable KVM huge page backing support
RH-Author: David Hildenbrand <david@redhat.com>
Message-id: <20180806141842.23963-3-david@redhat.com>
Patchwork-id: 81645
O-Subject: [RHEL-8.0 qemu-kvm PATCH v2 2/3] s390x: Enable KVM huge page backing support
Bugzilla: 1610906
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1610906
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=17624600
Upstream: N/A
The kernel part is in kvm/next, scheduled for 4.19. The patch has been
reviewed upstream but cannot be picked up yet due to the outstanding Linux
header sync. Conflict with the upstream patch: we have no units.h, therefore
we have to unfold "4*KiB" and "1*MiB".
QEMU has had huge page support for a long time already, but KVM
memory management on s390x needed some changes to work with huge
page backings.
Now that we have support, let's enable it if requested and
available. If there is no support, we properly tell the user and
back out instead of failing to run the VM later on.
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
target/s390x/kvm.c | 34 ++++++++++++++++++++++++++++++++--
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index bbcbeed..c36ff36f 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -34,6 +34,7 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
+#include "qemu/mmap-alloc.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "hw/hw.h"
@@ -139,6 +140,7 @@ static int cap_mem_op;
static int cap_s390_irq;
static int cap_ri;
static int cap_gs;
+static int cap_hpage_1m;
static int active_cmma;
@@ -220,9 +222,9 @@ static void kvm_s390_enable_cmma(void)
.attr = KVM_S390_VM_MEM_ENABLE_CMMA,
};
- if (mem_path) {
+ if (cap_hpage_1m) {
warn_report("CMM will not be enabled because it is not "
- "compatible with hugetlbfs.");
+ "compatible with huge memory backings.");
return;
}
rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
@@ -281,10 +283,38 @@ void kvm_s390_crypto_reset(void)
}
}
+static int kvm_s390_configure_mempath_backing(KVMState *s)
+{
+ size_t path_psize = qemu_mempath_getpagesize(mem_path);
+
+ if (path_psize == 4 * 1024) {
+ return 0;
+ }
+
+ if (path_psize != 1024 * 1024) {
+ error_report("Memory backing with 2G pages was specified, "
+ "but KVM does not support this memory backing");
+ return -EINVAL;
+ }
+
+ if (kvm_vm_enable_cap(s, KVM_CAP_S390_HPAGE_1M, 0)) {
+ error_report("Memory backing with 1M pages was specified, "
+ "but KVM does not support this memory backing");
+ return -EINVAL;
+ }
+
+ cap_hpage_1m = 1;
+ return 0;
+}
+
int kvm_arch_init(MachineState *ms, KVMState *s)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
+ if (mem_path && kvm_s390_configure_mempath_backing(s)) {
+ return -EINVAL;
+ }
+
mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
--
1.8.3.1

View File

@@ -1,190 +0,0 @@
From 8eacbf0e8e26b2a8aa3de955a57a7a3cb680d922 Mon Sep 17 00:00:00 2001
From: Thomas Huth <thuth@redhat.com>
Date: Thu, 9 Aug 2018 10:15:09 +0000
Subject: s390x/kvm: add etoken facility
RH-Author: Thomas Huth <thuth@redhat.com>
Message-id: <1533813309-9643-3-git-send-email-thuth@redhat.com>
Patchwork-id: 81687
O-Subject: [RHEL-8.0 qemu-kvm PATCH 2/2] s390x/kvm: add etoken facility
Bugzilla: 1612938
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Jens Freimann <jfreiman@redhat.com>
Provide the etoken facility. We need to handle cpu model, migration and
clear reset.
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Janosch Frank <frankja@linux.ibm.com>
Signed-off-by: Thomas Huth <thuth@redhat.com>
---
target/s390x/cpu.h | 3 +++
target/s390x/cpu_features.c | 3 ++-
target/s390x/cpu_features_def.h | 3 ++-
target/s390x/gen-features.c | 3 ++-
target/s390x/kvm.c | 11 +++++++++++
target/s390x/machine.c | 20 +++++++++++++++++++-
6 files changed, 39 insertions(+), 4 deletions(-)
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 2c3dd2d..21b2f21 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -2,6 +2,7 @@
* S/390 virtual CPU header
*
* Copyright (c) 2009 Ulrich Hecht
+ * Copyright IBM Corp. 2012, 2018
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -68,6 +69,8 @@ struct CPUS390XState {
uint32_t aregs[16]; /* access registers */
uint8_t riccb[64]; /* runtime instrumentation control */
uint64_t gscb[4]; /* guarded storage control */
+ uint64_t etoken; /* etoken */
+ uint64_t etoken_extension; /* etoken extension */
/* Fields up to this point are not cleared by initial CPU reset */
struct {} start_initial_reset_fields;
diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c
index 3b9e274..e05e6aa 100644
--- a/target/s390x/cpu_features.c
+++ b/target/s390x/cpu_features.c
@@ -1,7 +1,7 @@
/*
* CPU features/facilities for s390x
*
- * Copyright 2016 IBM Corp.
+ * Copyright IBM Corp. 2016, 2018
*
* Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
*
@@ -106,6 +106,7 @@ static const S390FeatDef s390_features[] = {
FEAT_INIT("irbm", S390_FEAT_TYPE_STFL, 145, "Insert-reference-bits-multiple facility"),
FEAT_INIT("msa8-base", S390_FEAT_TYPE_STFL, 146, "Message-security-assist-extension-8 facility (excluding subfunctions)"),
FEAT_INIT("cmmnt", S390_FEAT_TYPE_STFL, 147, "CMM: ESSA-enhancement (no translate) facility"),
+ FEAT_INIT("etoken", S390_FEAT_TYPE_STFL, 156, "Etoken facility"),
/* SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */
FEAT_INIT("gsls", S390_FEAT_TYPE_SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility"),
diff --git a/target/s390x/cpu_features_def.h b/target/s390x/cpu_features_def.h
index 7c5915c..ac2c947 100644
--- a/target/s390x/cpu_features_def.h
+++ b/target/s390x/cpu_features_def.h
@@ -1,7 +1,7 @@
/*
* CPU features/facilities for s390
*
- * Copyright 2016 IBM Corp.
+ * Copyright IBM Corp. 2016, 2018
*
* Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
* David Hildenbrand <dahi@linux.vnet.ibm.com>
@@ -93,6 +93,7 @@ typedef enum {
S390_FEAT_INSERT_REFERENCE_BITS_MULT,
S390_FEAT_MSA_EXT_8,
S390_FEAT_CMM_NT,
+ S390_FEAT_ETOKEN,
/* Sclp Conf Char */
S390_FEAT_SIE_GSLS,
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index 6626b6f..5af042c 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -1,7 +1,7 @@
/*
* S390 feature list generator
*
- * Copyright 2016 IBM Corp.
+ * Copyright IBM Corp. 2016, 2018
*
* Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
* David Hildenbrand <dahi@linux.vnet.ibm.com>
@@ -471,6 +471,7 @@ static uint16_t full_GEN14_GA1[] = {
S390_FEAT_GROUP_MSA_EXT_7,
S390_FEAT_GROUP_MSA_EXT_8,
S390_FEAT_CMM_NT,
+ S390_FEAT_ETOKEN,
S390_FEAT_HPMA2,
S390_FEAT_SIE_KSS,
S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF,
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index c36ff36f..71d90f2 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -523,6 +523,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)
cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
}
+ if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
+ cs->kvm_run->s.regs.etoken = env->etoken;
+ cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
+ }
+
/* Finally the prefix */
if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
cs->kvm_run->s.regs.prefix = env->psa;
@@ -637,6 +643,11 @@ int kvm_arch_get_registers(CPUState *cs)
env->bpbc = cs->kvm_run->s.regs.bpbc;
}
+ if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
+ env->etoken = cs->kvm_run->s.regs.etoken;
+ env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
+ }
+
/* pfault parameters */
if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
env->pfault_token = cs->kvm_run->s.regs.pft;
diff --git a/target/s390x/machine.c b/target/s390x/machine.c
index bd3230d..cb792aa 100644
--- a/target/s390x/machine.c
+++ b/target/s390x/machine.c
@@ -1,7 +1,7 @@
/*
* S390x machine definitions and functions
*
- * Copyright IBM Corp. 2014
+ * Copyright IBM Corp. 2014, 2018
*
* Authors:
* Thomas Huth <thuth@linux.vnet.ibm.com>
@@ -216,6 +216,23 @@ const VMStateDescription vmstate_bpbc = {
}
};
+static bool etoken_needed(void *opaque)
+{
+ return s390_has_feat(S390_FEAT_ETOKEN);
+}
+
+const VMStateDescription vmstate_etoken = {
+ .name = "cpu/etoken",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = etoken_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.etoken, S390CPU),
+ VMSTATE_UINT64(env.etoken_extension, S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_s390_cpu = {
.name = "cpu",
.post_load = cpu_post_load,
@@ -251,6 +268,7 @@ const VMStateDescription vmstate_s390_cpu = {
&vmstate_exval,
&vmstate_gscb,
&vmstate_bpbc,
+ &vmstate_etoken,
NULL
},
};
--
1.8.3.1

View File

@@ -1,51 +0,0 @@
From 29df663d045345a8c498dc3966cc59dcf091a50d Mon Sep 17 00:00:00 2001
From: Cornelia Huck <cohuck@redhat.com>
Date: Tue, 7 Aug 2018 09:05:54 +0000
Subject: s390x/cpumodel: default enable bpb and ppa15 for z196 and later
RH-Author: Cornelia Huck <cohuck@redhat.com>
Message-id: <20180807100554.29643-3-cohuck@redhat.com>
Patchwork-id: 81660
O-Subject: [qemu-kvm RHEL8/virt212 PATCH 2/2] s390x/cpumodel: default enable bpb and ppa15 for z196 and later
Bugzilla: 1595718
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Jens Freimann <jfreiman@redhat.com>
Upstream: downstream version of 8727315111 ("s390x/cpumodel: default
enable bpb and ppa15 for z196 and later"); downstream does
not have the upstream machine types, instead we need to
turn off the bits for the RHEL 7.5 machine
Most systems and host kernels provide the necessary building blocks for
bpb and ppa15. We can reverse the logic and default enable those
features, while still allowing them to be disabled via the CPU model.
So let us add bpb and ppa15 to the default CPU model of z196 and later
for the qemu rhel7.6.0 machine type (like -cpu z13). Older machine types (i.e.
s390-ccw-virtio-rhel7.5.0) will retain the old value and not provide those
bits in the default model.
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
---
hw/s390x/s390-virtio-ccw.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 0f135c9..cdf4558 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -931,6 +931,10 @@ static void ccw_machine_rhel750_instance_options(MachineState *machine)
/* before 2.12 we emulated the very first z900, and RHEL 7.5 is
based on 2.10 */
s390_set_qemu_cpu_model(0x2064, 7, 1, qemu_cpu_feat);
+
+ /* bpb and ppa15 were only in the full model in RHEL 7.5 */
+ s390_cpudef_featoff_greater(11, 1, S390_FEAT_PPA15);
+ s390_cpudef_featoff_greater(11, 1, S390_FEAT_BPB);
}
static void ccw_machine_rhel750_class_options(MachineClass *mc)
--
1.8.3.1

View File

@@ -1,87 +0,0 @@
From 43b08a1e4bc47d810212f569cc0fc30eebfd7036 Mon Sep 17 00:00:00 2001
From: Markus Armbruster <armbru@redhat.com>
Date: Fri, 31 Aug 2018 13:59:22 +0100
Subject: i386: Fix arch_query_cpu_model_expansion() leak
RH-Author: Markus Armbruster <armbru@redhat.com>
Message-id: <20180831135922.6073-3-armbru@redhat.com>
Patchwork-id: 81980
O-Subject: [qemu-kvm RHEL8/virt212 PATCH 2/2] i386: Fix arch_query_cpu_model_expansion() leak
Bugzilla: 1615717
RH-Acked-by: Eduardo Habkost <ehabkost@redhat.com>
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
From: Eduardo Habkost <ehabkost@redhat.com>
Reported by Coverity:
Error: RESOURCE_LEAK (CWE-772): [#def439]
qemu-2.12.0/target/i386/cpu.c:3179: alloc_fn: Storage is returned from allocation function "qdict_new".
qemu-2.12.0/qobject/qdict.c:34:5: alloc_fn: Storage is returned from allocation function "g_malloc0".
qemu-2.12.0/qobject/qdict.c:34:5: var_assign: Assigning: "qdict" = "g_malloc0(4120UL)".
qemu-2.12.0/qobject/qdict.c:37:5: return_alloc: Returning allocated memory "qdict".
qemu-2.12.0/target/i386/cpu.c:3179: var_assign: Assigning: "props" = storage returned from "qdict_new()".
qemu-2.12.0/target/i386/cpu.c:3217: leaked_storage: Variable "props" going out of scope leaks the storage it points to.
This was introduced by commit b8097deb359b ("i386: Improve
query-cpu-model-expansion full mode").
The leak is only theoretical: if ret->model->props is set to
props, the qapi_free_CpuModelExpansionInfo() call will free props
too in case of errors. The only way for this to not happen is if
we enter the default branch of the switch statement, which would
never happen because all CpuModelExpansionType values are being
handled.
It's still worth changing this to make the allocation logic
easier to follow and make the Coverity error go away. To make
everything simpler, initialize ret->model and ret->model->props
earlier in the function.
While at it, remove the redundant check for !props because props is
always initialized at the beginning of the function.
Fixes: b8097deb359bbbd92592b9670adfe9e245b2d0bd
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <20180816183509.8231-1-ehabkost@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit e38bf612477fca62b205ebd909b1372a7e45a8c0)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
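The pattern behind the fix, attaching an allocation to its owner as soon as both exist so that a single free of the owner also covers every error path, can be shown with a few lines of self-contained C using generic names rather than the QAPI types:

#include <stdlib.h>

struct props  { int dummy; };
struct result { struct props *props; };

static void free_result(struct result *r)
{
    if (r) {
        free(r->props);
        free(r);
    }
}

/* Attach props to the result as soon as both exist; any later error path
 * only needs free_result() and nothing can leak. */
static struct result *build_result(int fail)
{
    struct result *r = calloc(1, sizeof(*r));
    struct props  *p = calloc(1, sizeof(*p));

    if (!r || !p) {
        free(p);
        free_result(r);
        return NULL;
    }
    r->props = p;            /* ownership transferred early, as in the patch */

    if (fail) {              /* simulated error later in the function */
        free_result(r);      /* frees props as well, so no leak */
        return NULL;
    }
    return r;
}

int main(void)
{
    free_result(build_result(0));
    (void)build_result(1);
    return 0;
}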
---
target/i386/cpu.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 051018a..71e2808 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -3784,6 +3784,9 @@ arch_query_cpu_model_expansion(CpuModelExpansionType type,
}
props = qdict_new();
+ ret->model = g_new0(CpuModelInfo, 1);
+ ret->model->props = QOBJECT(props);
+ ret->model->has_props = true;
switch (type) {
case CPU_MODEL_EXPANSION_TYPE_STATIC:
@@ -3804,15 +3807,9 @@ arch_query_cpu_model_expansion(CpuModelExpansionType type,
goto out;
}
- if (!props) {
- props = qdict_new();
- }
x86_cpu_to_dict(xc, props);
- ret->model = g_new0(CpuModelInfo, 1);
ret->model->name = g_strdup(base_name);
- ret->model->props = QOBJECT(props);
- ret->model->has_props = true;
out:
object_unref(OBJECT(xc));
--
1.8.3.1

View File

@@ -1,54 +0,0 @@
From 628b10cd4d5cd8fde97dab66f143db78fe03398a Mon Sep 17 00:00:00 2001
From: Eduardo Habkost <ehabkost@redhat.com>
Date: Tue, 21 Aug 2018 19:15:41 +0100
Subject: i386: Disable TOPOEXT by default on "-cpu host"
RH-Author: Eduardo Habkost <ehabkost@redhat.com>
Message-id: <20180821191541.31916-2-ehabkost@redhat.com>
Patchwork-id: 81904
O-Subject: [qemu-kvm RHEL8/virt212 PATCH v2 1/1] i386: Disable TOPOEXT by default on "-cpu host"
Bugzilla: 1619804
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Igor Mammedov <imammedo@redhat.com>
Enabling TOPOEXT is always allowed, but it can't be enabled
blindly by "-cpu host" because it may make guests crash if the
rest of the cache topology information isn't provided or isn't
consistent.
This addresses the bug reported at:
https://bugzilla.redhat.com/show_bug.cgi?id=1613277
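What no_autoenable_flags means in practice can be sketched in a few lines: the bit stays available, it is simply masked out of the set that "-cpu host" would otherwise pull in automatically. The bit positions and the masking expression below illustrate the idea and are not the actual expansion code:

#include <stdio.h>
#include <stdint.h>

#define FEAT_SVM      (1u << 2)    /* example bit */
#define FEAT_TOPOEXT  (1u << 22)   /* CPUID_EXT3_TOPOEXT: bit 22 of 0x80000001 ECX */

int main(void)
{
    uint32_t host_supported   = FEAT_SVM | FEAT_TOPOEXT;         /* what the host could offer */
    uint32_t no_autoenable    = FEAT_TOPOEXT;                     /* added by this patch */
    uint32_t cpu_host         = host_supported & ~no_autoenable;  /* "-cpu host" */
    uint32_t cpu_host_topoext = cpu_host | FEAT_TOPOEXT;          /* explicit "+topoext" */

    printf("-cpu host          -> topoext %s\n",
           (cpu_host & FEAT_TOPOEXT) ? "on" : "off");
    printf("-cpu host,+topoext -> topoext %s\n",
           (cpu_host_topoext & FEAT_TOPOEXT) ? "on" : "off");
    return 0;
}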
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Message-Id: <20180809221852.15285-1-ehabkost@redhat.com>
Tested-by: Richard W.M. Jones <rjones@redhat.com>
Reviewed-by: Babu Moger <babu.moger@amd.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
(cherry picked from commit 7210a02c58572b2686a3a8d610c6628f87864aed)
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
target/i386/cpu.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 71e2808..198d578 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -849,6 +849,12 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
.tcg_features = TCG_EXT3_FEATURES,
+ /*
+ * TOPOEXT is always allowed but can't be enabled blindly by
+ * "-cpu host", as it requires consistent cache topology info
+ * to be provided so it doesn't confuse guests.
+ */
+ .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
},
[FEAT_C000_0001_EDX] = {
.feat_names = {
--
1.8.3.1

View File

@@ -1,77 +0,0 @@
From 1ed2bb0d831983b68bcdecd057c2c5bfd419c304 Mon Sep 17 00:00:00 2001
From: Jeffrey Cody <jcody@redhat.com>
Date: Wed, 26 Sep 2018 04:08:14 +0100
Subject: curl: Make sslverify=off disable host as well as peer verification.
RH-Author: Jeffrey Cody <jcody@redhat.com>
Message-id: <543d2f667af465dd809329fcba5175bc974d58d4.1537933576.git.jcody@redhat.com>
Patchwork-id: 82293
O-Subject: [RHEL8/rhel qemu-kvm PATCH 1/1] curl: Make sslverify=off disable host as well as peer verification.
Bugzilla: 1575925
RH-Acked-by: Richard Jones <rjones@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
From: "Richard W.M. Jones" <rjones@redhat.com>
The sslverify setting is supposed to turn off all TLS certificate
checks in libcurl. However, because of the way we use it, it only
turns off peer certificate authenticity checks
(CURLOPT_SSL_VERIFYPEER). This patch makes it also turn off the check
that the server name in the certificate is the same as the server
you're connecting to (CURLOPT_SSL_VERIFYHOST).
We can use Google's server at 8.8.8.8 which happens to have a bad TLS
certificate to demonstrate this:
$ ./qemu-img create -q -f qcow2 -b 'json: { "file.sslverify": "off", "file.driver": "https", "file.url": "https://8.8.8.8/foo" }' /var/tmp/file.qcow2
qemu-img: /var/tmp/file.qcow2: CURL: Error opening file: SSL: no alternative certificate subject name matches target host name '8.8.8.8'
Could not open backing image to determine size.
With this patch applied, qemu-img connects to the server regardless of
the bad certificate:
$ ./qemu-img create -q -f qcow2 -b 'json: { "file.sslverify": "off", "file.driver": "https", "file.url": "https://8.8.8.8/foo" }' /var/tmp/file.qcow2
qemu-img: /var/tmp/file.qcow2: CURL: Error opening file: The requested URL returned error: 404 Not Found
(The 404 error is expected because 8.8.8.8 is not actually serving a
file called "/foo".)
Of course the default (without sslverify=off) remains to always check
the certificate:
$ ./qemu-img create -q -f qcow2 -b 'json: { "file.driver": "https", "file.url": "https://8.8.8.8/foo" }' /var/tmp/file.qcow2
qemu-img: /var/tmp/file.qcow2: CURL: Error opening file: SSL: no alternative certificate subject name matches target host name '8.8.8.8'
Could not open backing image to determine size.
Further information about the two settings is available here:
https://curl.haxx.se/libcurl/c/CURLOPT_SSL_VERIFYPEER.html
https://curl.haxx.se/libcurl/c/CURLOPT_SSL_VERIFYHOST.html
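For reference, the same pairing of options in a minimal standalone libcurl client; the URL is a placeholder and the 'insecure' flag stands in for QEMU's sslverify=off setting, so this illustrates the libcurl semantics rather than QEMU code:

#include <curl/curl.h>
#include <stdbool.h>

/* Disabling verification must clear both checks: VERIFYPEER (is the
 * certificate signed by a trusted CA?) and VERIFYHOST (does the name in
 * the certificate match the host we connected to?). */
static CURLcode fetch(const char *url, bool insecure)
{
    CURL *curl = curl_easy_init();
    CURLcode rc;

    if (!curl) {
        return CURLE_FAILED_INIT;
    }
    curl_easy_setopt(curl, CURLOPT_URL, url);
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, insecure ? 0L : 1L);
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, insecure ? 0L : 2L);
    rc = curl_easy_perform(curl);
    curl_easy_cleanup(curl);
    return rc;
}

int main(void)
{
    curl_global_init(CURL_GLOBAL_DEFAULT);
    fetch("https://example.com/", false);   /* placeholder URL */
    curl_global_cleanup();
    return 0;
}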
Signed-off-by: Richard W.M. Jones <rjones@redhat.com>
Message-id: 20180914095622.19698-1-rjones@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
(cherry picked from commit 637fa44ab80c6b317adf1d117494325a95daad60)
Signed-off-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/curl.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/block/curl.c b/block/curl.c
index 229bb84..fabb2b4 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -483,6 +483,8 @@ static int curl_init_state(BDRVCURLState *s, CURLState *state)
curl_easy_setopt(state->curl, CURLOPT_URL, s->url);
curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYPEER,
(long) s->sslverify);
+ curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYHOST,
+ s->sslverify ? 2L : 0L);
if (s->cookie) {
curl_easy_setopt(state->curl, CURLOPT_COOKIE, s->cookie);
}
--
1.8.3.1

View File

@@ -1,51 +0,0 @@
From 096b7abf1d2755ad469e4bcb3dc6302021979814 Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Mon, 1 Oct 2018 10:54:48 +0100
Subject: migration/postcopy: Clear have_listen_thread
RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20181001105449.41090-2-dgilbert@redhat.com>
Patchwork-id: 82326
O-Subject: [RHEL-8.0 qemu-kvm PATCH 1/2] migration/postcopy: Clear have_listen_thread
Bugzilla: 1608765
RH-Acked-by: Pankaj Gupta <pagupta@redhat.com>
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Clear have_listen_thread when we exit the thread.
The fallout from this was that various things thought there was
an ongoing postcopy after the postcopy had finished.
The case that failed was postcopy->savevm->loadvm.
This corresponds to RH bug https://bugzilla.redhat.com/show_bug.cgi?id=1608765
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-Id: <20180914170430.54271-2-dgilbert@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 9cf4bb8730c669c40550e635a9e2b8ee4f1664ca)
Manual merge due to context
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
migration/savevm.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/migration/savevm.c b/migration/savevm.c
index 7f92567..762c4b2 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1676,6 +1676,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
migration_incoming_state_destroy();
qemu_loadvm_state_cleanup();
+ mis->have_listen_thread = false;
return NULL;
}
--
1.8.3.1

View File

@@ -1,52 +0,0 @@
From bff052b89b0c32c179d858bd8eed91e0d9f98db4 Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Mon, 1 Oct 2018 10:54:49 +0100
Subject: migration: cleanup in error paths in loadvm
RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20181001105449.41090-3-dgilbert@redhat.com>
Patchwork-id: 82325
O-Subject: [RHEL-8.0 qemu-kvm PATCH 2/2] migration: cleanup in error paths in loadvm
Bugzilla: 1608765
RH-Acked-by: Pankaj Gupta <pagupta@redhat.com>
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
There are a couple of error paths in qemu_loadvm_state
which happen early on, but after we've initialised the
load state; that needs to be cleaned up, otherwise
we can hit asserts if the state gets reinitialised later.
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-Id: <20180914170430.54271-3-dgilbert@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 096c83b7219c5a2145435afc8be750281e9cb447)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
migration/savevm.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/migration/savevm.c b/migration/savevm.c
index 762c4b2..27e054d 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2328,11 +2328,13 @@ int qemu_loadvm_state(QEMUFile *f)
if (migrate_get_current()->send_configuration) {
if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
error_report("Configuration section missing");
+ qemu_loadvm_state_cleanup();
return -EINVAL;
}
ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
if (ret) {
+ qemu_loadvm_state_cleanup();
return ret;
}
}
--
1.8.3.1

View File

@@ -1,372 +0,0 @@
From 2999207ffd4de9f139922b444edba07b051d4a67 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:07 +0100
Subject: jobs: change start callback to run callback
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-2-jsnow@redhat.com>
Patchwork-id: 82261
O-Subject: [RHEL8/rhel qemu-kvm PATCH 01/25] jobs: change start callback to run callback
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Presently we codify the entry point for a job as the "start" callback,
but a more apt name would be "run" to clarify the idea that when this
function returns we consider the job to have "finished," except for
any cleanup which occurs in separate callbacks later.
As part of this clarification, change the signature to include an error
object and a return code. The error pointer is not yet used, and the return
code, while captured, will be overwritten by actions in the job_completed
function.
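A minimal sketch of a job implementation under the new callback, matching the JobDriver change in the include/qemu/job.h hunk further down; DemoJob and do_one_step() are hypothetical names, and the fragment assumes QEMU's job API rather than being standalone:

/* Fragment (not standalone): the shape of a .run implementation after this
 * patch.  The coroutine returns its result instead of stashing it via
 * job_completed(); errp is accepted but, at this point in the series, not
 * yet used by the infrastructure. */
static int coroutine_fn demo_job_run(Job *job, Error **errp)
{
    DemoJob *s = container_of(job, DemoJob, common);   /* DemoJob is hypothetical */
    int ret = 0;

    while (!job_is_cancelled(job) && s->work_left) {
        ret = do_one_step(s);                          /* hypothetical helper */
        if (ret < 0) {
            break;
        }
        job_pause_point(job);                          /* honour pause/cancel requests */
    }
    return ret;
}

static const JobDriver demo_job_driver = {
    .instance_size = sizeof(DemoJob),
    .job_type      = JOB_TYPE_CREATE,                  /* placeholder job type */
    .run           = demo_job_run,                     /* was .start before this change */
};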
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180830015734.19765-2-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit f67432a2019caf05b57a146bf45c1024a5cb608e)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/backup.c | 7 ++++---
block/commit.c | 7 ++++---
block/create.c | 8 +++++---
block/mirror.c | 10 ++++++----
block/stream.c | 7 ++++---
include/qemu/job.h | 2 +-
job.c | 6 +++---
tests/test-bdrv-drain.c | 7 ++++---
tests/test-blockjob-txn.c | 16 ++++++++--------
tests/test-blockjob.c | 7 ++++---
10 files changed, 43 insertions(+), 34 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 8630d32..5d47781 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -480,9 +480,9 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
bdrv_dirty_iter_free(dbi);
}
-static void coroutine_fn backup_run(void *opaque)
+static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
{
- BackupBlockJob *job = opaque;
+ BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
BackupCompleteData *data;
BlockDriverState *bs = blk_bs(job->common.blk);
int64_t offset, nb_clusters;
@@ -587,6 +587,7 @@ static void coroutine_fn backup_run(void *opaque)
data = g_malloc(sizeof(*data));
data->ret = ret;
job_defer_to_main_loop(&job->common.job, backup_complete, data);
+ return ret;
}
static const BlockJobDriver backup_job_driver = {
@@ -596,7 +597,7 @@ static const BlockJobDriver backup_job_driver = {
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
- .start = backup_run,
+ .run = backup_run,
.commit = backup_commit,
.abort = backup_abort,
.clean = backup_clean,
diff --git a/block/commit.c b/block/commit.c
index e1814d9..905a1c5 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -134,9 +134,9 @@ static void commit_complete(Job *job, void *opaque)
bdrv_unref(top);
}
-static void coroutine_fn commit_run(void *opaque)
+static int coroutine_fn commit_run(Job *job, Error **errp)
{
- CommitBlockJob *s = opaque;
+ CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
CommitCompleteData *data;
int64_t offset;
uint64_t delay_ns = 0;
@@ -213,6 +213,7 @@ out:
data = g_malloc(sizeof(*data));
data->ret = ret;
job_defer_to_main_loop(&s->common.job, commit_complete, data);
+ return ret;
}
static const BlockJobDriver commit_job_driver = {
@@ -222,7 +223,7 @@ static const BlockJobDriver commit_job_driver = {
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
- .start = commit_run,
+ .run = commit_run,
},
};
diff --git a/block/create.c b/block/create.c
index 915cd41..04733c3 100644
--- a/block/create.c
+++ b/block/create.c
@@ -45,9 +45,9 @@ static void blockdev_create_complete(Job *job, void *opaque)
job_completed(job, s->ret, s->err);
}
-static void coroutine_fn blockdev_create_run(void *opaque)
+static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
{
- BlockdevCreateJob *s = opaque;
+ BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
job_progress_set_remaining(&s->common, 1);
s->ret = s->drv->bdrv_co_create(s->opts, &s->err);
@@ -55,12 +55,14 @@ static void coroutine_fn blockdev_create_run(void *opaque)
qapi_free_BlockdevCreateOptions(s->opts);
job_defer_to_main_loop(&s->common, blockdev_create_complete, NULL);
+
+ return s->ret;
}
static const JobDriver blockdev_create_job_driver = {
.instance_size = sizeof(BlockdevCreateJob),
.job_type = JOB_TYPE_CREATE,
- .start = blockdev_create_run,
+ .run = blockdev_create_run,
};
void qmp_blockdev_create(const char *job_id, BlockdevCreateOptions *options,
diff --git a/block/mirror.c b/block/mirror.c
index b48c3f8..b3363e9 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -812,9 +812,9 @@ static int mirror_flush(MirrorBlockJob *s)
return ret;
}
-static void coroutine_fn mirror_run(void *opaque)
+static int coroutine_fn mirror_run(Job *job, Error **errp)
{
- MirrorBlockJob *s = opaque;
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
MirrorExitData *data;
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
BlockDriverState *target_bs = blk_bs(s->target);
@@ -1041,7 +1041,9 @@ immediate_exit:
if (need_drain) {
bdrv_drained_begin(bs);
}
+
job_defer_to_main_loop(&s->common.job, mirror_exit, data);
+ return ret;
}
static void mirror_complete(Job *job, Error **errp)
@@ -1138,7 +1140,7 @@ static const BlockJobDriver mirror_job_driver = {
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
- .start = mirror_run,
+ .run = mirror_run,
.pause = mirror_pause,
.complete = mirror_complete,
},
@@ -1154,7 +1156,7 @@ static const BlockJobDriver commit_active_job_driver = {
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
- .start = mirror_run,
+ .run = mirror_run,
.pause = mirror_pause,
.complete = mirror_complete,
},
diff --git a/block/stream.c b/block/stream.c
index 9264b68..b4b987d 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -97,9 +97,9 @@ out:
g_free(data);
}
-static void coroutine_fn stream_run(void *opaque)
+static int coroutine_fn stream_run(Job *job, Error **errp)
{
- StreamBlockJob *s = opaque;
+ StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
StreamCompleteData *data;
BlockBackend *blk = s->common.blk;
BlockDriverState *bs = blk_bs(blk);
@@ -206,6 +206,7 @@ out:
data = g_malloc(sizeof(*data));
data->ret = ret;
job_defer_to_main_loop(&s->common.job, stream_complete, data);
+ return ret;
}
static const BlockJobDriver stream_job_driver = {
@@ -213,7 +214,7 @@ static const BlockJobDriver stream_job_driver = {
.instance_size = sizeof(StreamBlockJob),
.job_type = JOB_TYPE_STREAM,
.free = block_job_free,
- .start = stream_run,
+ .run = stream_run,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
},
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 18c9223..9cf463d 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -169,7 +169,7 @@ struct JobDriver {
JobType job_type;
/** Mandatory: Entrypoint for the Coroutine. */
- CoroutineEntry *start;
+ int coroutine_fn (*run)(Job *job, Error **errp);
/**
* If the callback is not NULL, it will be invoked when the job transitions
diff --git a/job.c b/job.c
index fa671b4..898260b 100644
--- a/job.c
+++ b/job.c
@@ -544,16 +544,16 @@ static void coroutine_fn job_co_entry(void *opaque)
{
Job *job = opaque;
- assert(job && job->driver && job->driver->start);
+ assert(job && job->driver && job->driver->run);
job_pause_point(job);
- job->driver->start(job);
+ job->ret = job->driver->run(job, NULL);
}
void job_start(Job *job)
{
assert(job && !job_started(job) && job->paused &&
- job->driver && job->driver->start);
+ job->driver && job->driver->run);
job->co = qemu_coroutine_create(job_co_entry, job);
job->pause_count--;
job->busy = true;
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 17bb850..a753386 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -757,9 +757,9 @@ static void test_job_completed(Job *job, void *opaque)
job_completed(job, 0, NULL);
}
-static void coroutine_fn test_job_start(void *opaque)
+static int coroutine_fn test_job_run(Job *job, Error **errp)
{
- TestBlockJob *s = opaque;
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
job_transition_to_ready(&s->common.job);
while (!s->should_complete) {
@@ -771,6 +771,7 @@ static void coroutine_fn test_job_start(void *opaque)
}
job_defer_to_main_loop(&s->common.job, test_job_completed, NULL);
+ return 0;
}
static void test_job_complete(Job *job, Error **errp)
@@ -785,7 +786,7 @@ BlockJobDriver test_job_driver = {
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
- .start = test_job_start,
+ .run = test_job_run,
.complete = test_job_complete,
},
};
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
index 58d9b87..3194924 100644
--- a/tests/test-blockjob-txn.c
+++ b/tests/test-blockjob-txn.c
@@ -38,25 +38,25 @@ static void test_block_job_complete(Job *job, void *opaque)
bdrv_unref(bs);
}
-static void coroutine_fn test_block_job_run(void *opaque)
+static int coroutine_fn test_block_job_run(Job *job, Error **errp)
{
- TestBlockJob *s = opaque;
- BlockJob *job = &s->common;
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
while (s->iterations--) {
if (s->use_timer) {
- job_sleep_ns(&job->job, 0);
+ job_sleep_ns(job, 0);
} else {
- job_yield(&job->job);
+ job_yield(job);
}
- if (job_is_cancelled(&job->job)) {
+ if (job_is_cancelled(job)) {
break;
}
}
- job_defer_to_main_loop(&job->job, test_block_job_complete,
+ job_defer_to_main_loop(job, test_block_job_complete,
(void *)(intptr_t)s->rc);
+ return s->rc;
}
typedef struct {
@@ -80,7 +80,7 @@ static const BlockJobDriver test_block_job_driver = {
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
- .start = test_block_job_run,
+ .run = test_block_job_run,
},
};
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index cb42f06..b0462bf 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -176,9 +176,9 @@ static void cancel_job_complete(Job *job, Error **errp)
s->should_complete = true;
}
-static void coroutine_fn cancel_job_start(void *opaque)
+static int coroutine_fn cancel_job_run(Job *job, Error **errp)
{
- CancelJob *s = opaque;
+ CancelJob *s = container_of(job, CancelJob, common.job);
while (!s->should_complete) {
if (job_is_cancelled(&s->common.job)) {
@@ -194,6 +194,7 @@ static void coroutine_fn cancel_job_start(void *opaque)
defer:
job_defer_to_main_loop(&s->common.job, cancel_job_completed, s);
+ return 0;
}
static const BlockJobDriver test_cancel_driver = {
@@ -202,7 +203,7 @@ static const BlockJobDriver test_cancel_driver = {
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
- .start = cancel_job_start,
+ .run = cancel_job_run,
.complete = cancel_job_complete,
},
};
--
1.8.3.1
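
To make the .start -> .run conversion above easier to follow outside the
diff context, here is a minimal, self-contained sketch of the new driver
contract: .run receives the Job pointer directly, the driver recovers its
own state with container_of, and the return value (0 or -errno) is what the
core later stores in job->ret. The Job, JobDriver and ToyBlockJob types
below are simplified stand-ins invented for illustration, not QEMU's real
definitions, and the coroutine machinery is left out.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for QEMU's Job / JobDriver / Error types. */
typedef struct Error Error;

typedef struct Job {
    const struct JobDriver *driver;
    int ret;                        /* filled from .run's return value */
} Job;

typedef struct JobDriver {
    /* New-style entry point: returns 0 on success, -errno on failure. */
    int (*run)(Job *job, Error **errp);
} JobDriver;

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* A concrete job embeds Job and recovers itself with container_of. */
typedef struct ToyBlockJob {
    Job job;
    int work_items;
} ToyBlockJob;

static int toy_job_run(Job *job, Error **errp)
{
    ToyBlockJob *s = container_of(job, ToyBlockJob, job);

    (void)errp;                     /* unused in this toy */
    while (s->work_items--) {
        /* ... do one unit of work ... */
    }
    return 0;                       /* success; the core stores this in job->ret */
}

static const JobDriver toy_job_driver = {
    .run = toy_job_run,
};

/* What job_co_entry now does, in miniature. */
static void run_job(Job *job)
{
    job->ret = job->driver->run(job, NULL);
}

int main(void)
{
    ToyBlockJob s = { .job = { .driver = &toy_job_driver }, .work_items = 3 };

    run_job(&s.job);
    printf("job finished with ret=%d\n", s.job.ret);
    return 0;
}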

View File

@@ -1,283 +0,0 @@
From df9702d737eea1720a10d350c24bdcc3f54bcba9 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Wed, 29 Aug 2018 21:57:27 -0400
Subject: jobs: canonize Error object
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-3-jsnow@redhat.com>
Patchwork-id: 82262
O-Subject: [RHEL8/rhel qemu-kvm PATCH 02/25] jobs: canonize Error object
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Jobs presently use both an Error object in the case of the create job,
and char strings in the case of generic errors elsewhere.
Unify the two paths as just j->err, and remove the extra argument from
job_completed. The integer error code for job_completed is kept for now,
to be removed shortly in a separate patch.
Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20180830015734.19765-3-jsnow@redhat.com
[mreitz: Dropped a superfluous g_strdup()]
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 3d1f8b07a4c241f81949eff507d9f3a8fd73b87b)
Signed-off-by: John Snow <jsnow@redhat.com>
---
block/backup.c | 2 +-
block/commit.c | 2 +-
block/create.c | 5 ++---
block/mirror.c | 2 +-
block/stream.c | 2 +-
include/qemu/job.h | 14 ++++++++------
job-qmp.c | 5 +++--
job.c | 18 ++++++------------
tests/test-bdrv-drain.c | 2 +-
tests/test-blockjob-txn.c | 2 +-
tests/test-blockjob.c | 2 +-
11 files changed, 26 insertions(+), 30 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 5d47781..1e965d5 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -388,7 +388,7 @@ static void backup_complete(Job *job, void *opaque)
{
BackupCompleteData *data = opaque;
- job_completed(job, data->ret, NULL);
+ job_completed(job, data->ret);
g_free(data);
}
diff --git a/block/commit.c b/block/commit.c
index 905a1c5..af7579d 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -117,7 +117,7 @@ static void commit_complete(Job *job, void *opaque)
* bdrv_set_backing_hd() to fail. */
block_job_remove_all_bdrv(bjob);
- job_completed(job, ret, NULL);
+ job_completed(job, ret);
g_free(data);
/* If bdrv_drop_intermediate() didn't already do that, remove the commit
diff --git a/block/create.c b/block/create.c
index 04733c3..26a385c 100644
--- a/block/create.c
+++ b/block/create.c
@@ -35,14 +35,13 @@ typedef struct BlockdevCreateJob {
BlockDriver *drv;
BlockdevCreateOptions *opts;
int ret;
- Error *err;
} BlockdevCreateJob;
static void blockdev_create_complete(Job *job, void *opaque)
{
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
- job_completed(job, s->ret, s->err);
+ job_completed(job, s->ret);
}
static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
@@ -50,7 +49,7 @@ static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
job_progress_set_remaining(&s->common, 1);
- s->ret = s->drv->bdrv_co_create(s->opts, &s->err);
+ s->ret = s->drv->bdrv_co_create(s->opts, errp);
job_progress_update(&s->common, 1);
qapi_free_BlockdevCreateOptions(s->opts);
diff --git a/block/mirror.c b/block/mirror.c
index b3363e9..6637f2b 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -710,7 +710,7 @@ static void mirror_exit(Job *job, void *opaque)
blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
bs_opaque->job = NULL;
- job_completed(job, data->ret, NULL);
+ job_completed(job, data->ret);
g_free(data);
bdrv_drained_end(src);
diff --git a/block/stream.c b/block/stream.c
index b4b987d..26a7753 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -93,7 +93,7 @@ out:
}
g_free(s->backing_file_str);
- job_completed(job, data->ret, NULL);
+ job_completed(job, data->ret);
g_free(data);
}
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 9cf463d..e0e9987 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -124,12 +124,16 @@ typedef struct Job {
/** Estimated progress_current value at the completion of the job */
int64_t progress_total;
- /** Error string for a failed job (NULL if, and only if, job->ret == 0) */
- char *error;
-
/** ret code passed to job_completed. */
int ret;
+ /**
+ * Error object for a failed job.
+ * If job->ret is nonzero and an error object was not set, it will be set
+ * to strerror(-job->ret) during job_completed.
+ */
+ Error *err;
+
/** The completion function that will be called when the job completes. */
BlockCompletionFunc *cb;
@@ -484,15 +488,13 @@ void job_transition_to_ready(Job *job);
/**
* @job: The job being completed.
* @ret: The status code.
- * @error: The error message for a failing job (only with @ret < 0). If @ret is
- * negative, but NULL is given for @error, strerror() is used.
*
* Marks @job as completed. If @ret is non-zero, the job transaction it is part
* of is aborted. If @ret is zero, the job moves into the WAITING state. If it
* is the last job to complete in its transaction, all jobs in the transaction
* move from WAITING to PENDING.
*/
-void job_completed(Job *job, int ret, Error *error);
+void job_completed(Job *job, int ret);
/** Asynchronously complete the specified @job. */
void job_complete(Job *job, Error **errp);
diff --git a/job-qmp.c b/job-qmp.c
index 410775d..a969b2b 100644
--- a/job-qmp.c
+++ b/job-qmp.c
@@ -146,8 +146,9 @@ static JobInfo *job_query_single(Job *job, Error **errp)
.status = job->status,
.current_progress = job->progress_current,
.total_progress = job->progress_total,
- .has_error = !!job->error,
- .error = g_strdup(job->error),
+ .has_error = !!job->err,
+ .error = job->err ? \
+ g_strdup(error_get_pretty(job->err)) : NULL,
};
return info;
diff --git a/job.c b/job.c
index 898260b..276024a 100644
--- a/job.c
+++ b/job.c
@@ -369,7 +369,7 @@ void job_unref(Job *job)
QLIST_REMOVE(job, job_list);
- g_free(job->error);
+ error_free(job->err);
g_free(job->id);
g_free(job);
}
@@ -546,7 +546,7 @@ static void coroutine_fn job_co_entry(void *opaque)
assert(job && job->driver && job->driver->run);
job_pause_point(job);
- job->ret = job->driver->run(job, NULL);
+ job->ret = job->driver->run(job, &job->err);
}
@@ -666,8 +666,8 @@ static void job_update_rc(Job *job)
job->ret = -ECANCELED;
}
if (job->ret) {
- if (!job->error) {
- job->error = g_strdup(strerror(-job->ret));
+ if (!job->err) {
+ error_setg(&job->err, "%s", strerror(-job->ret));
}
job_state_transition(job, JOB_STATUS_ABORTING);
}
@@ -865,17 +865,11 @@ static void job_completed_txn_success(Job *job)
}
}
-void job_completed(Job *job, int ret, Error *error)
+void job_completed(Job *job, int ret)
{
assert(job && job->txn && !job_is_completed(job));
job->ret = ret;
- if (error) {
- assert(job->ret < 0);
- job->error = g_strdup(error_get_pretty(error));
- error_free(error);
- }
-
job_update_rc(job);
trace_job_completed(job, ret, job->ret);
if (job->ret) {
@@ -893,7 +887,7 @@ void job_cancel(Job *job, bool force)
}
job_cancel_async(job, force);
if (!job_started(job)) {
- job_completed(job, -ECANCELED, NULL);
+ job_completed(job, -ECANCELED);
} else if (job->deferred_to_main_loop) {
job_completed_txn_abort(job);
} else {
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index a753386..00604df 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -754,7 +754,7 @@ typedef struct TestBlockJob {
static void test_job_completed(Job *job, void *opaque)
{
- job_completed(job, 0, NULL);
+ job_completed(job, 0);
}
static int coroutine_fn test_job_run(Job *job, Error **errp)
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
index 3194924..82cedee 100644
--- a/tests/test-blockjob-txn.c
+++ b/tests/test-blockjob-txn.c
@@ -34,7 +34,7 @@ static void test_block_job_complete(Job *job, void *opaque)
rc = -ECANCELED;
}
- job_completed(job, rc, NULL);
+ job_completed(job, rc);
bdrv_unref(bs);
}
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index b0462bf..408a226 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -167,7 +167,7 @@ static void cancel_job_completed(Job *job, void *opaque)
{
CancelJob *s = opaque;
s->completed = true;
- job_completed(job, 0, NULL);
+ job_completed(job, 0);
}
static void cancel_job_complete(Job *job, Error **errp)
--
1.8.3.1
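
The behavioural core of this patch fits in a few lines: job->err is now an
Error object, and a failing job that never set one gets an error
synthesized from strerror(-job->ret) when the result is finalized. The
sketch below models that fallback with a plain string instead of QEMU's
Error/error_setg API; ToyJob and toy_job_update_rc are invented names.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for the patched Job fields: the Error object reduced to a
 * message string. */
typedef struct {
    int ret;               /* 0 on success, -errno on failure */
    const char *err;       /* NULL if, and only if, no error was recorded */
} ToyJob;

/* Mirrors what the patched job_update_rc() does: if the job failed but
 * never produced an error object, synthesize one from the return code. */
static void toy_job_update_rc(ToyJob *job)
{
    if (job->ret && !job->err) {
        job->err = strerror(-job->ret);
    }
}

int main(void)
{
    ToyJob job = { .ret = -ENOSPC, .err = NULL };

    toy_job_update_rc(&job);
    printf("ret=%d error=%s\n", job.ret, job.err ? job.err : "(none)");
    return 0;
}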

View File

@@ -1,108 +0,0 @@
From 17511eb281e005da6e617acd12c81a0a1fa1771d Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:09 +0100
Subject: jobs: add exit shim
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-4-jsnow@redhat.com>
Patchwork-id: 82273
O-Subject: [RHEL8/rhel qemu-kvm PATCH 03/25] jobs: add exit shim
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
All jobs do the same thing when they leave their running loop:
- Store the return code in a structure
- wait to receive this structure in the main thread
- signal job completion via job_completed
Few jobs do anything beyond exactly this. Consolidate this exit
logic for a net reduction in SLOC.
More seriously, when we utilize job_defer_to_main_loop_bh to call
a function that calls job_completed, job_finalize_single will run
in a context where it has recursively taken the aio_context lock,
which can cause hangs if it puts down a reference that causes a flush.
You can observe this in practice by looking at mirror_exit's careful
placement of job_completed and bdrv_unref calls.
If we centralize job exiting, we can signal job completion from outside
of the aio_context, which should allow for job cleanup code to run with
only one lock, which makes cleanup callbacks less tricky to write.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180830015734.19765-4-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 00359a71d45a414ee47d8e423104dc0afd24ec65)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
include/qemu/job.h | 11 +++++++++++
job.c | 18 ++++++++++++++++++
2 files changed, 29 insertions(+)
diff --git a/include/qemu/job.h b/include/qemu/job.h
index e0e9987..1144d67 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -209,6 +209,17 @@ struct JobDriver {
void (*drain)(Job *job);
/**
+ * If the callback is not NULL, exit will be invoked from the main thread
+ * when the job's coroutine has finished, but before transactional
+ * convergence; before @prepare or @abort.
+ *
+ * FIXME TODO: This callback is only temporary to transition remaining jobs
+ * to prepare/commit/abort/clean callbacks and will be removed before 3.1.
+ * is released.
+ */
+ void (*exit)(Job *job);
+
+ /**
* If the callback is not NULL, prepare will be invoked when all the jobs
* belonging to the same transaction complete; or upon this job's completion
* if it is not in a transaction.
diff --git a/job.c b/job.c
index 276024a..abe91af 100644
--- a/job.c
+++ b/job.c
@@ -535,6 +535,18 @@ void job_drain(Job *job)
}
}
+static void job_exit(void *opaque)
+{
+ Job *job = (Job *)opaque;
+ AioContext *aio_context = job->aio_context;
+
+ if (job->driver->exit) {
+ aio_context_acquire(aio_context);
+ job->driver->exit(job);
+ aio_context_release(aio_context);
+ }
+ job_completed(job, job->ret);
+}
/**
* All jobs must allow a pause point before entering their job proper. This
@@ -547,6 +559,12 @@ static void coroutine_fn job_co_entry(void *opaque)
assert(job && job->driver && job->driver->run);
job_pause_point(job);
job->ret = job->driver->run(job, &job->err);
+ if (!job->deferred_to_main_loop) {
+ job->deferred_to_main_loop = true;
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
+ job_exit,
+ job);
+ }
}
--
1.8.3.1
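
Conceptually the shim works like this: when the coroutine body returns,
job_exit is scheduled as a bottom half on the main loop; job_exit runs the
optional .exit callback under the job's AioContext and only then signals
completion. The self-contained sketch below fakes the bottom-half
scheduling (aio_bh_schedule_oneshot in the patch) with a single pending
callback drained by a toy main loop; apart from the names quoted from the
patch, every type and helper here is invented for illustration, and all
locking is omitted.

#include <stdbool.h>
#include <stdio.h>

typedef struct Job Job;

typedef struct {
    int (*run)(Job *job);    /* coroutine body, simplified to a plain call */
    void (*exit)(Job *job);  /* optional cleanup run in the "main loop" */
} JobDriver;

struct Job {
    const JobDriver *driver;
    int ret;
    bool deferred_to_main_loop;
};

/* Toy replacement for aio_bh_schedule_oneshot(): one pending bottom half. */
static void (*pending_bh)(void *opaque);
static void *pending_opaque;

static void schedule_oneshot(void (*fn)(void *), void *opaque)
{
    pending_bh = fn;
    pending_opaque = opaque;
}

static void main_loop_run_once(void)
{
    if (pending_bh) {
        void (*fn)(void *) = pending_bh;
        pending_bh = NULL;
        fn(pending_opaque);
    }
}

static void job_completed(Job *job)
{
    printf("job completed, ret=%d\n", job->ret);
}

/* Counterpart of the patch's job_exit(): run .exit (if any), then complete. */
static void job_exit(void *opaque)
{
    Job *job = opaque;

    if (job->driver->exit) {
        job->driver->exit(job);     /* cleanup outside the coroutine */
    }
    job_completed(job);
}

/* Counterpart of job_co_entry(): run the job, then defer exit to the loop. */
static void job_co_entry(Job *job)
{
    job->ret = job->driver->run(job);
    job->deferred_to_main_loop = true;
    schedule_oneshot(job_exit, job);
}

static int demo_run(Job *job)
{
    (void)job;
    return 0;
}

static void demo_exit(Job *job)
{
    printf("cleanup, ret so far %d\n", job->ret);
}

static const JobDriver demo_driver = { .run = demo_run, .exit = demo_exit };

int main(void)
{
    Job job = { .driver = &demo_driver };

    job_co_entry(&job);
    main_loop_run_once();    /* the main loop picks up the deferred exit */
    return 0;
}

At this point in the series the shim path is only taken for jobs that have
not already deferred to the main loop themselves; a later patch removes
job_defer_to_main_loop and makes the scheduling unconditional, as in the
sketch.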

View File

@@ -1,115 +0,0 @@
From 912e8eaa87f8dab40466cf0d45c3290d02e6a9d5 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:10 +0100
Subject: block/commit: utilize job_exit shim
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-5-jsnow@redhat.com>
Patchwork-id: 82265
O-Subject: [RHEL8/rhel qemu-kvm PATCH 04/25] block/commit: utilize job_exit shim
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Change the manual deferment to commit_complete into the implicit
callback to job_exit, renaming commit_complete to commit_exit.
This conversion does change the timing of when job_completed is
called to after the bdrv_replace_node and bdrv_unref calls, which
could have implications for bjob->blk which will now be put down
after this cleanup.
Kevin highlights that we did not take any permissions for that backend
at job creation time, so it is safe to reorder these operations.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180830015734.19765-5-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit f369b48dc4095861223f9bc4329935599e03b1c5)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/commit.c | 22 +++++-----------------
1 file changed, 5 insertions(+), 17 deletions(-)
diff --git a/block/commit.c b/block/commit.c
index af7579d..25b3cb8 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -68,19 +68,13 @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
return 0;
}
-typedef struct {
- int ret;
-} CommitCompleteData;
-
-static void commit_complete(Job *job, void *opaque)
+static void commit_exit(Job *job)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
BlockJob *bjob = &s->common;
- CommitCompleteData *data = opaque;
BlockDriverState *top = blk_bs(s->top);
BlockDriverState *base = blk_bs(s->base);
BlockDriverState *commit_top_bs = s->commit_top_bs;
- int ret = data->ret;
bool remove_commit_top_bs = false;
/* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
@@ -91,10 +85,10 @@ static void commit_complete(Job *job, void *opaque)
* the normal backing chain can be restored. */
blk_unref(s->base);
- if (!job_is_cancelled(job) && ret == 0) {
+ if (!job_is_cancelled(job) && job->ret == 0) {
/* success */
- ret = bdrv_drop_intermediate(s->commit_top_bs, base,
- s->backing_file_str);
+ job->ret = bdrv_drop_intermediate(s->commit_top_bs, base,
+ s->backing_file_str);
} else {
/* XXX Can (or should) we somehow keep 'consistent read' blocked even
* after the failed/cancelled commit job is gone? If we already wrote
@@ -117,9 +111,6 @@ static void commit_complete(Job *job, void *opaque)
* bdrv_set_backing_hd() to fail. */
block_job_remove_all_bdrv(bjob);
- job_completed(job, ret);
- g_free(data);
-
/* If bdrv_drop_intermediate() didn't already do that, remove the commit
* filter driver from the backing chain. Do this as the final step so that
* the 'consistent read' permission can be granted. */
@@ -137,7 +128,6 @@ static void commit_complete(Job *job, void *opaque)
static int coroutine_fn commit_run(Job *job, Error **errp)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
- CommitCompleteData *data;
int64_t offset;
uint64_t delay_ns = 0;
int ret = 0;
@@ -210,9 +200,6 @@ static int coroutine_fn commit_run(Job *job, Error **errp)
out:
qemu_vfree(buf);
- data = g_malloc(sizeof(*data));
- data->ret = ret;
- job_defer_to_main_loop(&s->common.job, commit_complete, data);
return ret;
}
@@ -224,6 +211,7 @@ static const BlockJobDriver commit_job_driver = {
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = commit_run,
+ .exit = commit_exit,
},
};
--
1.8.3.1
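
Stripped of the block-layer specifics, the conversion pattern is: instead
of allocating a *CompleteData struct and deferring a callback that calls
job_completed, the driver reads and, where needed, overwrites job->ret from
its .exit hook and lets the job core signal completion afterwards. The
mirror, backup, stream and test conversions later in this series follow the
same shape. In the sketch below, toy_cleanup is an invented placeholder for
the real graph-manipulation calls (bdrv_drop_intermediate and friends) and
the Job type is reduced to the one field that matters here.

#include <stdio.h>

typedef struct {
    int ret;   /* set by the core from .run's return value before .exit runs */
} Job;

/* Invented placeholder for the real cleanup work; 0 on success, -errno on
 * failure. */
static int toy_cleanup(void)
{
    return 0;
}

/* Old shape (removed by the patch), for comparison:
 *
 *     static void commit_complete(Job *job, void *opaque)
 *     {
 *         CommitCompleteData *data = opaque;
 *         ... cleanup ...
 *         job_completed(job, data->ret);
 *         g_free(data);
 *     }
 *
 * New shape: the .exit hook adjusts job->ret in place and returns. */
static void toy_exit(Job *job)
{
    if (job->ret == 0) {
        job->ret = toy_cleanup();   /* the success path may still fail here */
    }
    /* no job_completed() call: the core does that after .exit returns */
}

int main(void)
{
    Job job = { .ret = 0 };

    toy_exit(&job);
    printf("final ret=%d\n", job.ret);
    return 0;
}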

View File

@@ -1,152 +0,0 @@
From 2322917770da98e175e7ae8bf0bb1a624ec3cebc Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Wed, 29 Aug 2018 21:57:30 -0400
Subject: block/mirror: utilize job_exit shim
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-6-jsnow@redhat.com>
Patchwork-id: 82269
O-Subject: [RHEL8/rhel qemu-kvm PATCH 05/25] block/mirror: utilize job_exit
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Change the manual deferment to mirror_exit into the implicit
callback to job_exit and the mirror_exit callback.
This does change the order of some bdrv_unref calls relative to
job_completed, but thanks to the new context in which .exit is called, it
is safe to defer any flushing of nodes to the job_finalize_single cleanup
stage.
Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20180830015734.19765-6-jsnow@redhat.com
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 7b508f6b7a38a8d9729772fa6e525da883fb120b)
Signed-off-by: John Snow <jsnow@redhat.com>
---
block/mirror.c | 29 +++++++++++------------------
1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/block/mirror.c b/block/mirror.c
index 6637f2b..4a9558d 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -607,26 +607,22 @@ static void mirror_wait_for_all_io(MirrorBlockJob *s)
}
}
-typedef struct {
- int ret;
-} MirrorExitData;
-
-static void mirror_exit(Job *job, void *opaque)
+static void mirror_exit(Job *job)
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
BlockJob *bjob = &s->common;
- MirrorExitData *data = opaque;
MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
AioContext *replace_aio_context = NULL;
BlockDriverState *src = s->mirror_top_bs->backing->bs;
BlockDriverState *target_bs = blk_bs(s->target);
BlockDriverState *mirror_top_bs = s->mirror_top_bs;
Error *local_err = NULL;
+ int ret = job->ret;
bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
- /* Make sure that the source BDS doesn't go away before we called
- * job_completed(). */
+ /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
+ * before we can call bdrv_drained_end */
bdrv_ref(src);
bdrv_ref(mirror_top_bs);
bdrv_ref(target_bs);
@@ -652,7 +648,7 @@ static void mirror_exit(Job *job, void *opaque)
bdrv_set_backing_hd(target_bs, backing, &local_err);
if (local_err) {
error_report_err(local_err);
- data->ret = -EPERM;
+ ret = -EPERM;
}
}
}
@@ -662,7 +658,7 @@ static void mirror_exit(Job *job, void *opaque)
aio_context_acquire(replace_aio_context);
}
- if (s->should_complete && data->ret == 0) {
+ if (s->should_complete && ret == 0) {
BlockDriverState *to_replace = src;
if (s->to_replace) {
to_replace = s->to_replace;
@@ -679,7 +675,7 @@ static void mirror_exit(Job *job, void *opaque)
bdrv_drained_end(target_bs);
if (local_err) {
error_report_err(local_err);
- data->ret = -EPERM;
+ ret = -EPERM;
}
}
if (s->to_replace) {
@@ -710,12 +706,12 @@ static void mirror_exit(Job *job, void *opaque)
blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
bs_opaque->job = NULL;
- job_completed(job, data->ret);
- g_free(data);
bdrv_drained_end(src);
bdrv_unref(mirror_top_bs);
bdrv_unref(src);
+
+ job->ret = ret;
}
static void mirror_throttle(MirrorBlockJob *s)
@@ -815,7 +811,6 @@ static int mirror_flush(MirrorBlockJob *s)
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
- MirrorExitData *data;
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
BlockDriverState *target_bs = blk_bs(s->target);
bool need_drain = true;
@@ -1035,14 +1030,10 @@ immediate_exit:
g_free(s->in_flight_bitmap);
bdrv_dirty_iter_free(s->dbi);
- data = g_malloc(sizeof(*data));
- data->ret = ret;
-
if (need_drain) {
bdrv_drained_begin(bs);
}
- job_defer_to_main_loop(&s->common.job, mirror_exit, data);
return ret;
}
@@ -1141,6 +1132,7 @@ static const BlockJobDriver mirror_job_driver = {
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = mirror_run,
+ .exit = mirror_exit,
.pause = mirror_pause,
.complete = mirror_complete,
},
@@ -1157,6 +1149,7 @@ static const BlockJobDriver commit_active_job_driver = {
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = mirror_run,
+ .exit = mirror_exit,
.pause = mirror_pause,
.complete = mirror_complete,
},
--
1.8.3.1

View File

@@ -1,307 +0,0 @@
From 83d2840eeadd8a55b796eae5454783d42913963c Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:12 +0100
Subject: jobs: utilize job_exit shim
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-7-jsnow@redhat.com>
Patchwork-id: 82267
O-Subject: [RHEL8/rhel qemu-kvm PATCH 06/25] jobs: utilize job_exit shim
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Utilize the job_exit shim by not calling job_defer_to_main_loop, and
where applicable, converting the deferred callback into the job_exit
callback.
This converts backup, stream, create, and the unit tests all at once.
Most of these jobs do not see any changes to the order in which they
clean up their resources, except the test-blockjob-txn test, which
now puts down its bs before job_completed is called.
This is safe for the same reason the reordering in the mirror job is
safe, because job_completed no longer runs under two locks, making
the unref safe even if it causes a flush.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180830015734.19765-7-jsnow@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit eb23654dbe43b549ea2a9ebff9d8edf544d34a73)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/backup.c | 16 ----------------
block/create.c | 14 +++-----------
block/stream.c | 22 +++++++---------------
tests/test-bdrv-drain.c | 6 ------
tests/test-blockjob-txn.c | 11 ++---------
tests/test-blockjob.c | 10 ++++------
6 files changed, 16 insertions(+), 63 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 1e965d5..a67b7fa 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -380,18 +380,6 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
}
}
-typedef struct {
- int ret;
-} BackupCompleteData;
-
-static void backup_complete(Job *job, void *opaque)
-{
- BackupCompleteData *data = opaque;
-
- job_completed(job, data->ret);
- g_free(data);
-}
-
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
uint64_t delay_ns;
@@ -483,7 +471,6 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
{
BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
- BackupCompleteData *data;
BlockDriverState *bs = blk_bs(job->common.blk);
int64_t offset, nb_clusters;
int ret = 0;
@@ -584,9 +571,6 @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
qemu_co_rwlock_unlock(&job->flush_rwlock);
hbitmap_free(job->copy_bitmap);
- data = g_malloc(sizeof(*data));
- data->ret = ret;
- job_defer_to_main_loop(&job->common.job, backup_complete, data);
return ret;
}
diff --git a/block/create.c b/block/create.c
index 26a385c..9534121 100644
--- a/block/create.c
+++ b/block/create.c
@@ -34,28 +34,20 @@ typedef struct BlockdevCreateJob {
Job common;
BlockDriver *drv;
BlockdevCreateOptions *opts;
- int ret;
} BlockdevCreateJob;
-static void blockdev_create_complete(Job *job, void *opaque)
-{
- BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
-
- job_completed(job, s->ret);
-}
-
static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
{
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
+ int ret;
job_progress_set_remaining(&s->common, 1);
- s->ret = s->drv->bdrv_co_create(s->opts, errp);
+ ret = s->drv->bdrv_co_create(s->opts, errp);
job_progress_update(&s->common, 1);
qapi_free_BlockdevCreateOptions(s->opts);
- job_defer_to_main_loop(&s->common, blockdev_create_complete, NULL);
- return s->ret;
+ return ret;
}
static const JobDriver blockdev_create_job_driver = {
diff --git a/block/stream.c b/block/stream.c
index 26a7753..67e1e72 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -54,20 +54,16 @@ static int coroutine_fn stream_populate(BlockBackend *blk,
return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ);
}
-typedef struct {
- int ret;
-} StreamCompleteData;
-
-static void stream_complete(Job *job, void *opaque)
+static void stream_exit(Job *job)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockJob *bjob = &s->common;
- StreamCompleteData *data = opaque;
BlockDriverState *bs = blk_bs(bjob->blk);
BlockDriverState *base = s->base;
Error *local_err = NULL;
+ int ret = job->ret;
- if (!job_is_cancelled(job) && bs->backing && data->ret == 0) {
+ if (!job_is_cancelled(job) && bs->backing && ret == 0) {
const char *base_id = NULL, *base_fmt = NULL;
if (base) {
base_id = s->backing_file_str;
@@ -75,11 +71,11 @@ static void stream_complete(Job *job, void *opaque)
base_fmt = base->drv->format_name;
}
}
- data->ret = bdrv_change_backing_file(bs, base_id, base_fmt);
+ ret = bdrv_change_backing_file(bs, base_id, base_fmt);
bdrv_set_backing_hd(bs, base, &local_err);
if (local_err) {
error_report_err(local_err);
- data->ret = -EPERM;
+ ret = -EPERM;
goto out;
}
}
@@ -93,14 +89,12 @@ out:
}
g_free(s->backing_file_str);
- job_completed(job, data->ret);
- g_free(data);
+ job->ret = ret;
}
static int coroutine_fn stream_run(Job *job, Error **errp)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
- StreamCompleteData *data;
BlockBackend *blk = s->common.blk;
BlockDriverState *bs = blk_bs(blk);
BlockDriverState *base = s->base;
@@ -203,9 +197,6 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
out:
/* Modify backing chain and close BDSes in main loop */
- data = g_malloc(sizeof(*data));
- data->ret = ret;
- job_defer_to_main_loop(&s->common.job, stream_complete, data);
return ret;
}
@@ -215,6 +206,7 @@ static const BlockJobDriver stream_job_driver = {
.job_type = JOB_TYPE_STREAM,
.free = block_job_free,
.run = stream_run,
+ .exit = stream_exit,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
},
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 00604df..9bcb3c7 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -752,11 +752,6 @@ typedef struct TestBlockJob {
bool should_complete;
} TestBlockJob;
-static void test_job_completed(Job *job, void *opaque)
-{
- job_completed(job, 0);
-}
-
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
@@ -770,7 +765,6 @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
job_pause_point(&s->common.job);
}
- job_defer_to_main_loop(&s->common.job, test_job_completed, NULL);
return 0;
}
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
index 82cedee..ef29f35 100644
--- a/tests/test-blockjob-txn.c
+++ b/tests/test-blockjob-txn.c
@@ -24,17 +24,11 @@ typedef struct {
int *result;
} TestBlockJob;
-static void test_block_job_complete(Job *job, void *opaque)
+static void test_block_job_exit(Job *job)
{
BlockJob *bjob = container_of(job, BlockJob, job);
BlockDriverState *bs = blk_bs(bjob->blk);
- int rc = (intptr_t)opaque;
- if (job_is_cancelled(job)) {
- rc = -ECANCELED;
- }
-
- job_completed(job, rc);
bdrv_unref(bs);
}
@@ -54,8 +48,6 @@ static int coroutine_fn test_block_job_run(Job *job, Error **errp)
}
}
- job_defer_to_main_loop(job, test_block_job_complete,
- (void *)(intptr_t)s->rc);
return s->rc;
}
@@ -81,6 +73,7 @@ static const BlockJobDriver test_block_job_driver = {
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = test_block_job_run,
+ .exit = test_block_job_exit,
},
};
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index 408a226..ad4a65b 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -163,11 +163,10 @@ typedef struct CancelJob {
bool completed;
} CancelJob;
-static void cancel_job_completed(Job *job, void *opaque)
+static void cancel_job_exit(Job *job)
{
- CancelJob *s = opaque;
+ CancelJob *s = container_of(job, CancelJob, common.job);
s->completed = true;
- job_completed(job, 0);
}
static void cancel_job_complete(Job *job, Error **errp)
@@ -182,7 +181,7 @@ static int coroutine_fn cancel_job_run(Job *job, Error **errp)
while (!s->should_complete) {
if (job_is_cancelled(&s->common.job)) {
- goto defer;
+ return 0;
}
if (!job_is_ready(&s->common.job) && s->should_converge) {
@@ -192,8 +191,6 @@ static int coroutine_fn cancel_job_run(Job *job, Error **errp)
job_sleep_ns(&s->common.job, 100000);
}
- defer:
- job_defer_to_main_loop(&s->common.job, cancel_job_completed, s);
return 0;
}
@@ -204,6 +201,7 @@ static const BlockJobDriver test_cancel_driver = {
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = cancel_job_run,
+ .exit = cancel_job_exit,
.complete = cancel_job_complete,
},
};
--
1.8.3.1

View File

@@ -1,165 +0,0 @@
From b5532575bb8aa748dc066834d7ac150bbb6575a7 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:13 +0100
Subject: block/backup: make function variables consistently named
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-8-jsnow@redhat.com>
Patchwork-id: 82272
O-Subject: [RHEL8/rhel qemu-kvm PATCH 07/25] block/backup: make function variables consistently named
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Rename opaque_job to job to be consistent with other job implementations.
Rename 'job', the BackupBlockJob object, to 's' to also be consistent.
Suggested-by: Eric Blake <eblake@redhat.com>
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180830015734.19765-8-jsnow@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 6870277535493fea31761d8d11ec23add2de0fb0)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/backup.c | 62 +++++++++++++++++++++++++++++-----------------------------
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index a67b7fa..4d084f6 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -468,59 +468,59 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
bdrv_dirty_iter_free(dbi);
}
-static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
+static int coroutine_fn backup_run(Job *job, Error **errp)
{
- BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
- BlockDriverState *bs = blk_bs(job->common.blk);
+ BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
+ BlockDriverState *bs = blk_bs(s->common.blk);
int64_t offset, nb_clusters;
int ret = 0;
- QLIST_INIT(&job->inflight_reqs);
- qemu_co_rwlock_init(&job->flush_rwlock);
+ QLIST_INIT(&s->inflight_reqs);
+ qemu_co_rwlock_init(&s->flush_rwlock);
- nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
- job_progress_set_remaining(&job->common.job, job->len);
+ nb_clusters = DIV_ROUND_UP(s->len, s->cluster_size);
+ job_progress_set_remaining(job, s->len);
- job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
- if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
- backup_incremental_init_copy_bitmap(job);
+ s->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
+ if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
+ backup_incremental_init_copy_bitmap(s);
} else {
- hbitmap_set(job->copy_bitmap, 0, nb_clusters);
+ hbitmap_set(s->copy_bitmap, 0, nb_clusters);
}
- job->before_write.notify = backup_before_write_notify;
- bdrv_add_before_write_notifier(bs, &job->before_write);
+ s->before_write.notify = backup_before_write_notify;
+ bdrv_add_before_write_notifier(bs, &s->before_write);
- if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
+ if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
/* All bits are set in copy_bitmap to allow any cluster to be copied.
* This does not actually require them to be copied. */
- while (!job_is_cancelled(&job->common.job)) {
+ while (!job_is_cancelled(job)) {
/* Yield until the job is cancelled. We just let our before_write
* notify callback service CoW requests. */
- job_yield(&job->common.job);
+ job_yield(job);
}
- } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
- ret = backup_run_incremental(job);
+ } else if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
+ ret = backup_run_incremental(s);
} else {
/* Both FULL and TOP SYNC_MODE's require copying.. */
- for (offset = 0; offset < job->len;
- offset += job->cluster_size) {
+ for (offset = 0; offset < s->len;
+ offset += s->cluster_size) {
bool error_is_read;
int alloced = 0;
- if (yield_and_check(job)) {
+ if (yield_and_check(s)) {
break;
}
- if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
+ if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
int i;
int64_t n;
/* Check to see if these blocks are already in the
* backing file. */
- for (i = 0; i < job->cluster_size;) {
+ for (i = 0; i < s->cluster_size;) {
/* bdrv_is_allocated() only returns true/false based
* on the first set of sectors it comes across that
* are are all in the same state.
@@ -529,7 +529,7 @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
* needed but at some point that is always the case. */
alloced =
bdrv_is_allocated(bs, offset + i,
- job->cluster_size - i, &n);
+ s->cluster_size - i, &n);
i += n;
if (alloced || n == 0) {
@@ -547,29 +547,29 @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
if (alloced < 0) {
ret = alloced;
} else {
- ret = backup_do_cow(job, offset, job->cluster_size,
+ ret = backup_do_cow(s, offset, s->cluster_size,
&error_is_read, false);
}
if (ret < 0) {
/* Depending on error action, fail now or retry cluster */
BlockErrorAction action =
- backup_error_action(job, error_is_read, -ret);
+ backup_error_action(s, error_is_read, -ret);
if (action == BLOCK_ERROR_ACTION_REPORT) {
break;
} else {
- offset -= job->cluster_size;
+ offset -= s->cluster_size;
continue;
}
}
}
}
- notifier_with_return_remove(&job->before_write);
+ notifier_with_return_remove(&s->before_write);
/* wait until pending backup_do_cow() calls have completed */
- qemu_co_rwlock_wrlock(&job->flush_rwlock);
- qemu_co_rwlock_unlock(&job->flush_rwlock);
- hbitmap_free(job->copy_bitmap);
+ qemu_co_rwlock_wrlock(&s->flush_rwlock);
+ qemu_co_rwlock_unlock(&s->flush_rwlock);
+ hbitmap_free(s->copy_bitmap);
return ret;
}
--
1.8.3.1

View File

@@ -1,153 +0,0 @@
From 7fe6d53387852907871d82997fbccc2cf774bdb4 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:14 +0100
Subject: jobs: remove ret argument to job_completed; privatize it
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-9-jsnow@redhat.com>
Patchwork-id: 82271
O-Subject: [RHEL8/rhel qemu-kvm PATCH 08/25] jobs: remove ret argument to job_completed; privatize it
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Jobs are now expected to return their retcode on the stack, from the
.run callback, so we can remove that argument.
job_cancel does not need to set -ECANCELED because job_completed will
update the return code itself if the job was canceled.
While we're here, make job_completed static to job.c and remove it from
job.h; move the documentation of return code to the .run() callback and
to the job->ret property, accordingly.
Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20180830015734.19765-9-jsnow@redhat.com
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 404ff28d6ae59fc1c24d631710d4063fc68aed03)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
include/qemu/job.h | 28 +++++++++++++++-------------
job.c | 11 ++++++-----
trace-events | 2 +-
3 files changed, 22 insertions(+), 19 deletions(-)
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 1144d67..23395c1 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -124,7 +124,11 @@ typedef struct Job {
/** Estimated progress_current value at the completion of the job */
int64_t progress_total;
- /** ret code passed to job_completed. */
+ /**
+ * Return code from @run and/or @prepare callback(s).
+ * Not final until the job has reached the CONCLUDED status.
+ * 0 on success, -errno on failure.
+ */
int ret;
/**
@@ -172,7 +176,16 @@ struct JobDriver {
/** Enum describing the operation */
JobType job_type;
- /** Mandatory: Entrypoint for the Coroutine. */
+ /**
+ * Mandatory: Entrypoint for the Coroutine.
+ *
+ * This callback will be invoked when moving from CREATED to RUNNING.
+ *
+ * If this callback returns nonzero, the job transaction it is part of is
+ * aborted. If it returns zero, the job moves into the WAITING state. If it
+ * is the last job to complete in its transaction, all jobs in the
+ * transaction move from WAITING to PENDING.
+ */
int coroutine_fn (*run)(Job *job, Error **errp);
/**
@@ -496,17 +509,6 @@ void job_early_fail(Job *job);
/** Moves the @job from RUNNING to READY */
void job_transition_to_ready(Job *job);
-/**
- * @job: The job being completed.
- * @ret: The status code.
- *
- * Marks @job as completed. If @ret is non-zero, the job transaction it is part
- * of is aborted. If @ret is zero, the job moves into the WAITING state. If it
- * is the last job to complete in its transaction, all jobs in the transaction
- * move from WAITING to PENDING.
- */
-void job_completed(Job *job, int ret);
-
/** Asynchronously complete the specified @job. */
void job_complete(Job *job, Error **errp);
diff --git a/job.c b/job.c
index abe91af..61e091a 100644
--- a/job.c
+++ b/job.c
@@ -535,6 +535,8 @@ void job_drain(Job *job)
}
}
+static void job_completed(Job *job);
+
static void job_exit(void *opaque)
{
Job *job = (Job *)opaque;
@@ -545,7 +547,7 @@ static void job_exit(void *opaque)
job->driver->exit(job);
aio_context_release(aio_context);
}
- job_completed(job, job->ret);
+ job_completed(job);
}
/**
@@ -883,13 +885,12 @@ static void job_completed_txn_success(Job *job)
}
}
-void job_completed(Job *job, int ret)
+static void job_completed(Job *job)
{
assert(job && job->txn && !job_is_completed(job));
- job->ret = ret;
job_update_rc(job);
- trace_job_completed(job, ret, job->ret);
+ trace_job_completed(job, job->ret);
if (job->ret) {
job_completed_txn_abort(job);
} else {
@@ -905,7 +906,7 @@ void job_cancel(Job *job, bool force)
}
job_cancel_async(job, force);
if (!job_started(job)) {
- job_completed(job, -ECANCELED);
+ job_completed(job);
} else if (job->deferred_to_main_loop) {
job_completed_txn_abort(job);
} else {
diff --git a/trace-events b/trace-events
index c445f54..4fd2cb4 100644
--- a/trace-events
+++ b/trace-events
@@ -107,7 +107,7 @@ gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packe
# job.c
job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"
job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)"
-job_completed(void *job, int ret, int jret) "job %p ret %d corrected ret %d"
+job_completed(void *job, int ret) "job %p ret %d"
# job-qmp.c
qmp_job_cancel(void *job) "job %p"
--
1.8.3.1
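
A compressed view of why job_cancel no longer has to pass -ECANCELED: the
return code lives on the job, and job_update_rc (partially visible in the
error-canonization patch earlier in this series) corrects it for cancelled
jobs before the completion path inspects it. The sketch below models only
that correction; the cancelled flag and the toy job_completed are
simplified stand-ins for the real job_is_cancelled() and transaction
machinery.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    int ret;          /* return code from .run (0 if the job never ran) */
    bool cancelled;   /* stand-in for job_is_cancelled() */
} Job;

/* Modeled on job_update_rc(): a cancelled job that otherwise "succeeded"
 * still ends up with -ECANCELED, so callers no longer pass that value. */
static void job_update_rc(Job *job)
{
    if (job->cancelled && job->ret == 0) {
        job->ret = -ECANCELED;
    }
}

/* The now-private job_completed(): no return-code argument, it reads
 * job->ret after correction. */
static void job_completed(Job *job)
{
    job_update_rc(job);
    if (job->ret) {
        printf("aborting transaction, ret=%d\n", job->ret);
    } else {
        printf("job succeeded\n");
    }
}

int main(void)
{
    /* What job_cancel() does for a job that was never started: it simply
     * calls job_completed() and lets the correction supply -ECANCELED. */
    Job never_started = { .ret = 0, .cancelled = true };

    job_completed(&never_started);
    return 0;
}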

View File

@@ -1,119 +0,0 @@
From 1827993a08cc8c86cc40ca9ccb7ef668261b2bc4 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:15 +0100
Subject: jobs: remove job_defer_to_main_loop
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-10-jsnow@redhat.com>
Patchwork-id: 82275
O-Subject: [RHEL8/rhel qemu-kvm PATCH 09/25] jobs: remove job_defer_to_main_loop
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Now that the job infrastructure is handling the job_completed call for
all implemented jobs, we can remove the interface that allowed jobs to
schedule their own completion.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180830015734.19765-10-jsnow@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit e21a1c9831fc80ae3f3c1affdfa43350035d8588)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
include/qemu/job.h | 17 -----------------
job.c | 40 ++--------------------------------------
2 files changed, 2 insertions(+), 55 deletions(-)
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 23395c1..e0cff70 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -568,23 +568,6 @@ void job_finalize(Job *job, Error **errp);
*/
void job_dismiss(Job **job, Error **errp);
-typedef void JobDeferToMainLoopFn(Job *job, void *opaque);
-
-/**
- * @job: The job
- * @fn: The function to run in the main loop
- * @opaque: The opaque value that is passed to @fn
- *
- * This function must be called by the main job coroutine just before it
- * returns. @fn is executed in the main loop with the job AioContext acquired.
- *
- * Block jobs must call bdrv_unref(), bdrv_close(), and anything that uses
- * bdrv_drain_all() in the main loop.
- *
- * The @job AioContext is held while @fn executes.
- */
-void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque);
-
/**
* Synchronously finishes the given @job. If @finish is given, it is called to
* trigger completion or cancellation of the job.
diff --git a/job.c b/job.c
index 61e091a..e8d7aee 100644
--- a/job.c
+++ b/job.c
@@ -561,12 +561,8 @@ static void coroutine_fn job_co_entry(void *opaque)
assert(job && job->driver && job->driver->run);
job_pause_point(job);
job->ret = job->driver->run(job, &job->err);
- if (!job->deferred_to_main_loop) {
- job->deferred_to_main_loop = true;
- aio_bh_schedule_oneshot(qemu_get_aio_context(),
- job_exit,
- job);
- }
+ job->deferred_to_main_loop = true;
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
}
@@ -969,38 +965,6 @@ void job_complete(Job *job, Error **errp)
job->driver->complete(job, errp);
}
-
-typedef struct {
- Job *job;
- JobDeferToMainLoopFn *fn;
- void *opaque;
-} JobDeferToMainLoopData;
-
-static void job_defer_to_main_loop_bh(void *opaque)
-{
- JobDeferToMainLoopData *data = opaque;
- Job *job = data->job;
- AioContext *aio_context = job->aio_context;
-
- aio_context_acquire(aio_context);
- data->fn(data->job, data->opaque);
- aio_context_release(aio_context);
-
- g_free(data);
-}
-
-void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque)
-{
- JobDeferToMainLoopData *data = g_malloc(sizeof(*data));
- data->job = job;
- data->fn = fn;
- data->opaque = opaque;
- job->deferred_to_main_loop = true;
-
- aio_bh_schedule_oneshot(qemu_get_aio_context(),
- job_defer_to_main_loop_bh, data);
-}
-
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
{
Error *local_err = NULL;
--
1.8.3.1

View File

@@ -1,110 +0,0 @@
From 6c8da2ba018d7546a15c3917f52ad1cc2b5b133c Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:16 +0100
Subject: block/commit: add block job creation flags
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-11-jsnow@redhat.com>
Patchwork-id: 82264
O-Subject: [RHEL8/rhel qemu-kvm PATCH 10/25] block/commit: add block job creation flags
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Add support for taking and passing forward job creation flags.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Message-id: 20180906130225.5118-2-jsnow@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 5360782d0827854383097d560715d8d8027ee590)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/commit.c | 5 +++--
blockdev.c | 7 ++++---
include/block/block_int.h | 5 ++++-
3 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/block/commit.c b/block/commit.c
index 25b3cb8..c737664 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -254,7 +254,8 @@ static BlockDriver bdrv_commit_top = {
};
void commit_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, BlockDriverState *top, int64_t speed,
+ BlockDriverState *base, BlockDriverState *top,
+ int creation_flags, int64_t speed,
BlockdevOnError on_error, const char *backing_file_str,
const char *filter_node_name, Error **errp)
{
@@ -272,7 +273,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
}
s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL,
- speed, JOB_DEFAULT, NULL, NULL, errp);
+ speed, creation_flags, NULL, NULL, errp);
if (!s) {
return;
}
diff --git a/blockdev.c b/blockdev.c
index dcf8c8d..88ad8d9 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -3324,6 +3324,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
* BlockdevOnError change for blkmirror makes it in
*/
BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT;
+ int job_flags = JOB_DEFAULT;
if (!has_speed) {
speed = 0;
@@ -3405,15 +3406,15 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
goto out;
}
commit_active_start(has_job_id ? job_id : NULL, bs, base_bs,
- JOB_DEFAULT, speed, on_error,
+ job_flags, speed, on_error,
filter_node_name, NULL, NULL, false, &local_err);
} else {
BlockDriverState *overlay_bs = bdrv_find_overlay(bs, top_bs);
if (bdrv_op_is_blocked(overlay_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
goto out;
}
- commit_start(has_job_id ? job_id : NULL, bs, base_bs, top_bs, speed,
- on_error, has_backing_file ? backing_file : NULL,
+ commit_start(has_job_id ? job_id : NULL, bs, base_bs, top_bs, job_flags,
+ speed, on_error, has_backing_file ? backing_file : NULL,
filter_node_name, &local_err);
}
if (local_err != NULL) {
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 903b9c1..ffab0b4 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -980,6 +980,8 @@ void stream_start(const char *job_id, BlockDriverState *bs,
* @bs: Active block device.
* @top: Top block device to be committed.
* @base: Block device that will be written into, and become the new top.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
* @speed: The maximum speed, in bytes per second, or 0 for unlimited.
* @on_error: The action to take upon error.
* @backing_file_str: String to use as the backing file in @top's overlay
@@ -990,7 +992,8 @@ void stream_start(const char *job_id, BlockDriverState *bs,
*
*/
void commit_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, BlockDriverState *top, int64_t speed,
+ BlockDriverState *base, BlockDriverState *top,
+ int creation_flags, int64_t speed,
BlockdevOnError on_error, const char *backing_file_str,
const char *filter_node_name, Error **errp);
/**
--
1.8.3.1
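
The practical effect of this patch and the two that follow is that callers
can choose the job lifecycle at creation time instead of always getting
JOB_DEFAULT. The sketch below models only the flag plumbing: toy_job_start
is an invented stand-in for commit_start()/mirror_start()/stream_start()
after the change, and the toy flag values merely mirror the idea of QEMU's
JobCreateFlags (which also covers internal jobs and manual
finalize/dismiss); they are not the real definitions.

#include <stdio.h>

/* Toy creation flags; the real enum lives in include/qemu/job.h. */
enum {
    TOY_JOB_DEFAULT         = 0x00,
    TOY_JOB_MANUAL_FINALIZE = 0x02,
    TOY_JOB_MANUAL_DISMISS  = 0x04,
};

/* Invented stand-in for a job entry point that now takes creation flags as
 * a parameter instead of hard-coding the default. */
static void toy_job_start(const char *job_id, int creation_flags)
{
    printf("starting %s with flags 0x%x\n", job_id, (unsigned)creation_flags);
    if (creation_flags & TOY_JOB_MANUAL_FINALIZE) {
        printf("  -> waits for an explicit finalize step\n");
    }
    if (creation_flags & TOY_JOB_MANUAL_DISMISS) {
        printf("  -> stays visible until explicitly dismissed\n");
    }
}

int main(void)
{
    /* Roughly what the QMP handlers do once the series is complete: build
     * job_flags, then pass them through instead of JOB_DEFAULT. */
    int job_flags = TOY_JOB_DEFAULT;

    job_flags |= TOY_JOB_MANUAL_FINALIZE;
    toy_job_start("commit0", job_flags);
    return 0;
}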

View File

@@ -1,100 +0,0 @@
From d4f6cfe194df3236bf53b1093e0a7f98f0a5da0e Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:17 +0100
Subject: block/mirror: add block job creation flags
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-12-jsnow@redhat.com>
Patchwork-id: 82268
O-Subject: [RHEL8/rhel qemu-kvm PATCH 11/25] block/mirror: add block job creation flags
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Add support for taking and passing forward job creation flags.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Message-id: 20180906130225.5118-3-jsnow@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit a1999b33488daba68a1bcd7c6fdf314ddeacc6a2)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/mirror.c | 5 +++--
blockdev.c | 3 ++-
include/block/block_int.h | 5 ++++-
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/block/mirror.c b/block/mirror.c
index 4a9558d..cd13835 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -1639,7 +1639,8 @@ fail:
void mirror_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *target, const char *replaces,
- int64_t speed, uint32_t granularity, int64_t buf_size,
+ int creation_flags, int64_t speed,
+ uint32_t granularity, int64_t buf_size,
MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
@@ -1655,7 +1656,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
}
is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
- mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces,
+ mirror_start_job(job_id, bs, creation_flags, target, replaces,
speed, granularity, buf_size, backing_mode,
on_source_error, on_target_error, unmap, NULL, NULL,
&mirror_job_driver, is_none_mode, base, false,
diff --git a/blockdev.c b/blockdev.c
index 88ad8d9..d31750b 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -3700,6 +3700,7 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
bool has_copy_mode, MirrorCopyMode copy_mode,
Error **errp)
{
+ int job_flags = JOB_DEFAULT;
if (!has_speed) {
speed = 0;
@@ -3752,7 +3753,7 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
* and will allow to check whether the node still exist at mirror completion
*/
mirror_start(job_id, bs, target,
- has_replaces ? replaces : NULL,
+ has_replaces ? replaces : NULL, job_flags,
speed, granularity, buf_size, sync, backing_mode,
on_source_error, on_target_error, unmap, filter_node_name,
copy_mode, errp);
diff --git a/include/block/block_int.h b/include/block/block_int.h
index ffab0b4..b40f0bf 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -1029,6 +1029,8 @@ void commit_active_start(const char *job_id, BlockDriverState *bs,
* @target: Block device to write to.
* @replaces: Block graph node name to replace once the mirror is done. Can
* only be used when full mirroring is selected.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
* @speed: The maximum speed, in bytes per second, or 0 for unlimited.
* @granularity: The chosen granularity for the dirty bitmap.
* @buf_size: The amount of data that can be in flight at one time.
@@ -1050,7 +1052,8 @@ void commit_active_start(const char *job_id, BlockDriverState *bs,
*/
void mirror_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *target, const char *replaces,
- int64_t speed, uint32_t granularity, int64_t buf_size,
+ int creation_flags, int64_t speed,
+ uint32_t granularity, int64_t buf_size,
MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
--
1.8.3.1

@@ -1,100 +0,0 @@
From 4fd98648eb0df8157c1238a1cee36373278d44a5 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:18 +0100
Subject: block/stream: add block job creation flags
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-13-jsnow@redhat.com>
Patchwork-id: 82263
O-Subject: [RHEL8/rhel qemu-kvm PATCH 12/25] block/stream: add block job creation flags
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Add support for taking and passing forward job creation flags.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Message-id: 20180906130225.5118-4-jsnow@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit cf6320df581e6cbde6a95075266859a8f9ba9d55)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/stream.c | 5 +++--
blockdev.c | 3 ++-
include/block/block_int.h | 5 ++++-
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/block/stream.c b/block/stream.c
index 67e1e72..700eb23 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -214,7 +214,8 @@ static const BlockJobDriver stream_job_driver = {
void stream_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, const char *backing_file_str,
- int64_t speed, BlockdevOnError on_error, Error **errp)
+ int creation_flags, int64_t speed,
+ BlockdevOnError on_error, Error **errp)
{
StreamBlockJob *s;
BlockDriverState *iter;
@@ -236,7 +237,7 @@ void stream_start(const char *job_id, BlockDriverState *bs,
BLK_PERM_GRAPH_MOD,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
BLK_PERM_WRITE,
- speed, JOB_DEFAULT, NULL, NULL, errp);
+ speed, creation_flags, NULL, NULL, errp);
if (!s) {
goto fail;
}
diff --git a/blockdev.c b/blockdev.c
index d31750b..c2e6402 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -3233,6 +3233,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
AioContext *aio_context;
Error *local_err = NULL;
const char *base_name = NULL;
+ int job_flags = JOB_DEFAULT;
if (!has_on_error) {
on_error = BLOCKDEV_ON_ERROR_REPORT;
@@ -3295,7 +3296,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
base_name = has_backing_file ? backing_file : base_name;
stream_start(has_job_id ? job_id : NULL, bs, base_bs, base_name,
- has_speed ? speed : 0, on_error, &local_err);
+ job_flags, has_speed ? speed : 0, on_error, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto out;
diff --git a/include/block/block_int.h b/include/block/block_int.h
index b40f0bf..4000d2a 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -958,6 +958,8 @@ int is_windows_drive(const char *filename);
* flatten the whole backing file chain onto @bs.
* @backing_file_str: The file name that will be written to @bs as the
* the new backing file if the job completes. Ignored if @base is %NULL.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
* @speed: The maximum speed, in bytes per second, or 0 for unlimited.
* @on_error: The action to take upon error.
* @errp: Error object.
@@ -971,7 +973,8 @@ int is_windows_drive(const char *filename);
*/
void stream_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, const char *backing_file_str,
- int64_t speed, BlockdevOnError on_error, Error **errp);
+ int creation_flags, int64_t speed,
+ BlockdevOnError on_error, Error **errp);
/**
* commit_start:
--
1.8.3.1

@@ -1,180 +0,0 @@
From b0b7d48f97dd97efacf93e5529d7597bd2280095 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:19 +0100
Subject: block/commit: refactor commit to use job callbacks
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-14-jsnow@redhat.com>
Patchwork-id: 82279
O-Subject: [RHEL8/rhel qemu-kvm PATCH 13/25] block/commit: refactor commit to use job callbacks
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Use the component callbacks: prepare, abort, and clean.
NB: prepare is only called when the job has not yet failed;
and abort can be called after prepare.
complete -> prepare -> abort -> clean
complete -> abort -> clean
During the refactor, a potential problem with bdrv_drop_intermediate
was identified. The patched behavior is no worse than the pre-patch
behavior, so leave a FIXME for now, to be fixed in a future patch.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-5-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 22dffcbec62ba918db690ed44beba4bd4e970bb9)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/commit.c | 92 ++++++++++++++++++++++++++++++++--------------------------
1 file changed, 51 insertions(+), 41 deletions(-)
diff --git a/block/commit.c b/block/commit.c
index c737664..b387765 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -36,6 +36,7 @@ typedef struct CommitBlockJob {
BlockDriverState *commit_top_bs;
BlockBackend *top;
BlockBackend *base;
+ BlockDriverState *base_bs;
BlockdevOnError on_error;
int base_flags;
char *backing_file_str;
@@ -68,61 +69,67 @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
return 0;
}
-static void commit_exit(Job *job)
+static int commit_prepare(Job *job)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
- BlockJob *bjob = &s->common;
- BlockDriverState *top = blk_bs(s->top);
- BlockDriverState *base = blk_bs(s->base);
- BlockDriverState *commit_top_bs = s->commit_top_bs;
- bool remove_commit_top_bs = false;
-
- /* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
- bdrv_ref(top);
- bdrv_ref(commit_top_bs);
/* Remove base node parent that still uses BLK_PERM_WRITE/RESIZE before
* the normal backing chain can be restored. */
blk_unref(s->base);
+ s->base = NULL;
+
+ /* FIXME: bdrv_drop_intermediate treats total failures and partial failures
+ * identically. Further work is needed to disambiguate these cases. */
+ return bdrv_drop_intermediate(s->commit_top_bs, s->base_bs,
+ s->backing_file_str);
+}
- if (!job_is_cancelled(job) && job->ret == 0) {
- /* success */
- job->ret = bdrv_drop_intermediate(s->commit_top_bs, base,
- s->backing_file_str);
- } else {
- /* XXX Can (or should) we somehow keep 'consistent read' blocked even
- * after the failed/cancelled commit job is gone? If we already wrote
- * something to base, the intermediate images aren't valid any more. */
- remove_commit_top_bs = true;
+static void commit_abort(Job *job)
+{
+ CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
+ BlockDriverState *top_bs = blk_bs(s->top);
+
+ /* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
+ bdrv_ref(top_bs);
+ bdrv_ref(s->commit_top_bs);
+
+ if (s->base) {
+ blk_unref(s->base);
}
+ /* free the blockers on the intermediate nodes so that bdrv_replace_nodes
+ * can succeed */
+ block_job_remove_all_bdrv(&s->common);
+
+ /* If bdrv_drop_intermediate() failed (or was not invoked), remove the
+ * commit filter driver from the backing chain now. Do this as the final
+ * step so that the 'consistent read' permission can be granted.
+ *
+ * XXX Can (or should) we somehow keep 'consistent read' blocked even
+ * after the failed/cancelled commit job is gone? If we already wrote
+ * something to base, the intermediate images aren't valid any more. */
+ bdrv_child_try_set_perm(s->commit_top_bs->backing, 0, BLK_PERM_ALL,
+ &error_abort);
+ bdrv_replace_node(s->commit_top_bs, backing_bs(s->commit_top_bs),
+ &error_abort);
+
+ bdrv_unref(s->commit_top_bs);
+ bdrv_unref(top_bs);
+}
+
+static void commit_clean(Job *job)
+{
+ CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
+
/* restore base open flags here if appropriate (e.g., change the base back
* to r/o). These reopens do not need to be atomic, since we won't abort
* even on failure here */
- if (s->base_flags != bdrv_get_flags(base)) {
- bdrv_reopen(base, s->base_flags, NULL);
+ if (s->base_flags != bdrv_get_flags(s->base_bs)) {
+ bdrv_reopen(s->base_bs, s->base_flags, NULL);
}
+
g_free(s->backing_file_str);
blk_unref(s->top);
-
- /* If there is more than one reference to the job (e.g. if called from
- * job_finish_sync()), job_completed() won't free it and therefore the
- * blockers on the intermediate nodes remain. This would cause
- * bdrv_set_backing_hd() to fail. */
- block_job_remove_all_bdrv(bjob);
-
- /* If bdrv_drop_intermediate() didn't already do that, remove the commit
- * filter driver from the backing chain. Do this as the final step so that
- * the 'consistent read' permission can be granted. */
- if (remove_commit_top_bs) {
- bdrv_child_try_set_perm(commit_top_bs->backing, 0, BLK_PERM_ALL,
- &error_abort);
- bdrv_replace_node(commit_top_bs, backing_bs(commit_top_bs),
- &error_abort);
- }
-
- bdrv_unref(commit_top_bs);
- bdrv_unref(top);
}
static int coroutine_fn commit_run(Job *job, Error **errp)
@@ -211,7 +218,9 @@ static const BlockJobDriver commit_job_driver = {
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = commit_run,
- .exit = commit_exit,
+ .prepare = commit_prepare,
+ .abort = commit_abort,
+ .clean = commit_clean
},
};
@@ -350,6 +359,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
if (ret < 0) {
goto fail;
}
+ s->base_bs = base;
/* Required permissions are already taken with block_job_add_bdrv() */
s->top = blk_new(0, BLK_PERM_ALL);
--
1.8.3.1
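To make the ordering above concrete (complete -> prepare -> abort -> clean on the success path, complete -> abort -> clean on failure), here is a minimal standalone C model of the flow. ToyJob and toy_job_finalize are invented names used only for illustration; the real callbacks are the commit_prepare/commit_abort/commit_clean functions introduced in the diff above.

/* Standalone model of the finalization flow; all names are hypothetical
 * and only mirror the shape of the prepare/abort/clean callbacks. */
#include <stdio.h>

typedef struct ToyJob ToyJob;
struct ToyJob {
    int ret;                      /* result of the main job body */
    int (*prepare)(ToyJob *job);  /* runs only if the job has not failed */
    void (*abort)(ToyJob *job);   /* runs if the job (or prepare) failed */
    void (*clean)(ToyJob *job);   /* always runs last */
};

static void toy_job_finalize(ToyJob *job)
{
    if (job->ret == 0 && job->prepare) {
        job->ret = job->prepare(job);   /* complete -> prepare */
    }
    if (job->ret < 0 && job->abort) {
        job->abort(job);                /* ... -> abort (may follow prepare) */
    }
    if (job->clean) {
        job->clean(job);                /* ... -> clean */
    }
}

static int toy_prepare(ToyJob *job) { (void)job; puts("prepare"); return 0; }
static void toy_abort(ToyJob *job)  { (void)job; puts("abort"); }
static void toy_clean(ToyJob *job)  { (void)job; puts("clean"); }

int main(void)
{
    ToyJob ok   = { 0, toy_prepare, toy_abort, toy_clean };
    ToyJob fail = { -1, toy_prepare, toy_abort, toy_clean };

    toy_job_finalize(&ok);    /* prints: prepare, clean */
    toy_job_finalize(&fail);  /* prints: abort, clean */
    return 0;
}

A prepare callback that returns an error leaves job->ret negative, which is why abort can run after prepare, matching the sequence quoted in the message above.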

@@ -1,45 +0,0 @@
From e849bf276e59b282f3288b42abe9d6dff51dc678 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:20 +0100
Subject: block/mirror: don't install backing chain on abort
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-15-jsnow@redhat.com>
Patchwork-id: 82277
O-Subject: [RHEL8/rhel qemu-kvm PATCH 14/25] block/mirror: don't install backing chain on abort
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
In cases where we abort the block/mirror job, there's no point in
installing the new backing chain before we finish aborting.
Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20180906130225.5118-6-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit c2924ceaa7f1866148e2847c969fc1902a2524fa)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/mirror.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/block/mirror.c b/block/mirror.c
index cd13835..19b57b8 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -642,7 +642,7 @@ static void mirror_exit(Job *job)
* required before it could become a backing file of target_bs. */
bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
&error_abort);
- if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
+ if (ret == 0 && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
BlockDriverState *backing = s->is_none_mode ? src : s->base;
if (backing_bs(target_bs) != backing) {
bdrv_set_backing_hd(target_bs, backing, &local_err);
--
1.8.3.1

@@ -1,136 +0,0 @@
From 430c298d6bf9a7c8b90ad30bc2cd445e5cd6dd50 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Thu, 6 Sep 2018 09:02:15 -0400
Subject: block/mirror: conservative mirror_exit refactor
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-16-jsnow@redhat.com>
Patchwork-id: 82270
O-Subject: [RHEL8/rhel qemu-kvm PATCH 15/25] block/mirror: conservative mirr
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
For purposes of minimum code movement, refactor the mirror_exit
callback to use the post-finalization callbacks in a trivial way.
Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20180906130225.5118-7-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
[mreitz: Added comment for the mirror_exit() function]
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 737efc1eda23b904fbe0e66b37715fb0e5c3e58b)
Signed-off-by: John Snow <jsnow@redhat.com>
---
block/mirror.c | 44 +++++++++++++++++++++++++++++++++-----------
1 file changed, 33 insertions(+), 11 deletions(-)
diff --git a/block/mirror.c b/block/mirror.c
index 19b57b8..7efba77 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -79,6 +79,7 @@ typedef struct MirrorBlockJob {
int max_iov;
bool initial_zeroing_ongoing;
int in_active_write_counter;
+ bool prepared;
} MirrorBlockJob;
typedef struct MirrorBDSOpaque {
@@ -607,7 +608,12 @@ static void mirror_wait_for_all_io(MirrorBlockJob *s)
}
}
-static void mirror_exit(Job *job)
+/**
+ * mirror_exit_common: handle both abort() and prepare() cases.
+ * for .prepare, returns 0 on success and -errno on failure.
+ * for .abort cases, denoted by abort = true, MUST return 0.
+ */
+static int mirror_exit_common(Job *job)
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
BlockJob *bjob = &s->common;
@@ -617,7 +623,13 @@ static void mirror_exit(Job *job)
BlockDriverState *target_bs = blk_bs(s->target);
BlockDriverState *mirror_top_bs = s->mirror_top_bs;
Error *local_err = NULL;
- int ret = job->ret;
+ bool abort = job->ret < 0;
+ int ret = 0;
+
+ if (s->prepared) {
+ return 0;
+ }
+ s->prepared = true;
bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
@@ -642,7 +654,7 @@ static void mirror_exit(Job *job)
* required before it could become a backing file of target_bs. */
bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
&error_abort);
- if (ret == 0 && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
+ if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
BlockDriverState *backing = s->is_none_mode ? src : s->base;
if (backing_bs(target_bs) != backing) {
bdrv_set_backing_hd(target_bs, backing, &local_err);
@@ -658,11 +670,8 @@ static void mirror_exit(Job *job)
aio_context_acquire(replace_aio_context);
}
- if (s->should_complete && ret == 0) {
- BlockDriverState *to_replace = src;
- if (s->to_replace) {
- to_replace = s->to_replace;
- }
+ if (s->should_complete && !abort) {
+ BlockDriverState *to_replace = s->to_replace ?: src;
if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
@@ -711,7 +720,18 @@ static void mirror_exit(Job *job)
bdrv_unref(mirror_top_bs);
bdrv_unref(src);
- job->ret = ret;
+ return ret;
+}
+
+static int mirror_prepare(Job *job)
+{
+ return mirror_exit_common(job);
+}
+
+static void mirror_abort(Job *job)
+{
+ int ret = mirror_exit_common(job);
+ assert(ret == 0);
}
static void mirror_throttle(MirrorBlockJob *s)
@@ -1132,7 +1152,8 @@ static const BlockJobDriver mirror_job_driver = {
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = mirror_run,
- .exit = mirror_exit,
+ .prepare = mirror_prepare,
+ .abort = mirror_abort,
.pause = mirror_pause,
.complete = mirror_complete,
},
@@ -1149,7 +1170,8 @@ static const BlockJobDriver commit_active_job_driver = {
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = mirror_run,
- .exit = mirror_exit,
+ .prepare = mirror_prepare,
+ .abort = mirror_abort,
.pause = mirror_pause,
.complete = mirror_complete,
},
--
1.8.3.1

@@ -1,94 +0,0 @@
From 57ede8577bbecac73a2945ca5278662dfc019dca Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:22 +0100
Subject: block/stream: refactor stream to use job callbacks
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-17-jsnow@redhat.com>
Patchwork-id: 82280
O-Subject: [RHEL8/rhel qemu-kvm PATCH 16/25] block/stream: refactor stream to use job callbacks
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-8-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 1b57488acf1beba157bcd8c926e596342bcb5c60)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/stream.c | 23 +++++++++++++++--------
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/block/stream.c b/block/stream.c
index 700eb23..81a7ec8 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -54,16 +54,16 @@ static int coroutine_fn stream_populate(BlockBackend *blk,
return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ);
}
-static void stream_exit(Job *job)
+static int stream_prepare(Job *job)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockJob *bjob = &s->common;
BlockDriverState *bs = blk_bs(bjob->blk);
BlockDriverState *base = s->base;
Error *local_err = NULL;
- int ret = job->ret;
+ int ret = 0;
- if (!job_is_cancelled(job) && bs->backing && ret == 0) {
+ if (bs->backing) {
const char *base_id = NULL, *base_fmt = NULL;
if (base) {
base_id = s->backing_file_str;
@@ -75,12 +75,19 @@ static void stream_exit(Job *job)
bdrv_set_backing_hd(bs, base, &local_err);
if (local_err) {
error_report_err(local_err);
- ret = -EPERM;
- goto out;
+ return -EPERM;
}
}
-out:
+ return ret;
+}
+
+static void stream_clean(Job *job)
+{
+ StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
+ BlockJob *bjob = &s->common;
+ BlockDriverState *bs = blk_bs(bjob->blk);
+
/* Reopen the image back in read-only mode if necessary */
if (s->bs_flags != bdrv_get_flags(bs)) {
/* Give up write permissions before making it read-only */
@@ -89,7 +96,6 @@ out:
}
g_free(s->backing_file_str);
- job->ret = ret;
}
static int coroutine_fn stream_run(Job *job, Error **errp)
@@ -206,7 +212,8 @@ static const BlockJobDriver stream_job_driver = {
.job_type = JOB_TYPE_STREAM,
.free = block_job_free,
.run = stream_run,
- .exit = stream_exit,
+ .prepare = stream_prepare,
+ .clean = stream_clean,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
},
--
1.8.3.1

@@ -1,233 +0,0 @@
From 3817b0c67fb4636bacd9c4ebdef39f51b18e05c1 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:23 +0100
Subject: tests/blockjob: replace Blockjob with Job
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-18-jsnow@redhat.com>
Patchwork-id: 82281
O-Subject: [RHEL8/rhel qemu-kvm PATCH 17/25] tests/blockjob: replace Blockjob with Job
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
These tests don't actually test blockjobs anymore; they test
generic Job lifetimes. Change the types accordingly.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-9-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 0cc4643b01a0138543e886db8e3bf8a3f74ff8f9)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/test-blockjob.c | 98 ++++++++++++++++++++++++++-------------------------
1 file changed, 50 insertions(+), 48 deletions(-)
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index ad4a65b..8e8b680 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -206,18 +206,20 @@ static const BlockJobDriver test_cancel_driver = {
},
};
-static CancelJob *create_common(BlockJob **pjob)
+static CancelJob *create_common(Job **pjob)
{
BlockBackend *blk;
- BlockJob *job;
+ Job *job;
+ BlockJob *bjob;
CancelJob *s;
blk = create_blk(NULL);
- job = mk_job(blk, "Steve", &test_cancel_driver, true,
- JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
- job_ref(&job->job);
- assert(job->job.status == JOB_STATUS_CREATED);
- s = container_of(job, CancelJob, common);
+ bjob = mk_job(blk, "Steve", &test_cancel_driver, true,
+ JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
+ job = &bjob->job;
+ job_ref(job);
+ assert(job->status == JOB_STATUS_CREATED);
+ s = container_of(bjob, CancelJob, common);
s->blk = blk;
*pjob = job;
@@ -242,7 +244,7 @@ static void cancel_common(CancelJob *s)
static void test_cancel_created(void)
{
- BlockJob *job;
+ Job *job;
CancelJob *s;
s = create_common(&job);
@@ -251,119 +253,119 @@ static void test_cancel_created(void)
static void test_cancel_running(void)
{
- BlockJob *job;
+ Job *job;
CancelJob *s;
s = create_common(&job);
- job_start(&job->job);
- assert(job->job.status == JOB_STATUS_RUNNING);
+ job_start(job);
+ assert(job->status == JOB_STATUS_RUNNING);
cancel_common(s);
}
static void test_cancel_paused(void)
{
- BlockJob *job;
+ Job *job;
CancelJob *s;
s = create_common(&job);
- job_start(&job->job);
- assert(job->job.status == JOB_STATUS_RUNNING);
+ job_start(job);
+ assert(job->status == JOB_STATUS_RUNNING);
- job_user_pause(&job->job, &error_abort);
- job_enter(&job->job);
- assert(job->job.status == JOB_STATUS_PAUSED);
+ job_user_pause(job, &error_abort);
+ job_enter(job);
+ assert(job->status == JOB_STATUS_PAUSED);
cancel_common(s);
}
static void test_cancel_ready(void)
{
- BlockJob *job;
+ Job *job;
CancelJob *s;
s = create_common(&job);
- job_start(&job->job);
- assert(job->job.status == JOB_STATUS_RUNNING);
+ job_start(job);
+ assert(job->status == JOB_STATUS_RUNNING);
s->should_converge = true;
- job_enter(&job->job);
- assert(job->job.status == JOB_STATUS_READY);
+ job_enter(job);
+ assert(job->status == JOB_STATUS_READY);
cancel_common(s);
}
static void test_cancel_standby(void)
{
- BlockJob *job;
+ Job *job;
CancelJob *s;
s = create_common(&job);
- job_start(&job->job);
- assert(job->job.status == JOB_STATUS_RUNNING);
+ job_start(job);
+ assert(job->status == JOB_STATUS_RUNNING);
s->should_converge = true;
- job_enter(&job->job);
- assert(job->job.status == JOB_STATUS_READY);
+ job_enter(job);
+ assert(job->status == JOB_STATUS_READY);
- job_user_pause(&job->job, &error_abort);
- job_enter(&job->job);
- assert(job->job.status == JOB_STATUS_STANDBY);
+ job_user_pause(job, &error_abort);
+ job_enter(job);
+ assert(job->status == JOB_STATUS_STANDBY);
cancel_common(s);
}
static void test_cancel_pending(void)
{
- BlockJob *job;
+ Job *job;
CancelJob *s;
s = create_common(&job);
- job_start(&job->job);
- assert(job->job.status == JOB_STATUS_RUNNING);
+ job_start(job);
+ assert(job->status == JOB_STATUS_RUNNING);
s->should_converge = true;
- job_enter(&job->job);
- assert(job->job.status == JOB_STATUS_READY);
+ job_enter(job);
+ assert(job->status == JOB_STATUS_READY);
- job_complete(&job->job, &error_abort);
- job_enter(&job->job);
+ job_complete(job, &error_abort);
+ job_enter(job);
while (!s->completed) {
aio_poll(qemu_get_aio_context(), true);
}
- assert(job->job.status == JOB_STATUS_PENDING);
+ assert(job->status == JOB_STATUS_PENDING);
cancel_common(s);
}
static void test_cancel_concluded(void)
{
- BlockJob *job;
+ Job *job;
CancelJob *s;
s = create_common(&job);
- job_start(&job->job);
- assert(job->job.status == JOB_STATUS_RUNNING);
+ job_start(job);
+ assert(job->status == JOB_STATUS_RUNNING);
s->should_converge = true;
- job_enter(&job->job);
- assert(job->job.status == JOB_STATUS_READY);
+ job_enter(job);
+ assert(job->status == JOB_STATUS_READY);
- job_complete(&job->job, &error_abort);
- job_enter(&job->job);
+ job_complete(job, &error_abort);
+ job_enter(job);
while (!s->completed) {
aio_poll(qemu_get_aio_context(), true);
}
- assert(job->job.status == JOB_STATUS_PENDING);
+ assert(job->status == JOB_STATUS_PENDING);
- job_finalize(&job->job, &error_abort);
- assert(job->job.status == JOB_STATUS_CONCLUDED);
+ job_finalize(job, &error_abort);
+ assert(job->status == JOB_STATUS_CONCLUDED);
cancel_common(s);
}
--
1.8.3.1

@@ -1,88 +0,0 @@
From f641d3f6946af31724c578aa6f09ba883bb5fab3 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:24 +0100
Subject: tests/test-blockjob: remove exit callback
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-19-jsnow@redhat.com>
Patchwork-id: 82276
O-Subject: [RHEL8/rhel qemu-kvm PATCH 18/25] tests/test-blockjob: remove exit callback
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
We remove the exit callback and the completed boolean along with it.
We can simulate it just fine by waiting for the job to defer to the
main loop, and then giving it one final kick to get the main loop
portion to run.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-10-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 977d26fdbeb35d8d2d0f203f9556d44a353e0dfd)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/test-blockjob.c | 16 ++++++----------
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index 8e8b680..de4c1c2 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -160,15 +160,8 @@ typedef struct CancelJob {
BlockBackend *blk;
bool should_converge;
bool should_complete;
- bool completed;
} CancelJob;
-static void cancel_job_exit(Job *job)
-{
- CancelJob *s = container_of(job, CancelJob, common.job);
- s->completed = true;
-}
-
static void cancel_job_complete(Job *job, Error **errp)
{
CancelJob *s = container_of(job, CancelJob, common.job);
@@ -201,7 +194,6 @@ static const BlockJobDriver test_cancel_driver = {
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = cancel_job_run,
- .exit = cancel_job_exit,
.complete = cancel_job_complete,
},
};
@@ -335,9 +327,11 @@ static void test_cancel_pending(void)
job_complete(job, &error_abort);
job_enter(job);
- while (!s->completed) {
+ while (!job->deferred_to_main_loop) {
aio_poll(qemu_get_aio_context(), true);
}
+ assert(job->status == JOB_STATUS_READY);
+ aio_poll(qemu_get_aio_context(), true);
assert(job->status == JOB_STATUS_PENDING);
cancel_common(s);
@@ -359,9 +353,11 @@ static void test_cancel_concluded(void)
job_complete(job, &error_abort);
job_enter(job);
- while (!s->completed) {
+ while (!job->deferred_to_main_loop) {
aio_poll(qemu_get_aio_context(), true);
}
+ assert(job->status == JOB_STATUS_READY);
+ aio_poll(qemu_get_aio_context(), true);
assert(job->status == JOB_STATUS_PENDING);
job_finalize(job, &error_abort);
--
1.8.3.1

@@ -1,53 +0,0 @@
From 43b1e07411d06cd676f3f55e14e0ac1082a679d0 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:25 +0100
Subject: tests/test-blockjob-txn: move .exit to .clean
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-20-jsnow@redhat.com>
Patchwork-id: 82282
O-Subject: [RHEL8/rhel qemu-kvm PATCH 19/25] tests/test-blockjob-txn: move .exit to .clean
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
The exit callback in this test actually only performs cleanup.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-11-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit e4dad4275d51b594c8abbe726a4927f6f388e427)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/test-blockjob-txn.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
index ef29f35..86606f9 100644
--- a/tests/test-blockjob-txn.c
+++ b/tests/test-blockjob-txn.c
@@ -24,7 +24,7 @@ typedef struct {
int *result;
} TestBlockJob;
-static void test_block_job_exit(Job *job)
+static void test_block_job_clean(Job *job)
{
BlockJob *bjob = container_of(job, BlockJob, job);
BlockDriverState *bs = blk_bs(bjob->blk);
@@ -73,7 +73,7 @@ static const BlockJobDriver test_block_job_driver = {
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.run = test_block_job_run,
- .exit = test_block_job_exit,
+ .clean = test_block_job_clean,
},
};
--
1.8.3.1

@@ -1,156 +0,0 @@
From ea31341d12bc2080f7a1b606dcf578376d6a4637 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:26 +0100
Subject: jobs: remove .exit callback
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-21-jsnow@redhat.com>
Patchwork-id: 82283
O-Subject: [RHEL8/rhel qemu-kvm PATCH 20/25] jobs: remove .exit callback
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Now that all of the jobs use the component finalization callbacks,
there's no use for the heavy-hammer .exit callback anymore.
job_exit becomes a glorified type shim so that we can call
job_completed from aio_bh_schedule_oneshot.
Move these three functions down into job.c to eliminate a
forward reference.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-12-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit ccbfb3319aa265e71c16dac976ff857d0a5bcb4b)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
include/qemu/job.h | 11 --------
job.c | 77 ++++++++++++++++++++++++------------------------------
2 files changed, 34 insertions(+), 54 deletions(-)
diff --git a/include/qemu/job.h b/include/qemu/job.h
index e0cff70..5cb0681 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -222,17 +222,6 @@ struct JobDriver {
void (*drain)(Job *job);
/**
- * If the callback is not NULL, exit will be invoked from the main thread
- * when the job's coroutine has finished, but before transactional
- * convergence; before @prepare or @abort.
- *
- * FIXME TODO: This callback is only temporary to transition remaining jobs
- * to prepare/commit/abort/clean callbacks and will be removed before 3.1.
- * is released.
- */
- void (*exit)(Job *job);
-
- /**
* If the callback is not NULL, prepare will be invoked when all the jobs
* belonging to the same transaction complete; or upon this job's completion
* if it is not in a transaction.
diff --git a/job.c b/job.c
index e8d7aee..87c9aa4 100644
--- a/job.c
+++ b/job.c
@@ -535,49 +535,6 @@ void job_drain(Job *job)
}
}
-static void job_completed(Job *job);
-
-static void job_exit(void *opaque)
-{
- Job *job = (Job *)opaque;
- AioContext *aio_context = job->aio_context;
-
- if (job->driver->exit) {
- aio_context_acquire(aio_context);
- job->driver->exit(job);
- aio_context_release(aio_context);
- }
- job_completed(job);
-}
-
-/**
- * All jobs must allow a pause point before entering their job proper. This
- * ensures that jobs can be paused prior to being started, then resumed later.
- */
-static void coroutine_fn job_co_entry(void *opaque)
-{
- Job *job = opaque;
-
- assert(job && job->driver && job->driver->run);
- job_pause_point(job);
- job->ret = job->driver->run(job, &job->err);
- job->deferred_to_main_loop = true;
- aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
-}
-
-
-void job_start(Job *job)
-{
- assert(job && !job_started(job) && job->paused &&
- job->driver && job->driver->run);
- job->co = qemu_coroutine_create(job_co_entry, job);
- job->pause_count--;
- job->busy = true;
- job->paused = false;
- job_state_transition(job, JOB_STATUS_RUNNING);
- aio_co_enter(job->aio_context, job->co);
-}
-
/* Assumes the block_job_mutex is held */
static bool job_timer_not_pending(Job *job)
{
@@ -894,6 +851,40 @@ static void job_completed(Job *job)
}
}
+/** Useful only as a type shim for aio_bh_schedule_oneshot. */
+static void job_exit(void *opaque)
+{
+ Job *job = (Job *)opaque;
+ job_completed(job);
+}
+
+/**
+ * All jobs must allow a pause point before entering their job proper. This
+ * ensures that jobs can be paused prior to being started, then resumed later.
+ */
+static void coroutine_fn job_co_entry(void *opaque)
+{
+ Job *job = opaque;
+
+ assert(job && job->driver && job->driver->run);
+ job_pause_point(job);
+ job->ret = job->driver->run(job, &job->err);
+ job->deferred_to_main_loop = true;
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
+}
+
+void job_start(Job *job)
+{
+ assert(job && !job_started(job) && job->paused &&
+ job->driver && job->driver->run);
+ job->co = qemu_coroutine_create(job_co_entry, job);
+ job->pause_count--;
+ job->busy = true;
+ job->paused = false;
+ job_state_transition(job, JOB_STATUS_RUNNING);
+ aio_co_enter(job->aio_context, job->co);
+}
+
void job_cancel(Job *job, bool force)
{
if (job->status == JOB_STATUS_CONCLUDED) {
--
1.8.3.1
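The "type shim" role of job_exit() described above is a small, general C pattern: a scheduler that only accepts void (*)(void *) callbacks, plus a one-line adapter that recovers the typed pointer. The sketch below models that pattern with invented names (schedule_oneshot and job_exit_shim stand in for aio_bh_schedule_oneshot and job_exit); it is not the QEMU AioContext API.

/* Standalone model of the type-shim pattern; the scheduler here simply
 * runs the callback immediately instead of deferring it to a main loop. */
#include <stdio.h>

typedef void (*OneShotFn)(void *opaque);

static void schedule_oneshot(OneShotFn fn, void *opaque)
{
    fn(opaque);
}

typedef struct Job {
    const char *id;
    int ret;
} Job;

static void job_completed(Job *job)
{
    printf("job %s completed, ret=%d\n", job->id, job->ret);
}

/* The shim: its only purpose is to turn the untyped opaque pointer back
 * into a Job* so the typed completion function can be called. */
static void job_exit_shim(void *opaque)
{
    job_completed(opaque);
}

int main(void)
{
    Job job = { "demo", 0 };
    /* run() would normally set job.ret, then defer completion: */
    schedule_oneshot(job_exit_shim, &job);
    return 0;
}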

@@ -1,90 +0,0 @@
From 756c3ccf83d5612ca2b326a8fed8fdf1f7958adb Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:27 +0100
Subject: qapi/block-commit: expose new job properties
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-22-jsnow@redhat.com>
Patchwork-id: 82285
O-Subject: [RHEL8/rhel qemu-kvm PATCH 21/25] qapi/block-commit: expose new job properties
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-13-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 96fbf5345f60a87fab8e7ea79a2406f381027db9)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
blockdev.c | 8 ++++++++
qapi/block-core.json | 16 +++++++++++++++-
2 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/blockdev.c b/blockdev.c
index c2e6402..8efc47e 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -3314,6 +3314,8 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
bool has_backing_file, const char *backing_file,
bool has_speed, int64_t speed,
bool has_filter_node_name, const char *filter_node_name,
+ bool has_auto_finalize, bool auto_finalize,
+ bool has_auto_dismiss, bool auto_dismiss,
Error **errp)
{
BlockDriverState *bs;
@@ -3333,6 +3335,12 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
if (!has_filter_node_name) {
filter_node_name = NULL;
}
+ if (has_auto_finalize && !auto_finalize) {
+ job_flags |= JOB_MANUAL_FINALIZE;
+ }
+ if (has_auto_dismiss && !auto_dismiss) {
+ job_flags |= JOB_MANUAL_DISMISS;
+ }
/* Important Note:
* libvirt relies on the DeviceNotFound error class in order to probe for
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 5b9084a..ca7d1b3 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -1498,6 +1498,19 @@
# above @top. If this option is not given, a node name is
# autogenerated. (Since: 2.9)
#
+# @auto-finalize: When false, this job will wait in a PENDING state after it has
+# finished its work, waiting for @block-job-finalize before
+# making any block graph changes.
+# When true, this job will automatically
+# perform its abort or commit actions.
+# Defaults to true. (Since 3.1)
+#
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
+# has completely ceased all work, and awaits @block-job-dismiss.
+# When true, this job will automatically disappear from the query
+# list without user intervention.
+# Defaults to true. (Since 3.1)
+#
# Returns: Nothing on success
# If commit or stream is already active on this device, DeviceInUse
# If @device does not exist, DeviceNotFound
@@ -1518,7 +1531,8 @@
{ 'command': 'block-commit',
'data': { '*job-id': 'str', 'device': 'str', '*base': 'str', '*top': 'str',
'*backing-file': 'str', '*speed': 'int',
- '*filter-node-name': 'str' } }
+ '*filter-node-name': 'str',
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @drive-backup:
--
1.8.3.1
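The mapping from the optional QMP booleans to job flags is the same in this patch and in the block-mirror and block-stream patches that follow. The standalone sketch below restates it in one place; the numeric flag values are illustrative stand-ins, and the real JOB_DEFAULT/JOB_MANUAL_* definitions come from QEMU's job headers.

/* Standalone restatement of the boolean-to-flag mapping used by
 * block-commit, block-mirror and block-stream in this series. */
#include <stdbool.h>
#include <stdio.h>

enum {
    JOB_DEFAULT         = 0x00,
    JOB_MANUAL_FINALIZE = 0x01,   /* illustrative values only */
    JOB_MANUAL_DISMISS  = 0x02,
};

static int job_flags_from_qmp(bool has_auto_finalize, bool auto_finalize,
                              bool has_auto_dismiss, bool auto_dismiss)
{
    int job_flags = JOB_DEFAULT;

    /* An absent or true option keeps the automatic behaviour; only an
     * explicit false switches the job to manual finalize/dismiss. */
    if (has_auto_finalize && !auto_finalize) {
        job_flags |= JOB_MANUAL_FINALIZE;
    }
    if (has_auto_dismiss && !auto_dismiss) {
        job_flags |= JOB_MANUAL_DISMISS;
    }
    return job_flags;
}

int main(void)
{
    /* e.g. { "execute": "block-commit",
     *        "arguments": { "device": "drive0", "auto-finalize": false } } */
    printf("flags = %#x\n", job_flags_from_qmp(true, false, false, false));
    return 0;
}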

@@ -1,144 +0,0 @@
From 254a2b41a647cf39abaa5d94f17aef62f035d30f Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Thu, 6 Sep 2018 09:02:22 -0400
Subject: qapi/block-mirror: expose new job properties
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-23-jsnow@redhat.com>
Patchwork-id: 82274
O-Subject: [RHEL8/rhel qemu-kvm PATCH 22/25] qapi/block-mirror: expose new j
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-14-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit a6b58adec28ff43c0f29ff7c95cdd5d11e87cf61)
Signed-off-by: John Snow <jsnow@redhat.com>
---
blockdev.c | 14 ++++++++++++++
qapi/block-core.json | 30 ++++++++++++++++++++++++++++--
2 files changed, 42 insertions(+), 2 deletions(-)
diff --git a/blockdev.c b/blockdev.c
index 8efc47e..bbb3279 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -3707,6 +3707,8 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
bool has_filter_node_name,
const char *filter_node_name,
bool has_copy_mode, MirrorCopyMode copy_mode,
+ bool has_auto_finalize, bool auto_finalize,
+ bool has_auto_dismiss, bool auto_dismiss,
Error **errp)
{
int job_flags = JOB_DEFAULT;
@@ -3735,6 +3737,12 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
if (!has_copy_mode) {
copy_mode = MIRROR_COPY_MODE_BACKGROUND;
}
+ if (has_auto_finalize && !auto_finalize) {
+ job_flags |= JOB_MANUAL_FINALIZE;
+ }
+ if (has_auto_dismiss && !auto_dismiss) {
+ job_flags |= JOB_MANUAL_DISMISS;
+ }
if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
@@ -3912,6 +3920,8 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
arg->has_unmap, arg->unmap,
false, NULL,
arg->has_copy_mode, arg->copy_mode,
+ arg->has_auto_finalize, arg->auto_finalize,
+ arg->has_auto_dismiss, arg->auto_dismiss,
&local_err);
bdrv_unref(target_bs);
error_propagate(errp, local_err);
@@ -3933,6 +3943,8 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
bool has_filter_node_name,
const char *filter_node_name,
bool has_copy_mode, MirrorCopyMode copy_mode,
+ bool has_auto_finalize, bool auto_finalize,
+ bool has_auto_dismiss, bool auto_dismiss,
Error **errp)
{
BlockDriverState *bs;
@@ -3966,6 +3978,8 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
true, true,
has_filter_node_name, filter_node_name,
has_copy_mode, copy_mode,
+ has_auto_finalize, auto_finalize,
+ has_auto_dismiss, auto_dismiss,
&local_err);
error_propagate(errp, local_err);
diff --git a/qapi/block-core.json b/qapi/block-core.json
index ca7d1b3..9193d49 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -1732,6 +1732,18 @@
# @copy-mode: when to copy data to the destination; defaults to 'background'
# (Since: 3.0)
#
+# @auto-finalize: When false, this job will wait in a PENDING state after it has
+# finished its work, waiting for @block-job-finalize before
+# making any block graph changes.
+# When true, this job will automatically
+# perform its abort or commit actions.
+# Defaults to true. (Since 3.1)
+#
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
+# has completely ceased all work, and awaits @block-job-dismiss.
+# When true, this job will automatically disappear from the query
+# list without user intervention.
+# Defaults to true. (Since 3.1)
# Since: 1.3
##
{ 'struct': 'DriveMirror',
@@ -1741,7 +1753,8 @@
'*speed': 'int', '*granularity': 'uint32',
'*buf-size': 'int', '*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
- '*unmap': 'bool', '*copy-mode': 'MirrorCopyMode' } }
+ '*unmap': 'bool', '*copy-mode': 'MirrorCopyMode',
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @BlockDirtyBitmap:
@@ -2007,6 +2020,18 @@
# @copy-mode: when to copy data to the destination; defaults to 'background'
# (Since: 3.0)
#
+# @auto-finalize: When false, this job will wait in a PENDING state after it has
+# finished its work, waiting for @block-job-finalize before
+# making any block graph changes.
+# When true, this job will automatically
+# perform its abort or commit actions.
+# Defaults to true. (Since 3.1)
+#
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
+# has completely ceased all work, and awaits @block-job-dismiss.
+# When true, this job will automatically disappear from the query
+# list without user intervention.
+# Defaults to true. (Since 3.1)
# Returns: nothing on success.
#
# Since: 2.6
@@ -2028,7 +2053,8 @@
'*buf-size': 'int', '*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*filter-node-name': 'str',
- '*copy-mode': 'MirrorCopyMode' } }
+ '*copy-mode': 'MirrorCopyMode',
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @block_set_io_throttle:
--
1.8.3.1

@@ -1,108 +0,0 @@
From 50990953696a8803f6b2b7ad71901c58c375eb8c Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:29 +0100
Subject: qapi/block-stream: expose new job properties
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-24-jsnow@redhat.com>
Patchwork-id: 82278
O-Subject: [RHEL8/rhel qemu-kvm PATCH 23/25] qapi/block-stream: expose new job properties
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-15-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 241ca1ab78542f02e666636e0323bcfe3cb1d5e8)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
blockdev.c | 9 +++++++++
hmp.c | 5 +++--
qapi/block-core.json | 16 +++++++++++++++-
3 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/blockdev.c b/blockdev.c
index bbb3279..806531d 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -3226,6 +3226,8 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
bool has_backing_file, const char *backing_file,
bool has_speed, int64_t speed,
bool has_on_error, BlockdevOnError on_error,
+ bool has_auto_finalize, bool auto_finalize,
+ bool has_auto_dismiss, bool auto_dismiss,
Error **errp)
{
BlockDriverState *bs, *iter;
@@ -3295,6 +3297,13 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
/* backing_file string overrides base bs filename */
base_name = has_backing_file ? backing_file : base_name;
+ if (has_auto_finalize && !auto_finalize) {
+ job_flags |= JOB_MANUAL_FINALIZE;
+ }
+ if (has_auto_dismiss && !auto_dismiss) {
+ job_flags |= JOB_MANUAL_DISMISS;
+ }
+
stream_start(has_job_id ? job_id : NULL, bs, base_bs, base_name,
job_flags, has_speed ? speed : 0, on_error, &local_err);
if (local_err) {
diff --git a/hmp.c b/hmp.c
index 2aafb50..e3c3ecd 100644
--- a/hmp.c
+++ b/hmp.c
@@ -1865,8 +1865,9 @@ void hmp_block_stream(Monitor *mon, const QDict *qdict)
int64_t speed = qdict_get_try_int(qdict, "speed", 0);
qmp_block_stream(true, device, device, base != NULL, base, false, NULL,
- false, NULL, qdict_haskey(qdict, "speed"), speed,
- true, BLOCKDEV_ON_ERROR_REPORT, &error);
+ false, NULL, qdict_haskey(qdict, "speed"), speed, true,
+ BLOCKDEV_ON_ERROR_REPORT, false, false, false, false,
+ &error);
hmp_handle_error(mon, &error);
}
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 9193d49..d1a9c3e 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -2320,6 +2320,19 @@
# 'stop' and 'enospc' can only be used if the block device
# supports io-status (see BlockInfo). Since 1.3.
#
+# @auto-finalize: When false, this job will wait in a PENDING state after it has
+# finished its work, waiting for @block-job-finalize before
+# making any block graph changes.
+# When true, this job will automatically
+# perform its abort or commit actions.
+# Defaults to true. (Since 3.1)
+#
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
+# has completely ceased all work, and awaits @block-job-dismiss.
+# When true, this job will automatically disappear from the query
+# list without user intervention.
+# Defaults to true. (Since 3.1)
+#
# Returns: Nothing on success. If @device does not exist, DeviceNotFound.
#
# Since: 1.1
@@ -2335,7 +2348,8 @@
{ 'command': 'block-stream',
'data': { '*job-id': 'str', 'device': 'str', '*base': 'str',
'*base-node': 'str', '*backing-file': 'str', '*speed': 'int',
- '*on-error': 'BlockdevOnError' } }
+ '*on-error': 'BlockdevOnError',
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @block-job-set-speed:
--
1.8.3.1

@@ -1,73 +0,0 @@
From 6ecfc87059e78892c868227319a91adea909e09e Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:30 +0100
Subject: block/backup: qapi documentation fixup
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-25-jsnow@redhat.com>
Patchwork-id: 82284
O-Subject: [RHEL8/rhel qemu-kvm PATCH 24/25] block/backup: qapi documentation fixup
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Fix documentation to match the other jobs amended for 3.1.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-16-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit dfaff2c37dfa52ab045cf87503e60ea56317230a)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
qapi/block-core.json | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/qapi/block-core.json b/qapi/block-core.json
index d1a9c3e..2953991 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -1272,13 +1272,14 @@
# a different block device than @device).
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
-# finished its work, waiting for @block-job-finalize.
-# When true, this job will automatically perform its abort or
-# commit actions.
+# finished its work, waiting for @block-job-finalize before
+# making any block graph changes.
+# When true, this job will automatically
+# perform its abort or commit actions.
# Defaults to true. (Since 2.12)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
-# has completed ceased all work, and wait for @block-job-dismiss.
+# has completely ceased all work, and awaits @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 2.12)
@@ -1327,13 +1328,14 @@
# a different block device than @device).
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
-# finished its work, waiting for @block-job-finalize.
-# When true, this job will automatically perform its abort or
-# commit actions.
+# finished its work, waiting for @block-job-finalize before
+# making any block graph changes.
+# When true, this job will automatically
+# perform its abort or commit actions.
# Defaults to true. (Since 2.12)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
-# has completed ceased all work, and wait for @block-job-dismiss.
+# has completely ceased all work, and awaits @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 2.12)
--
1.8.3.1

@@ -1,53 +0,0 @@
From 00a437d87c6bd8ec956b25fc0dffe8397ce475b8 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Tue, 25 Sep 2018 22:34:31 +0100
Subject: blockdev: document transactional shortcomings
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20180925223431.24791-26-jsnow@redhat.com>
Patchwork-id: 82286
O-Subject: [RHEL8/rhel qemu-kvm PATCH 25/25] blockdev: document transactional shortcomings
Bugzilla: 1632939
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
Presently only the backup job really guarantees what one would consider
transactional semantics. To guard against someone helpfully adding them
in the future, document that there are shortcomings in the model that
would need to be audited at that time.
Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20180906130225.5118-17-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 66da04ddd3dcb8c61ee664b6faced132da002006)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
blockdev.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/blockdev.c b/blockdev.c
index 806531d..d97202a 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -2292,7 +2292,13 @@ static const BlkActionOps actions[] = {
.instance_size = sizeof(BlockDirtyBitmapState),
.prepare = block_dirty_bitmap_disable_prepare,
.abort = block_dirty_bitmap_disable_abort,
- }
+ },
+ /* Where are transactions for MIRROR, COMMIT and STREAM?
+ * Although these blockjobs use transaction callbacks like the backup job,
+ * these jobs do not necessarily adhere to transaction semantics.
+ * These jobs may not fully undo all of their actions on abort, nor do they
+ * necessarily work in transactions with more than one job in them.
+ */
};
/**
--
1.8.3.1

@@ -1,67 +0,0 @@
From 5b9ccef27363b61223b31312062cde1210216985 Mon Sep 17 00:00:00 2001
From: Eduardo Otubo <otubo@redhat.com>
Date: Fri, 28 Sep 2018 07:56:36 +0100
Subject: seccomp: use SIGSYS signal instead of killing the thread
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
RH-Author: Eduardo Otubo <otubo@redhat.com>
Message-id: <20180928075639.16746-3-otubo@redhat.com>
Patchwork-id: 82314
O-Subject: [RHEL-8 qemu-kvm PATCH 2/5] seccomp: use SIGSYS signal instead of killing the thread
Bugzilla: 1618356
RH-Acked-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-Acked-by: Marc-André Lureau <marcandre.lureau@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
From: Marc-André Lureau <marcandre.lureau@redhat.com>
commit 6f2231e9b0931e1998d9ed0c509adf7aedc02db2
Author: Marc-André Lureau <marcandre.lureau@redhat.com>
Date: Wed Aug 22 19:02:47 2018 +0200
seccomp: use SIGSYS signal instead of killing the thread
The seccomp action SCMP_ACT_KILL results in immediate termination of
the thread that made the bad system call. However, qemu being
multi-threaded, it keeps running. There is no easy way for parent
process / management layer (libvirt) to know about that situation.
Instead, the default SIGSYS handler when invoked with SCMP_ACT_TRAP
will terminate the program and core dump.
This may not be the most secure solution, but probably better than
just killing the offending thread. SCMP_ACT_KILL_PROCESS has been
added in Linux 4.14 to improve the situation, which I propose to use
by default if available in the next patch.
Related to:
https://bugzilla.redhat.com/show_bug.cgi?id=1594456
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Acked-by: Eduardo Otubo <otubo@redhat.com>
Signed-off-by: Eduardo Otubo <otubo@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
qemu-seccomp.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/qemu-seccomp.c b/qemu-seccomp.c
index 9cd8eb9..b117a92 100644
--- a/qemu-seccomp.c
+++ b/qemu-seccomp.c
@@ -125,7 +125,7 @@ static int seccomp_start(uint32_t seccomp_opts)
continue;
}
- rc = seccomp_rule_add_array(ctx, SCMP_ACT_KILL, blacklist[i].num,
+ rc = seccomp_rule_add_array(ctx, SCMP_ACT_TRAP, blacklist[i].num,
blacklist[i].narg, blacklist[i].arg_cmp);
if (rc < 0) {
goto seccomp_return;
--
1.8.3.1
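The behaviour change above can be reproduced outside QEMU with a few lines of libseccomp. The standalone example below (assuming the libseccomp headers are installed and the program is linked with -lseccomp; the file name and the choice of getpid are arbitrary) traps getpid() with SCMP_ACT_TRAP. Calling it then raises SIGSYS, whose default disposition terminates the whole process with a core dump, instead of silently killing only the offending thread as SCMP_ACT_KILL would.

/* Standalone libseccomp sketch: trap a syscall rather than kill the
 * calling thread. Build with: gcc sigsys-demo.c -lseccomp */
#include <seccomp.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);

    if (ctx == NULL) {
        return 1;
    }
    if (seccomp_rule_add(ctx, SCMP_ACT_TRAP, SCMP_SYS(getpid), 0) < 0 ||
        seccomp_load(ctx) < 0) {
        seccomp_release(ctx);
        return 1;
    }
    seccomp_release(ctx);

    puts("calling getpid(); expect SIGSYS and a core dump");
    syscall(SYS_getpid);   /* raises SIGSYS; the whole process terminates */
    puts("not reached");
    return 0;
}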

@@ -1,110 +0,0 @@
From 80574fd1c226ca5c8555b3bb37bc3fe121bbf69f Mon Sep 17 00:00:00 2001
From: Eduardo Otubo <otubo@redhat.com>
Date: Fri, 28 Sep 2018 07:56:37 +0100
Subject: seccomp: prefer SCMP_ACT_KILL_PROCESS if available
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
RH-Author: Eduardo Otubo <otubo@redhat.com>
Message-id: <20180928075639.16746-4-otubo@redhat.com>
Patchwork-id: 82315
O-Subject: [RHEL-8 qemu-kvm PATCH 3/5] seccomp: prefer SCMP_ACT_KILL_PROCESS if available
Bugzilla: 1618356
RH-Acked-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-Acked-by: Marc-André Lureau <marcandre.lureau@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
From: Marc-André Lureau <marcandre.lureau@redhat.com>
commit bda08a5764d470f101fa38635d30b41179a313e1
Author: Marc-André Lureau <marcandre.lureau@redhat.com>
Date: Wed Aug 22 19:02:48 2018 +0200
seccomp: prefer SCMP_ACT_KILL_PROCESS if available
The upcoming libseccomp release should have SCMP_ACT_KILL_PROCESS
action (https://github.com/seccomp/libseccomp/issues/96).
SCMP_ACT_KILL_PROCESS is preferable to immediately terminate the
offending process, rather than having the SIGSYS handler running.
Use SECCOMP_GET_ACTION_AVAIL to check availability of kernel support,
as libseccomp will fallback on SCMP_ACT_KILL otherwise, and we still
prefer SCMP_ACT_TRAP.
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Acked-by: Eduardo Otubo <otubo@redhat.com>
Signed-off-by: Eduardo Otubo <otubo@rehdat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
qemu-seccomp.c | 31 ++++++++++++++++++++++++++++++-
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/qemu-seccomp.c b/qemu-seccomp.c
index b117a92..f0c833f 100644
--- a/qemu-seccomp.c
+++ b/qemu-seccomp.c
@@ -20,6 +20,7 @@
#include <sys/prctl.h>
#include <seccomp.h>
#include "sysemu/seccomp.h"
+#include <linux/seccomp.h>
/* For some architectures (notably ARM) cacheflush is not supported until
* libseccomp 2.2.3, but configure enforces that we are using a more recent
@@ -107,12 +108,40 @@ static const struct QemuSeccompSyscall blacklist[] = {
{ SCMP_SYS(sched_get_priority_min), QEMU_SECCOMP_SET_RESOURCECTL },
};
+static inline __attribute__((unused)) int
+qemu_seccomp(unsigned int operation, unsigned int flags, void *args)
+{
+#ifdef __NR_seccomp
+ return syscall(__NR_seccomp, operation, flags, args);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+static uint32_t qemu_seccomp_get_kill_action(void)
+{
+#if defined(SECCOMP_GET_ACTION_AVAIL) && defined(SCMP_ACT_KILL_PROCESS) && \
+ defined(SECCOMP_RET_KILL_PROCESS)
+ {
+ uint32_t action = SECCOMP_RET_KILL_PROCESS;
+
+ if (qemu_seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &action) == 0) {
+ return SCMP_ACT_KILL_PROCESS;
+ }
+ }
+#endif
+
+ return SCMP_ACT_TRAP;
+}
+
static int seccomp_start(uint32_t seccomp_opts)
{
int rc = 0;
unsigned int i = 0;
scmp_filter_ctx ctx;
+ uint32_t action = qemu_seccomp_get_kill_action();
ctx = seccomp_init(SCMP_ACT_ALLOW);
if (ctx == NULL) {
@@ -125,7 +154,7 @@ static int seccomp_start(uint32_t seccomp_opts)
continue;
}
- rc = seccomp_rule_add_array(ctx, SCMP_ACT_TRAP, blacklist[i].num,
+ rc = seccomp_rule_add_array(ctx, action, blacklist[i].num,
blacklist[i].narg, blacklist[i].arg_cmp);
if (rc < 0) {
goto seccomp_return;
--
1.8.3.1

View File

@ -1,77 +0,0 @@
From ef8bae877ca544af956f8314cdd702d1c62a9b15 Mon Sep 17 00:00:00 2001
From: Eduardo Otubo <otubo@redhat.com>
Date: Fri, 28 Sep 2018 07:56:39 +0100
Subject: seccomp: set the seccomp filter to all threads
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
RH-Author: Eduardo Otubo <otubo@redhat.com>
Message-id: <20180928075639.16746-6-otubo@redhat.com>
Patchwork-id: 82316
O-Subject: [RHEL-8 qemu-kvm PATCH 5/5] seccomp: set the seccomp filter to all threads
Bugzilla: 1618356
RH-Acked-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-Acked-by: Marc-André Lureau <marcandre.lureau@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
From: Marc-André Lureau <marcandre.lureau@redhat.com>
commit 70dfabeaa79ba4d7a3b699abe1a047c8012db114
Author: Marc-André Lureau <marcandre.lureau@redhat.com>
Date: Wed Aug 22 19:02:50 2018 +0200
seccomp: set the seccomp filter to all threads
When using "-seccomp on", the seccomp policy is only applied to the
main thread, the vcpu worker thread and other worker threads created
after seccomp policy is applied; the seccomp policy is not applied to
e.g. the RCU thread because it is created before the seccomp policy is
applied and SECCOMP_FILTER_FLAG_TSYNC isn't used.
This can be verified with
for task in /proc/`pidof qemu`/task/*; do cat $task/status | grep Secc ; done
Seccomp: 2
Seccomp: 0
Seccomp: 0
Seccomp: 2
Seccomp: 2
Seccomp: 2
Starting with libseccomp 2.2.0 and kernel >= 3.17, we can use
seccomp_attr_set(ctx, SCMP_FLTATR_CTL_TSYNC, 1) to update the policy
on all threads.
The libseccomp requirement was bumped to 2.2.0 in the previous patch.
libseccomp should fail to set the filter if it can't honour
SCMP_FLTATR_CTL_TSYNC (untested), and thus -sandbox will now fail on
kernels older than 3.17.
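As a rough standalone illustration of the TSYNC attribute outside QEMU, assuming
libseccomp >= 2.2.0 and linking with -lseccomp; the fork() rule is an arbitrary
choice for the example, not what QEMU filters:

#include <stdio.h>
#include <seccomp.h>

int main(void)
{
    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
    int rc;

    if (ctx == NULL) {
        return 1;
    }
    /* Ask the kernel to apply the filter to every thread (needs >= 3.17). */
    rc = seccomp_attr_set(ctx, SCMP_FLTATR_CTL_TSYNC, 1);
    if (rc == 0) {
        /* Arbitrary example rule: trap on fork() in all threads. */
        rc = seccomp_rule_add(ctx, SCMP_ACT_TRAP, SCMP_SYS(fork), 0);
    }
    if (rc == 0) {
        rc = seccomp_load(ctx);
    }
    seccomp_release(ctx);
    printf("filter %s\n", rc == 0 ? "installed on all threads" : "rejected");
    return rc == 0 ? 0 : 1;
}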
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Acked-by: Eduardo Otubo <otubo@redhat.com>
Signed-off-by: Eduardo Otubo <otubo@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
qemu-seccomp.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/qemu-seccomp.c b/qemu-seccomp.c
index f0c833f..4729eb1 100644
--- a/qemu-seccomp.c
+++ b/qemu-seccomp.c
@@ -149,6 +149,11 @@ static int seccomp_start(uint32_t seccomp_opts)
goto seccomp_return;
}
+ rc = seccomp_attr_set(ctx, SCMP_FLTATR_CTL_TSYNC, 1);
+ if (rc != 0) {
+ goto seccomp_return;
+ }
+
for (i = 0; i < ARRAY_SIZE(blacklist); i++) {
if (!(seccomp_opts & blacklist[i].set)) {
continue;
--
1.8.3.1

View File

@ -1,185 +0,0 @@
From da9c980b19783915f8675894b88da631f27dd34d Mon Sep 17 00:00:00 2001
From: Igor Mammedov <imammedo@redhat.com>
Date: Fri, 5 Oct 2018 12:59:47 +0100
Subject: memory: cleanup side effects of memory_region_init_foo() on failure
RH-Author: Igor Mammedov <imammedo@redhat.com>
Message-id: <1538744387-84898-1-git-send-email-imammedo@redhat.com>
Patchwork-id: 82391
O-Subject: [RHEL-8 qemu-kvm PATCH] memory: cleanup side effects of memory_region_init_foo() on failure
Bugzilla: 1600365
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-Acked-by: Pankaj Gupta <pagupta@redhat.com>
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1600365
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18658506
If MemoryRegion initialization fails, it is left in a semi-initialized state
where its size is not 0 and it is still attached as a child to the owner object.
And this leads to a crash in the following use-case:
(monitor) object_add memory-backend-file,id=mem1,size=99999G,mem-path=/tmp/foo,discard-data=yes
memory.c:2083: memory_region_get_ram_ptr: Assertion `mr->ram_block' failed
Aborted (core dumped)
It happens due to the assumption that a memory region is initialized when
    memory_region_size() != 0
and that it is therefore ok to access it in
    file_backend_unparent()
        if (memory_region_size() != 0)
            memory_region_get_ram_ptr()
which happens when object_add fails and unparents the failed backend, making
file_backend_unparent() access an invalid memory region.
Fix it by making sure that the memory_region_init_foo() APIs clean up externally
visible side effects on failure (i.e. set the size to 0 and unparent the object).
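The shape of that fix, reduced to a self-contained toy (the Region type and every
name here are invented for the example and are not QEMU APIs): the fallible step
happens after an externally visible side effect, so the error path must undo that
side effect before propagating the error.

#include <stdio.h>
#include <stdlib.h>

typedef struct Region {
    size_t size;        /* externally visible, like mr->size                 */
    void  *backing;     /* the part whose allocation can actually fail       */
} Region;

static int region_init(Region *r, size_t size, const char **errp)
{
    r->size = size;                 /* visible side effect happens first ... */
    r->backing = malloc(size);
    if (r->backing == NULL) {       /* ... so it has to be rolled back here  */
        r->size = 0;
        *errp = "allocation failed";
        return -1;
    }
    *errp = NULL;
    return 0;
}

int main(void)
{
    Region r;
    const char *err = NULL;

    /* Absurd size, akin to the size=99999G reproducer above. */
    if (region_init(&r, (size_t)-1, &err) < 0) {
        printf("init failed cleanly: %s (size rolled back to %zu)\n", err, r.size);
        return 0;
    }
    free(r.backing);
    return 0;
}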
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Message-Id: <1536064777-42312-1-git-send-email-imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 1cd3d492624da399d66c4c3e6a5eabb8f96bb0a2)
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
Conflicts:
memory.c
due to the missing commit (cbfc01710 "memory, exec: switch file ram allocation functions to 'flags' parameters"),
not related to this patch, which causes a signature mismatch of
qemu_ram_alloc_from_file()/qemu_ram_alloc_from_fd()
---
memory.c | 48 ++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 42 insertions(+), 6 deletions(-)
diff --git a/memory.c b/memory.c
index e9cd446..88c75d8 100644
--- a/memory.c
+++ b/memory.c
@@ -1518,12 +1518,18 @@ void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
bool share,
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, share, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_resizeable_ram(MemoryRegion *mr,
@@ -1536,13 +1542,19 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
void *host),
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
- mr, errp);
+ mr, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
#ifdef __linux__
@@ -1555,13 +1567,19 @@ void memory_region_init_ram_from_file(MemoryRegion *mr,
const char *path,
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->align = align;
- mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
+ mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_ram_from_fd(MemoryRegion *mr,
@@ -1572,12 +1590,18 @@ void memory_region_init_ram_from_fd(MemoryRegion *mr,
int fd,
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
+ mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
#endif
@@ -1628,13 +1652,19 @@ void memory_region_init_rom_nomigrate(MemoryRegion *mr,
uint64_t size,
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->readonly = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
@@ -1645,6 +1675,7 @@ void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
uint64_t size,
Error **errp)
{
+ Error *err = NULL;
assert(ops);
memory_region_init(mr, owner, name, size);
mr->ops = ops;
@@ -1652,7 +1683,12 @@ void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
mr->terminates = true;
mr->rom_device = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_iommu(void *_iommu_mr,
--
1.8.3.1

View File

@ -1,87 +0,0 @@
From a96ed7a8374891516e626b797321d4be69cb071d Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 13:19:57 +0100
Subject: mirror: Fail gracefully for source == target
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010131957.23198-2-kwolf@redhat.com>
Patchwork-id: 82564
O-Subject: [RHEL-8 qemu-kvm PATCH 1/1] mirror: Fail gracefully for source == target
Bugzilla: 1637963
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Fam Zheng <famz@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
blockdev-mirror with the same node for source and target segfaults
today: A node is in its own backing chain, so mirror_start_job() decides
that this is an active commit. When adding the intermediate nodes with
block_job_add_bdrv(), it starts the iteration through the subchain with
the backing file of source, though, so it never reaches target and
instead runs into NULL at the base.
While we could fix that by starting with source itself, there is no
point in allowing mirroring a node into itself and I wouldn't be
surprised if this caused more problems later.
So just check for this scenario and error out.
Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
(cherry picked from commit 86fae10c64d642256cf019e6829929fa0d259c7a)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/mirror.c | 5 +++++
tests/qemu-iotests/041 | 6 ++++++
tests/qemu-iotests/041.out | 4 ++--
3 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/block/mirror.c b/block/mirror.c
index 7efba77..b61f99b 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -1516,6 +1516,11 @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs,
buf_size = DEFAULT_MIRROR_BUF_SIZE;
}
+ if (bs == target) {
+ error_setg(errp, "Can't mirror node into itself");
+ return;
+ }
+
/* In the case of active commit, add dummy driver to provide consistent
* reads on the top, while disabling it in the intermediate nodes, and make
* the backing chain writable. */
diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041
index c20ac7d..9336ab6 100755
--- a/tests/qemu-iotests/041
+++ b/tests/qemu-iotests/041
@@ -234,6 +234,12 @@ class TestSingleBlockdev(TestSingleDrive):
result = self.vm.qmp("blockdev-add", **args)
self.assert_qmp(result, 'return', {})
+ def test_mirror_to_self(self):
+ result = self.vm.qmp(self.qmp_cmd, job_id='job0',
+ device=self.qmp_target, sync='full',
+ target=self.qmp_target)
+ self.assert_qmp(result, 'error/class', 'GenericError')
+
test_large_cluster = None
test_image_not_found = None
test_small_buffer2 = None
diff --git a/tests/qemu-iotests/041.out b/tests/qemu-iotests/041.out
index c28b392..e071d0b 100644
--- a/tests/qemu-iotests/041.out
+++ b/tests/qemu-iotests/041.out
@@ -1,5 +1,5 @@
-.....................................................................................
+........................................................................................
----------------------------------------------------------------------
-Ran 85 tests
+Ran 88 tests
OK
--
1.8.3.1

View File

@ -1,141 +0,0 @@
From 0086e14eef7fc78bc1254ee888bd7d720d6ee5b9 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 13:50:54 +0100
Subject: commit: Add top-node/base-node options
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010135055.3874-2-kwolf@redhat.com>
Patchwork-id: 82569
O-Subject: [RHEL-8 qemu-kvm PATCH 1/2] commit: Add top-node/base-node options
Bugzilla: 1637970
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Fam Zheng <famz@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
The block-commit QMP command required specifying the top and base nodes
of the commit job using the file names of those nodes. While this works
in simple cases (local files with absolute paths), the file names
generated for more complicated setups can be hard to predict.
The block-commit command has more problems than just this, so we want to
replace it altogether in the long run, but libvirt needs a reliable way
to address nodes now. So we don't want to wait for a new, cleaner
command, but just add the minimal thing needed right now.
This adds two new options top-node and base-node to the command, which
allow specifying node names instead. They are mutually exclusive with
the old options.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 3c605f4074ebeb97970eb660fb56a9cb06525923)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
blockdev.c | 32 ++++++++++++++++++++++++++++++--
qapi/block-core.json | 24 ++++++++++++++++++------
2 files changed, 48 insertions(+), 8 deletions(-)
diff --git a/blockdev.c b/blockdev.c
index d97202a..df256e6 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -3324,7 +3324,9 @@ out:
}
void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
+ bool has_base_node, const char *base_node,
bool has_base, const char *base,
+ bool has_top_node, const char *top_node,
bool has_top, const char *top,
bool has_backing_file, const char *backing_file,
bool has_speed, int64_t speed,
@@ -3385,7 +3387,20 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
/* default top_bs is the active layer */
top_bs = bs;
- if (has_top && top) {
+ if (has_top_node && has_top) {
+ error_setg(errp, "'top-node' and 'top' are mutually exclusive");
+ goto out;
+ } else if (has_top_node) {
+ top_bs = bdrv_lookup_bs(NULL, top_node, errp);
+ if (top_bs == NULL) {
+ goto out;
+ }
+ if (!bdrv_chain_contains(bs, top_bs)) {
+ error_setg(errp, "'%s' is not in this backing file chain",
+ top_node);
+ goto out;
+ }
+ } else if (has_top && top) {
if (strcmp(bs->filename, top) != 0) {
top_bs = bdrv_find_backing_image(bs, top);
}
@@ -3398,7 +3413,20 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
assert(bdrv_get_aio_context(top_bs) == aio_context);
- if (has_base && base) {
+ if (has_base_node && has_base) {
+ error_setg(errp, "'base-node' and 'base' are mutually exclusive");
+ goto out;
+ } else if (has_base_node) {
+ base_bs = bdrv_lookup_bs(NULL, base_node, errp);
+ if (base_bs == NULL) {
+ goto out;
+ }
+ if (!bdrv_chain_contains(top_bs, base_bs)) {
+ error_setg(errp, "'%s' is not in this backing file chain",
+ base_node);
+ goto out;
+ }
+ } else if (has_base && base) {
base_bs = bdrv_find_backing_image(top_bs, base);
} else {
base_bs = bdrv_find_base(top_bs);
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 2953991..6f38dc0 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -1457,12 +1457,23 @@
#
# @device: the device name or node-name of a root node
#
-# @base: The file name of the backing image to write data into.
-# If not specified, this is the deepest backing image.
+# @base-node: The node name of the backing image to write data into.
+# If not specified, this is the deepest backing image.
+# (since: 3.1)
#
-# @top: The file name of the backing image within the image chain,
-# which contains the topmost data to be committed down. If
-# not specified, this is the active layer.
+# @base: Same as @base-node, except that it is a file name rather than a node
+# name. This must be the exact filename string that was used to open the
+# node; other strings, even if addressing the same file, are not
+# accepted (deprecated, use @base-node instead)
+#
+# @top-node: The node name of the backing image within the image chain
+# which contains the topmost data to be committed down. If
+# not specified, this is the active layer. (since: 3.1)
+#
+# @top: Same as @top-node, except that it is a file name rather than a node
+# name. This must be the exact filename string that was used to open the
+# node; other strings, even if addressing the same file, are not
+# accepted (deprecated, use @top-node instead)
#
# @backing-file: The backing file string to write into the overlay
# image of 'top'. If 'top' is the active layer,
@@ -1531,7 +1542,8 @@
#
##
{ 'command': 'block-commit',
- 'data': { '*job-id': 'str', 'device': 'str', '*base': 'str', '*top': 'str',
+ 'data': { '*job-id': 'str', 'device': 'str', '*base-node': 'str',
+ '*base': 'str', '*top-node': 'str', '*top': 'str',
'*backing-file': 'str', '*speed': 'int',
'*filter-node-name': 'str',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
--
1.8.3.1

View File

@ -1,127 +0,0 @@
From bb9687c8dadef42d11f3606e68e956a7c60b2487 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 13:50:55 +0100
Subject: qemu-iotests: Test commit with top-node/base-node
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010135055.3874-3-kwolf@redhat.com>
Patchwork-id: 82568
O-Subject: [RHEL-8 qemu-kvm PATCH 2/2] qemu-iotests: Test commit with top-node/base-node
Bugzilla: 1637970
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Fam Zheng <famz@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
This adds some tests for block-commit with the new options top-node and
base-node (taking node names) instead of top and base (taking file
names).
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit d57177a48fc604e5427921bf20b22ee0e6d578b3)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/qemu-iotests/040 | 52 ++++++++++++++++++++++++++++++++++++++++++++--
tests/qemu-iotests/040.out | 4 ++--
2 files changed, 52 insertions(+), 4 deletions(-)
diff --git a/tests/qemu-iotests/040 b/tests/qemu-iotests/040
index 1beb5e6..1cb1cee 100755
--- a/tests/qemu-iotests/040
+++ b/tests/qemu-iotests/040
@@ -57,9 +57,12 @@ class ImageCommitTestCase(iotests.QMPTestCase):
self.assert_no_active_block_jobs()
self.vm.shutdown()
- def run_commit_test(self, top, base, need_ready=False):
+ def run_commit_test(self, top, base, need_ready=False, node_names=False):
self.assert_no_active_block_jobs()
- result = self.vm.qmp('block-commit', device='drive0', top=top, base=base)
+ if node_names:
+ result = self.vm.qmp('block-commit', device='drive0', top_node=top, base_node=base)
+ else:
+ result = self.vm.qmp('block-commit', device='drive0', top=top, base=base)
self.assert_qmp(result, 'return', {})
self.wait_for_complete(need_ready)
@@ -101,6 +104,11 @@ class TestSingleDrive(ImageCommitTestCase):
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed"))
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed"))
+ def test_commit_node(self):
+ self.run_commit_test("mid", "base", node_names=True)
+ self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed"))
+ self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed"))
+
def test_device_not_found(self):
result = self.vm.qmp('block-commit', device='nonexistent', top='%s' % mid_img)
self.assert_qmp(result, 'error/class', 'DeviceNotFound')
@@ -123,6 +131,30 @@ class TestSingleDrive(ImageCommitTestCase):
self.assert_qmp(result, 'error/class', 'GenericError')
self.assert_qmp(result, 'error/desc', 'Base \'badfile\' not found')
+ def test_top_node_invalid(self):
+ self.assert_no_active_block_jobs()
+ result = self.vm.qmp('block-commit', device='drive0', top_node='badfile', base_node='base')
+ self.assert_qmp(result, 'error/class', 'GenericError')
+ self.assert_qmp(result, 'error/desc', "Cannot find device= nor node_name=badfile")
+
+ def test_base_node_invalid(self):
+ self.assert_no_active_block_jobs()
+ result = self.vm.qmp('block-commit', device='drive0', top_node='mid', base_node='badfile')
+ self.assert_qmp(result, 'error/class', 'GenericError')
+ self.assert_qmp(result, 'error/desc', "Cannot find device= nor node_name=badfile")
+
+ def test_top_path_and_node(self):
+ self.assert_no_active_block_jobs()
+ result = self.vm.qmp('block-commit', device='drive0', top_node='mid', base_node='base', top='%s' % mid_img)
+ self.assert_qmp(result, 'error/class', 'GenericError')
+ self.assert_qmp(result, 'error/desc', "'top-node' and 'top' are mutually exclusive")
+
+ def test_base_path_and_node(self):
+ self.assert_no_active_block_jobs()
+ result = self.vm.qmp('block-commit', device='drive0', top_node='mid', base_node='base', base='%s' % backing_img)
+ self.assert_qmp(result, 'error/class', 'GenericError')
+ self.assert_qmp(result, 'error/desc', "'base-node' and 'base' are mutually exclusive")
+
def test_top_is_active(self):
self.run_commit_test(test_img, backing_img, need_ready=True)
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed"))
@@ -139,6 +171,22 @@ class TestSingleDrive(ImageCommitTestCase):
self.assert_qmp(result, 'error/class', 'GenericError')
self.assert_qmp(result, 'error/desc', 'Base \'%s\' not found' % mid_img)
+ def test_top_and_base_node_reversed(self):
+ self.assert_no_active_block_jobs()
+ result = self.vm.qmp('block-commit', device='drive0', top_node='base', base_node='top')
+ self.assert_qmp(result, 'error/class', 'GenericError')
+ self.assert_qmp(result, 'error/desc', "'top' is not in this backing file chain")
+
+ def test_top_node_in_wrong_chain(self):
+ self.assert_no_active_block_jobs()
+
+ result = self.vm.qmp('blockdev-add', driver='null-co', node_name='null')
+ self.assert_qmp(result, 'return', {})
+
+ result = self.vm.qmp('block-commit', device='drive0', top_node='null', base_node='base')
+ self.assert_qmp(result, 'error/class', 'GenericError')
+ self.assert_qmp(result, 'error/desc', "'null' is not in this backing file chain")
+
# When the job is running on a BB that is automatically deleted on hot
# unplug, the job is cancelled when the device disappears
def test_hot_unplug(self):
diff --git a/tests/qemu-iotests/040.out b/tests/qemu-iotests/040.out
index e20a75c..802ffaa 100644
--- a/tests/qemu-iotests/040.out
+++ b/tests/qemu-iotests/040.out
@@ -1,5 +1,5 @@
-.............................
+...........................................
----------------------------------------------------------------------
-Ran 29 tests
+Ran 43 tests
OK
--
1.8.3.1

View File

@ -1,59 +0,0 @@
From 0908cd5291828eca03bbba206f133a37b87c8b41 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Wed, 10 Oct 2018 20:50:58 +0100
Subject: block: for jobs, do not clear user_paused until after the resume
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20181010205100.17689-2-jsnow@redhat.com>
Patchwork-id: 82631
O-Subject: [RHEL8/rhel qemu-kvm PATCH 1/3] block: for jobs, do not clear user_paused until after the resume
Bugzilla: 1635583
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
From: Jeff Cody <jcody@redhat.com>
The function job_cancel_async() will always cause an assert for blockjob
user resume. We set job->user_paused to false, and then call
job->driver->user_resume(). In the case of blockjobs, this is the
block_job_user_resume() function.
In that function, we assert that job.user_paused is set to true.
Unfortunately, right before calling this function, it has explicitly
been set to false.
The fix is pretty simple: set job->user_paused to false only after the
job user_resume() function has been called.
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Jeff Cody <jcody@redhat.com>
Message-id: bb183b77d8f2dd6bd67b8da559a90ac1e74b2052.1534868459.git.jcody@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
(cherry picked from commit e321c0597c7590499bacab239d7f86e257f96bcd)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
job.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/job.c b/job.c
index 87c9aa4..bb322de 100644
--- a/job.c
+++ b/job.c
@@ -705,10 +705,10 @@ static void job_cancel_async(Job *job, bool force)
{
if (job->user_paused) {
/* Do not call job_enter here, the caller will handle it. */
- job->user_paused = false;
if (job->driver->user_resume) {
job->driver->user_resume(job);
}
+ job->user_paused = false;
assert(job->pause_count > 0);
job->pause_count--;
}
--
1.8.3.1

View File

@ -1,173 +0,0 @@
From d26430360b5996c99c0e1dd95b4dbb48bd894944 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Wed, 10 Oct 2018 20:51:00 +0100
Subject: block: iotest to catch abort on forced blockjob cancel
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20181010205100.17689-4-jsnow@redhat.com>
Patchwork-id: 82632
O-Subject: [RHEL8/rhel qemu-kvm PATCH 3/3] block: iotest to catch abort on forced blockjob cancel
Bugzilla: 1635583
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
From: Jeff Cody <jcody@redhat.com>
Signed-off-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Message-id: df317f617fbe5affcf699cb8560e7b0c2e028a64.1534868459.git.jcody@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
(cherry picked from commit 26bf474ba92c76e61bea51726e22da6dfd185296)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/qemu-iotests/229 | 95 ++++++++++++++++++++++++++++++++++++++++++++++
tests/qemu-iotests/229.out | 23 +++++++++++
tests/qemu-iotests/group | 1 +
3 files changed, 119 insertions(+)
create mode 100755 tests/qemu-iotests/229
create mode 100644 tests/qemu-iotests/229.out
diff --git a/tests/qemu-iotests/229 b/tests/qemu-iotests/229
new file mode 100755
index 0000000..ff851ec
--- /dev/null
+++ b/tests/qemu-iotests/229
@@ -0,0 +1,95 @@
+#!/bin/bash
+#
+# Test for force canceling a running blockjob that is paused in
+# an error state.
+#
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=jcody@redhat.com
+
+seq="$(basename $0)"
+echo "QA output created by $seq"
+
+here="$PWD"
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_qemu
+ _cleanup_test_img
+ rm -f "$TEST_IMG" "$DEST_IMG"
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+. ./common.qemu
+
+# Needs backing file and backing format support
+_supported_fmt qcow2 qed
+_supported_proto file
+_supported_os Linux
+
+
+DEST_IMG="$TEST_DIR/d.$IMGFMT"
+TEST_IMG="$TEST_DIR/b.$IMGFMT"
+
+_make_test_img 2M
+
+# destination for mirror will be too small, causing error
+TEST_IMG=$DEST_IMG _make_test_img 1M
+
+$QEMU_IO -c 'write 0 2M' "$TEST_IMG" | _filter_qemu_io
+
+_launch_qemu -drive id=testdisk,file="$TEST_IMG",format="$IMGFMT"
+
+_send_qemu_cmd $QEMU_HANDLE \
+ "{'execute': 'qmp_capabilities'}" \
+ 'return'
+
+echo
+echo '=== Starting drive-mirror, causing error & stop ==='
+echo
+
+_send_qemu_cmd $QEMU_HANDLE \
+ "{'execute': 'drive-mirror',
+ 'arguments': {'device': 'testdisk',
+ 'mode': 'absolute-paths',
+ 'format': '$IMGFMT',
+ 'target': '$DEST_IMG',
+ 'sync': 'full',
+ 'mode': 'existing',
+ 'on-source-error': 'stop',
+ 'on-target-error': 'stop' }}" \
+ "JOB_STATUS_CHANGE.*pause"
+
+echo
+echo '=== Force cancel job paused in error state ==='
+echo
+
+success_or_failure="y" _send_qemu_cmd $QEMU_HANDLE \
+ "{'execute': 'block-job-cancel',
+ 'arguments': { 'device': 'testdisk',
+ 'force': true}}" \
+ "BLOCK_JOB_CANCELLED" "Assertion"
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/229.out b/tests/qemu-iotests/229.out
new file mode 100644
index 0000000..4c41128
--- /dev/null
+++ b/tests/qemu-iotests/229.out
@@ -0,0 +1,23 @@
+QA output created by 229
+Formatting 'TEST_DIR/b.IMGFMT', fmt=IMGFMT size=2097152
+Formatting 'TEST_DIR/d.IMGFMT', fmt=IMGFMT size=1048576
+wrote 2097152/2097152 bytes at offset 0
+2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+{"return": {}}
+
+=== Starting drive-mirror, causing error & stop ===
+
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "testdisk"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "testdisk"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "testdisk", "operation": "write", "action": "stop"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "paused", "id": "testdisk"}}
+
+=== Force cancel job paused in error state ===
+
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "testdisk"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "testdisk", "operation": "write", "action": "stop"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "testdisk"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "testdisk", "len": 2097152, "offset": 1048576, "speed": 0, "type": "mirror"}}
+*** done
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index f1059f6..23ab4d3 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -225,3 +225,4 @@
225 rw auto quick
226 auto quick
227 auto quick
+229 auto quick
--
1.8.3.1

View File

@ -1,117 +0,0 @@
From c0bedad9bd133c14096eeeae49877fbb9eb179c3 Mon Sep 17 00:00:00 2001
From: Igor Mammedov <imammedo@redhat.com>
Date: Thu, 4 Oct 2018 10:31:31 +0100
Subject: Revert "hw/acpi-build: build SRAT memory affinity structures for DIMM
devices"
RH-Author: Igor Mammedov <imammedo@redhat.com>
Message-id: <1538649091-70517-1-git-send-email-imammedo@redhat.com>
Patchwork-id: 82373
O-Subject: [RHEL8/virt-8.0.0 qemu-kvm PATCH] Revert "hw/acpi-build: build SRAT memory affinity structures for DIMM devices"
Bugzilla: 1609235
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
RH-Acked-by: Michael S. Tsirkin <mst@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
Since upstream commits
(0efd7e108 "pc: acpi: fix memory hotplug regression by reducing stub SRAT entry size")
(dbb6da8ba7 "pc: acpi: revert back to 1 SRAT entry for hotpluggable area")
haven't been backported to RHEL8, it's sufficient to revert commit
(848a1cc1e8 "hw/acpi-build: build SRAT memory affinity structures for DIMM devices")
for the result to match the current upstream state and fix the bug.
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
Rebase notes (3.0.0):
- Replace hotplug_memory with device_memory in PCMachineState
---
hw/i386/acpi-build.c | 65 ++++------------------------------------------------
1 file changed, 4 insertions(+), 61 deletions(-)
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index be9bdb5..f95516c 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -2254,64 +2254,6 @@ build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog)
#define HOLE_640K_START (640 * KiB)
#define HOLE_640K_END (1 * MiB)
-static void build_srat_hotpluggable_memory(GArray *table_data, uint64_t base,
- uint64_t len, int default_node)
-{
- MemoryDeviceInfoList *info_list = qmp_memory_device_list();
- MemoryDeviceInfoList *info;
- MemoryDeviceInfo *mi;
- PCDIMMDeviceInfo *di;
- uint64_t end = base + len, cur, size;
- bool is_nvdimm;
- AcpiSratMemoryAffinity *numamem;
- MemoryAffinityFlags flags;
-
- for (cur = base, info = info_list;
- cur < end;
- cur += size, info = info->next) {
- numamem = acpi_data_push(table_data, sizeof *numamem);
-
- if (!info) {
- /*
- * Entry is required for Windows to enable memory hotplug in OS
- * and for Linux to enable SWIOTLB when booted with less than
- * 4G of RAM. Windows works better if the entry sets proximity
- * to the highest NUMA node in the machine at the end of the
- * reserved space.
- * Memory devices may override proximity set by this entry,
- * providing _PXM method if necessary.
- */
- build_srat_memory(numamem, end - 1, 1, default_node,
- MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
- break;
- }
-
- mi = info->value;
- is_nvdimm = (mi->type == MEMORY_DEVICE_INFO_KIND_NVDIMM);
- di = !is_nvdimm ? mi->u.dimm.data : mi->u.nvdimm.data;
-
- if (cur < di->addr) {
- build_srat_memory(numamem, cur, di->addr - cur, default_node,
- MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
- numamem = acpi_data_push(table_data, sizeof *numamem);
- }
-
- size = di->size;
-
- flags = MEM_AFFINITY_ENABLED;
- if (di->hotpluggable) {
- flags |= MEM_AFFINITY_HOTPLUGGABLE;
- }
- if (is_nvdimm) {
- flags |= MEM_AFFINITY_NON_VOLATILE;
- }
-
- build_srat_memory(numamem, di->addr, size, di->node, flags);
- }
-
- qapi_free_MemoryDeviceInfoList(info_list);
-}
-
static void
build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
{
@@ -2418,9 +2360,10 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
}
if (hotplugabble_address_space_size) {
- build_srat_hotpluggable_memory(table_data, machine->device_memory->base,
- hotplugabble_address_space_size,
- pcms->numa_nodes - 1);
+ numamem = acpi_data_push(table_data, sizeof *numamem);
+ build_srat_memory(numamem, machine->device_memory->base,
+ hotplugabble_address_space_size, pcms->numa_nodes - 1,
+ MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
}
build_header(linker, table_data,
--
1.8.3.1

View File

@ -1,48 +0,0 @@
From c476cf6c76298803fe896eb7c597085af3b73c12 Mon Sep 17 00:00:00 2001
From: Fam Zheng <famz@redhat.com>
Date: Tue, 9 Oct 2018 08:16:47 +0100
Subject: aio-posix: Don't count ctx->notifier as progress when polling
RH-Author: Fam Zheng <famz@redhat.com>
Message-id: <20181009081651.15463-2-famz@redhat.com>
Patchwork-id: 82454
O-Subject: [RHEL8/rhel qemu-kvm PATCH 1/5] aio-posix: Don't count ctx->notifier as progress when polling
Bugzilla: 1623085
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>
BZ: 1623085
The same logic exists in fd polling. This change is especially important
to avoid a busy loop once we limit aio_notify_accept() to blocking
aio_poll().
Cc: qemu-stable@nongnu.org
Signed-off-by: Fam Zheng <famz@redhat.com>
Message-Id: <20180809132259.18402-2-famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit 70232b5253a3c4e03ed1ac47ef9246a8ac66c6fa)
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
util/aio-posix.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 118bf57..b5c7f46 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -494,7 +494,8 @@ static bool run_poll_handlers_once(AioContext *ctx)
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!node->deleted && node->io_poll &&
aio_node_check(ctx, node->is_external) &&
- node->io_poll(node->opaque)) {
+ node->io_poll(node->opaque) &&
+ node->opaque != &ctx->notifier) {
progress = true;
}
--
1.8.3.1

View File

@ -1,124 +0,0 @@
From 1580d01151ceea428dc9a25dd3d83990a594e286 Mon Sep 17 00:00:00 2001
From: Fam Zheng <famz@redhat.com>
Date: Tue, 9 Oct 2018 08:16:48 +0100
Subject: aio: Do aio_notify_accept only during blocking aio_poll
RH-Author: Fam Zheng <famz@redhat.com>
Message-id: <20181009081651.15463-3-famz@redhat.com>
Patchwork-id: 82450
O-Subject: [RHEL8/rhel qemu-kvm PATCH 2/5] aio: Do aio_notify_accept only during blocking aio_poll
Bugzilla: 1623085
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>
BZ: 1623085
An aio_notify() pairs with an aio_notify_accept(). The former should
happen in the main thread or a vCPU thread, and the latter should be
done in the IOThread.
There is one rare case that the main thread or vCPU thread may "steal"
the aio_notify() event just raised by itself, in bdrv_set_aio_context()
[1]. The sequence is like this:
     main thread                     IO Thread
     ===============================================================
     bdrv_drained_begin()
       aio_disable_external(ctx)
                                     aio_poll(ctx, true)
                                       ctx->notify_me += 2
     ...
     bdrv_drained_end()
       ...
         aio_notify()
     ...
     bdrv_set_aio_context()
       aio_poll(ctx, false)
[1]      aio_notify_accept(ctx)
                                     ppoll() /* Hang! */
[1] is problematic. It will clear the ctx->notifier event so that
the blocked ppoll() will not return.
(For the curious, this bug was noticed when booting a number of VMs
simultaneously in RHV. One or two of the VMs will hit this race
condition, making the VIRTIO device unresponsive to I/O commands. When
it hangs, Seabios is busy waiting for a read request to complete (read
MBR), right after initializing the virtio-blk-pci device, using 100%
guest CPU. See also https://bugzilla.redhat.com/show_bug.cgi?id=1562750
for the original bug analysis.)
aio_notify() only injects an event when ctx->notify_me is set,
correspondingly aio_notify_accept() is only useful when ctx->notify_me
_was_ set. Move the call to it into the "blocking" branch. This will
effectively skip [1] and fix the hang.
Furthermore, blocking aio_poll is only allowed on the home thread
(in_aio_context_home_thread), because otherwise two blocking
aio_poll()'s can steal each other's ctx->notifier event and cause a
hang just like the one described above.
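A self-contained toy analogue of the race, with a plain eventfd standing in for the
AioContext notifier (the 1-second timeout exists only so the demo terminates):
consuming the pending notification in the wrong place leaves the subsequent
blocking wait with nothing to wake it.

#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
    int efd = eventfd(0, 0);
    uint64_t one = 1, val;
    struct pollfd pfd = { .fd = efd, .events = POLLIN };
    int n;

    if (efd < 0) {
        return 1;
    }
    if (write(efd, &one, sizeof(one)) < 0 ||   /* "aio_notify()": a wakeup is pending */
        read(efd, &val, sizeof(val)) < 0) {    /* misplaced "accept" consumes it      */
        return 1;
    }
    /* The event is gone, so a blocking wait now has nothing to return for. */
    n = poll(&pfd, 1, 1000 /* ms; stands in for the ppoll() that hangs */);
    printf("poll() returned %d (0 means it would have hung)\n", n);
    close(efd);
    return 0;
}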
Cc: qemu-stable@nongnu.org
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
Message-Id: <20180809132259.18402-3-famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit b37548fcd1b8ac2e88e185a395bef851f3fc4e65)
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
util/aio-posix.c | 4 ++--
util/aio-win32.c | 3 ++-
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/util/aio-posix.c b/util/aio-posix.c
index b5c7f46..b5c609b 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -591,6 +591,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
* so disable the optimization now.
*/
if (blocking) {
+ assert(in_aio_context_home_thread(ctx));
atomic_add(&ctx->notify_me, 2);
}
@@ -633,6 +634,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
if (blocking) {
atomic_sub(&ctx->notify_me, 2);
+ aio_notify_accept(ctx);
}
/* Adjust polling time */
@@ -676,8 +678,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
}
- aio_notify_accept(ctx);
-
/* if we have any readable fds, dispatch event */
if (ret > 0) {
for (i = 0; i < npfd; i++) {
diff --git a/util/aio-win32.c b/util/aio-win32.c
index e676a8d..c58957c 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -373,11 +373,12 @@ bool aio_poll(AioContext *ctx, bool blocking)
ret = WaitForMultipleObjects(count, events, FALSE, timeout);
if (blocking) {
assert(first);
+ assert(in_aio_context_home_thread(ctx));
atomic_sub(&ctx->notify_me, 2);
+ aio_notify_accept(ctx);
}
if (first) {
- aio_notify_accept(ctx);
progress |= aio_bh_poll(ctx);
first = false;
}
--
1.8.3.1

View File

@ -1,122 +0,0 @@
From 07bbb6779b2a628b3e83b5474be550009aae034d Mon Sep 17 00:00:00 2001
From: Fam Zheng <famz@redhat.com>
Date: Tue, 9 Oct 2018 08:16:49 +0100
Subject: aio-posix: fix concurrent access to poll_disable_cnt
RH-Author: Fam Zheng <famz@redhat.com>
Message-id: <20181009081651.15463-4-famz@redhat.com>
Patchwork-id: 82452
O-Subject: [RHEL8/rhel qemu-kvm PATCH 3/5] aio-posix: fix concurrent access to poll_disable_cnt
Bugzilla: 1632622
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>
From: Paolo Bonzini <pbonzini@redhat.com>
BZ: 1632622
It is valid for an aio_set_fd_handler to happen concurrently with
aio_poll. In that case, poll_disable_cnt can change under aio_poll's
feet, and the assertion on poll_disable_cnt can fail in
run_poll_handlers.
Therefore, this patch simply checks the counter on every polling
iteration. There is no particular need for ordering, since the
polling loop is terminated anyway by aio_notify at the end of
aio_set_fd_handler.
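A hedged sketch of the same idea with C11 atomics and POSIX threads (the loop body
and all names are stand-ins, not the aio-posix code; build with -pthread): the
poller re-reads the counter on every iteration instead of asserting it once, and
the writer needs no particular ordering.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int poll_disable_cnt;

static void *change_handlers(void *arg)
{
    (void)arg;
    usleep(1000);                            /* some other thread ...             */
    atomic_fetch_add(&poll_disable_cnt, 1);  /* ... concurrently disables polling */
    return NULL;
}

int main(void)
{
    pthread_t t;
    unsigned long iterations = 0;
    bool progress = false;

    pthread_create(&t, NULL, change_handlers, NULL);

    /* Re-check the counter each iteration rather than asserting it is 0 once. */
    while (!progress && atomic_load(&poll_disable_cnt) == 0) {
        iterations++;                        /* stands in for run_poll_handlers_once() */
    }

    pthread_join(t, NULL);
    printf("stopped polling after %lu iterations\n", iterations);
    return 0;
}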
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20180912171040.1732-2-pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit d7be5dd19c0df7f76e1b42f0c2cbbabefa1974cb)
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
util/aio-posix.c | 26 +++++++++++++++-----------
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/util/aio-posix.c b/util/aio-posix.c
index b5c609b..9189033 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -211,6 +211,7 @@ void aio_set_fd_handler(AioContext *ctx,
AioHandler *node;
bool is_new = false;
bool deleted = false;
+ int poll_disable_change;
qemu_lockcnt_lock(&ctx->list_lock);
@@ -244,11 +245,9 @@ void aio_set_fd_handler(AioContext *ctx,
QLIST_REMOVE(node, node);
deleted = true;
}
-
- if (!node->io_poll) {
- ctx->poll_disable_cnt--;
- }
+ poll_disable_change = -!node->io_poll;
} else {
+ poll_disable_change = !io_poll - (node && !node->io_poll);
if (node == NULL) {
/* Alloc and insert if it's not already there */
node = g_new0(AioHandler, 1);
@@ -257,10 +256,6 @@ void aio_set_fd_handler(AioContext *ctx,
g_source_add_poll(&ctx->source, &node->pfd);
is_new = true;
-
- ctx->poll_disable_cnt += !io_poll;
- } else {
- ctx->poll_disable_cnt += !io_poll - !node->io_poll;
}
/* Update handler with latest information */
@@ -274,6 +269,15 @@ void aio_set_fd_handler(AioContext *ctx,
node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
}
+ /* No need to order poll_disable_cnt writes against other updates;
+ * the counter is only used to avoid wasting time and latency on
+ * iterated polling when the system call will be ultimately necessary.
+ * Changing handlers is a rare event, and a little wasted polling until
+ * the aio_notify below is not an issue.
+ */
+ atomic_set(&ctx->poll_disable_cnt,
+ atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
+
aio_epoll_update(ctx, node, is_new);
qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
@@ -525,7 +529,6 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
assert(ctx->notify_me);
assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
- assert(ctx->poll_disable_cnt == 0);
trace_run_poll_handlers_begin(ctx, max_ns);
@@ -533,7 +536,8 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
do {
progress = run_poll_handlers_once(ctx);
- } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);
+ } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time
+ && !atomic_read(&ctx->poll_disable_cnt));
trace_run_poll_handlers_end(ctx, progress);
@@ -552,7 +556,7 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
*/
static bool try_poll_mode(AioContext *ctx, bool blocking)
{
- if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
+ if (blocking && ctx->poll_max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
/* See qemu_soonest_timeout() uint64_t hack */
int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
(uint64_t)ctx->poll_ns);
--
1.8.3.1

View File

@ -1,186 +0,0 @@
From 44bb29739a1cfa471447d6c5880e7527399b146f Mon Sep 17 00:00:00 2001
From: Fam Zheng <famz@redhat.com>
Date: Tue, 9 Oct 2018 08:16:50 +0100
Subject: aio-posix: compute timeout before polling
RH-Author: Fam Zheng <famz@redhat.com>
Message-id: <20181009081651.15463-5-famz@redhat.com>
Patchwork-id: 82453
O-Subject: [RHEL8/rhel qemu-kvm PATCH 4/5] aio-posix: compute timeout before polling
Bugzilla: 1632622
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>
From: Paolo Bonzini <pbonzini@redhat.com>
BZ: 1632622
This is a preparation for the next patch, and also a very small
optimization. Compute the timeout only once, before invoking
try_poll_mode, and adjust it in run_poll_handlers. The adjustment
is the polling time when polling fails, or zero (non-blocking) if
polling succeeds.
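A self-contained sketch of that adjustment (the clock helper and the fake poll step
are invented for the example): the time spent polling is subtracted from the
blocking timeout so the overall deadline does not slip, while a successful poll
would simply zero the timeout.

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static int64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Stand-in for run_poll_handlers_once(): here polling never succeeds. */
static bool poll_once(int64_t *timeout)
{
    (void)timeout;                  /* a success would do: *timeout = 0;      */
    return false;
}

int main(void)
{
    int64_t timeout = 10 * 1000000LL;   /* 10 ms budget for the blocking wait */
    int64_t max_ns  = 2 * 1000000LL;    /* 2 ms budget for busy polling       */
    int64_t start = now_ns(), elapsed;
    bool progress;

    do {
        progress = poll_once(&timeout);
        elapsed = now_ns() - start;
    } while (!progress && elapsed < max_ns);

    if (timeout != -1) {                /* -1 would mean "block forever"      */
        timeout -= MIN(timeout, elapsed);
    }
    printf("polled for %" PRId64 " ns, remaining blocking timeout %" PRId64 " ns\n",
           elapsed, timeout);
    return 0;
}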
Fixes: 70232b5253a3c4e03ed1ac47ef9246a8ac66c6fa
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20180912171040.1732-3-pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit e30cffa04d52e35996569f1cfac111be19576bde)
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
util/aio-posix.c | 59 ++++++++++++++++++++++++++++++++-----------------------
util/trace-events | 4 ++--
2 files changed, 36 insertions(+), 27 deletions(-)
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 9189033..bb862e1 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -490,7 +490,7 @@ static void add_pollfd(AioHandler *node)
npfd++;
}
-static bool run_poll_handlers_once(AioContext *ctx)
+static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
{
bool progress = false;
AioHandler *node;
@@ -500,6 +500,7 @@ static bool run_poll_handlers_once(AioContext *ctx)
aio_node_check(ctx, node->is_external) &&
node->io_poll(node->opaque) &&
node->opaque != &ctx->notifier) {
+ *timeout = 0;
progress = true;
}
@@ -522,31 +523,38 @@ static bool run_poll_handlers_once(AioContext *ctx)
*
* Returns: true if progress was made, false otherwise
*/
-static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
+static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
{
bool progress;
- int64_t end_time;
+ int64_t start_time, elapsed_time;
assert(ctx->notify_me);
assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
- trace_run_poll_handlers_begin(ctx, max_ns);
-
- end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;
+ trace_run_poll_handlers_begin(ctx, max_ns, *timeout);
+ start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
do {
- progress = run_poll_handlers_once(ctx);
- } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time
+ progress = run_poll_handlers_once(ctx, timeout);
+ elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
+ } while (!progress && elapsed_time < max_ns
&& !atomic_read(&ctx->poll_disable_cnt));
- trace_run_poll_handlers_end(ctx, progress);
+ /* If time has passed with no successful polling, adjust *timeout to
+ * keep the same ending time.
+ */
+ if (*timeout != -1) {
+ *timeout -= MIN(*timeout, elapsed_time);
+ }
+ trace_run_poll_handlers_end(ctx, progress, *timeout);
return progress;
}
/* try_poll_mode:
* @ctx: the AioContext
- * @blocking: busy polling is only attempted when blocking is true
+ * @timeout: timeout for blocking wait, computed by the caller and updated if
+ * polling succeeds.
*
* ctx->notify_me must be non-zero so this function can detect aio_notify().
*
@@ -554,19 +562,16 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
*
* Returns: true if progress was made, false otherwise
*/
-static bool try_poll_mode(AioContext *ctx, bool blocking)
+static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
{
- if (blocking && ctx->poll_max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
- /* See qemu_soonest_timeout() uint64_t hack */
- int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
- (uint64_t)ctx->poll_ns);
+ /* See qemu_soonest_timeout() uint64_t hack */
+ int64_t max_ns = MIN((uint64_t)*timeout, (uint64_t)ctx->poll_ns);
- if (max_ns) {
- poll_set_started(ctx, true);
+ if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
+ poll_set_started(ctx, true);
- if (run_poll_handlers(ctx, max_ns)) {
- return true;
- }
+ if (run_poll_handlers(ctx, max_ns, timeout)) {
+ return true;
}
}
@@ -575,7 +580,7 @@ static bool try_poll_mode(AioContext *ctx, bool blocking)
/* Even if we don't run busy polling, try polling once in case it can make
* progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
*/
- return run_poll_handlers_once(ctx);
+ return run_poll_handlers_once(ctx, timeout);
}
bool aio_poll(AioContext *ctx, bool blocking)
@@ -605,8 +610,14 @@ bool aio_poll(AioContext *ctx, bool blocking)
start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
}
- progress = try_poll_mode(ctx, blocking);
- if (!progress) {
+ timeout = blocking ? aio_compute_timeout(ctx) : 0;
+ progress = try_poll_mode(ctx, &timeout);
+ assert(!(timeout && progress));
+
+ /* If polling is allowed, non-blocking aio_poll does not need the
+ * system call---a single round of run_poll_handlers_once suffices.
+ */
+ if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
assert(npfd == 0);
/* fill pollfds */
@@ -620,8 +631,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
}
- timeout = blocking ? aio_compute_timeout(ctx) : 0;
-
/* wait until next event */
if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
AioHandler epoll_handler;
diff --git a/util/trace-events b/util/trace-events
index 4822434..79569b7 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -1,8 +1,8 @@
# See docs/devel/tracing.txt for syntax documentation.
# util/aio-posix.c
-run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
-run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
+run_poll_handlers_begin(void *ctx, int64_t max_ns, int64_t timeout) "ctx %p max_ns %"PRId64 " timeout %"PRId64
+run_poll_handlers_end(void *ctx, bool progress, int64_t timeout) "ctx %p progress %d new timeout %"PRId64
poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
--
1.8.3.1

View File

@ -1,64 +0,0 @@
From ea1db6ad3fcbcda2068d3aeb21c384d42004aaaf Mon Sep 17 00:00:00 2001
From: Fam Zheng <famz@redhat.com>
Date: Tue, 9 Oct 2018 08:16:51 +0100
Subject: aio-posix: do skip system call if ctx->notifier polling succeeds
RH-Author: Fam Zheng <famz@redhat.com>
Message-id: <20181009081651.15463-6-famz@redhat.com>
Patchwork-id: 82449
O-Subject: [RHEL8/rhel qemu-kvm PATCH 5/5] aio-posix: do skip system call if ctx->notifier polling succeeds
Bugzilla: 1632622
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>
From: Paolo Bonzini <pbonzini@redhat.com>
BZ: 1632622
Commit 70232b5253 ("aio-posix: Don't count ctx->notifier as progress when
polling", 2018-08-15), by not reporting progress, causes aio_poll to execute the
system call when polling succeeds because of ctx->notifier. This introduces
latency before the call to aio_bh_poll() and negates the advantages of
polling, unfortunately.
The fix builds on the previous patch, separating the effect of polling on
the timeout from the progress reported to aio_poll(). ctx->notifier
does zero the timeout, causing the caller to skip the system call,
but it does not report progress, so that the bug fix of commit 70232b5253
still stands.
Fixes: 70232b5253a3c4e03ed1ac47ef9246a8ac66c6fa
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20180912171040.1732-4-pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit cfeb35d6774b2e936046aa9923217818bd160299)
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
util/aio-posix.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/util/aio-posix.c b/util/aio-posix.c
index bb862e1..a959ff6 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -498,10 +498,11 @@ static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!node->deleted && node->io_poll &&
aio_node_check(ctx, node->is_external) &&
- node->io_poll(node->opaque) &&
- node->opaque != &ctx->notifier) {
+ node->io_poll(node->opaque)) {
*timeout = 0;
- progress = true;
+ if (node->opaque != &ctx->notifier) {
+ progress = true;
+ }
}
/* Caller handles freeing deleted nodes. Don't do it here. */
--
1.8.3.1

View File

@ -1,202 +0,0 @@
From 7e13447e23269939c3d1267a957187a60fef36e9 Mon Sep 17 00:00:00 2001
From: Thomas Huth <thuth@redhat.com>
Date: Mon, 15 Oct 2018 10:19:26 +0100
Subject: linux-headers: update
RH-Author: Thomas Huth <thuth@redhat.com>
Message-id: <1539598771-16223-2-git-send-email-thuth@redhat.com>
Patchwork-id: 82696
O-Subject: [RHEL-8 qemu-kvm PATCH 1/6] linux-headers: update
Bugzilla: 1508142
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Jens Freimann <jfreimann@redhat.com>
From: Cornelia Huck <cohuck@redhat.com>
Update to kvm/next commit dd5bd0a65ff6 ("Merge tag 'kvm-s390-next-4.20-1'
of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD")
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 8f3cd250a897213d39e621e3d824507b48158d42)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
Conflicts:
linux-headers/linux/kvm.h
linux-headers/linux/vhost.h
(simple contextual conflicts due to some missing patches in downstream)
Signed-off-by: Thomas Huth <thuth@redhat.com>
---
include/standard-headers/linux/input.h | 9 +++++----
linux-headers/asm-arm/kvm.h | 13 +++++++++++++
linux-headers/asm-arm64/kvm.h | 13 +++++++++++++
linux-headers/asm-s390/kvm.h | 2 ++
linux-headers/asm-x86/kvm.h | 1 +
linux-headers/linux/kvm.h | 3 +++
linux-headers/linux/vfio.h | 2 ++
linux-headers/linux/vhost.h | 8 ++++++++
8 files changed, 47 insertions(+), 4 deletions(-)
diff --git a/include/standard-headers/linux/input.h b/include/standard-headers/linux/input.h
index 6d6128c..c0ad9fc 100644
--- a/include/standard-headers/linux/input.h
+++ b/include/standard-headers/linux/input.h
@@ -267,10 +267,11 @@ struct input_mask {
/*
* MT_TOOL types
*/
-#define MT_TOOL_FINGER 0
-#define MT_TOOL_PEN 1
-#define MT_TOOL_PALM 2
-#define MT_TOOL_MAX 2
+#define MT_TOOL_FINGER 0x00
+#define MT_TOOL_PEN 0x01
+#define MT_TOOL_PALM 0x02
+#define MT_TOOL_DIAL 0x0a
+#define MT_TOOL_MAX 0x0f
/*
* Values describing the status of a force-feedback effect
diff --git a/linux-headers/asm-arm/kvm.h b/linux-headers/asm-arm/kvm.h
index 72aa226..e1f8b74 100644
--- a/linux-headers/asm-arm/kvm.h
+++ b/linux-headers/asm-arm/kvm.h
@@ -27,6 +27,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -125,6 +126,18 @@ struct kvm_sync_regs {
struct kvm_arch_memory_slot {
};
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+ struct {
+ __u8 serror_pending;
+ __u8 serror_has_esr;
+ /* Align it to 8 bytes */
+ __u8 pad[6];
+ __u64 serror_esr;
+ } exception;
+ __u32 reserved[12];
+};
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h
index 99cb9ad..e6a98c1 100644
--- a/linux-headers/asm-arm64/kvm.h
+++ b/linux-headers/asm-arm64/kvm.h
@@ -39,6 +39,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -154,6 +155,18 @@ struct kvm_sync_regs {
struct kvm_arch_memory_slot {
};
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+ struct {
+ __u8 serror_pending;
+ __u8 serror_has_esr;
+ /* Align it to 8 bytes */
+ __u8 pad[6];
+ __u64 serror_esr;
+ } exception;
+ __u32 reserved[12];
+};
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h
index 1ab9901..0265482 100644
--- a/linux-headers/asm-s390/kvm.h
+++ b/linux-headers/asm-s390/kvm.h
@@ -160,6 +160,8 @@ struct kvm_s390_vm_cpu_subfunc {
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2
#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3
+#define KVM_S390_VM_CRYPTO_ENABLE_APIE 4
+#define KVM_S390_VM_CRYPTO_DISABLE_APIE 5
/* kvm attributes for migration mode */
#define KVM_S390_VM_MIGRATION_STOP 0
diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h
index c535c2f..9bba973 100644
--- a/linux-headers/asm-x86/kvm.h
+++ b/linux-headers/asm-x86/kvm.h
@@ -377,5 +377,6 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#endif /* _ASM_X86_KVM_H */
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index 2aae948..c4a5542 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -950,6 +950,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_HYPERV_EVENTFD 154
#define KVM_CAP_HYPERV_TLBFLUSH 155
#define KVM_CAP_S390_HPAGE_1M 156
+#define KVM_CAP_NESTED_STATE 157
+#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
+#define KVM_CAP_MSR_PLATFORM_INFO 159
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h
index 3615a26..ceb6453 100644
--- a/linux-headers/linux/vfio.h
+++ b/linux-headers/linux/vfio.h
@@ -200,6 +200,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
+#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
@@ -215,6 +216,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
+#define VFIO_DEVICE_API_AP_STRING "vfio-ap"
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
index e336395..3421624 100644
--- a/linux-headers/linux/vhost.h
+++ b/linux-headers/linux/vhost.h
@@ -160,6 +160,14 @@ struct vhost_memory {
#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
struct vhost_vring_state)
+/* Set or get vhost backend capability */
+
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+
+#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
+
/* VHOST_NET specific defines */
/* Attach virtio net ring to a raw socket, or tap device.
--
1.8.3.1


@ -1,148 +0,0 @@
From 9ceba72eb99b073a86b0aa529154de3e06330720 Mon Sep 17 00:00:00 2001
From: Thomas Huth <thuth@redhat.com>
Date: Mon, 15 Oct 2018 10:19:27 +0100
Subject: s390x/cpumodel: Set up CPU model for AP device support
RH-Author: Thomas Huth <thuth@redhat.com>
Message-id: <1539598771-16223-3-git-send-email-thuth@redhat.com>
Patchwork-id: 82694
O-Subject: [RHEL-8 qemu-kvm PATCH 2/6] s390x/cpumodel: Set up CPU model for AP device support
Bugzilla: 1508142
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Jens Freimann <jfreimann@redhat.com>
From: Tony Krowiak <akrowiak@linux.ibm.com>
A new CPU model feature and two new CPU model facilities are
introduced to support AP devices for a KVM guest.
CPU model features:
1. The S390_FEAT_AP CPU model feature indicates whether AP
instructions are available to the guest. This feature will
be enabled only if the AP instructions are available on the
linux host as determined by the availability of the
KVM_S390_VM_CRYPTO_ENABLE_APIE VM attribute which is exposed
by KVM only if the AP instructions are available on the
host.
This feature must be turned on from userspace to execute AP
instructions on the KVM guest. The QEMU command line to turn
this feature on looks something like this:
qemu-system-s390x ... -cpu xxx,ap=on ...
This feature will be supported for zEC12 and newer CPU models.
The feature will not be supported for older models because
there are few older systems on which to test and the older
crypto cards will be going out of service in the relatively
near future.
CPU model facilities:
1. The S390_FEAT_AP_QUERY_CONFIG_INFO feature indicates whether the
AP Query Configuration Information (QCI) facility is available
to the guest as determined by whether the facility is available
on the host. This feature will be exposed by KVM only if the
QCI facility is installed on the host.
2. The S390_FEAT_AP_FACILITY_TEST feature indicates whether the AP
Facility Test (APFT) facility is available to the guest as
determined by whether the facility is available on the host.
This feature will be exposed by KVM only if APFT is installed
on the host.
Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
Tested-by: Pierre Morel <pmorel@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Message-Id: <20181010170309.12045-3-akrowiak@linux.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit c5cd17afddda89376712b315a41ede96b034e4c2)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
target/s390x/cpu_features.c | 3 +++
target/s390x/cpu_features_def.h | 3 +++
target/s390x/cpu_models.c | 2 ++
target/s390x/gen-features.c | 3 +++
4 files changed, 11 insertions(+)
diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c
index e05e6aa..0fbee27 100644
--- a/target/s390x/cpu_features.c
+++ b/target/s390x/cpu_features.c
@@ -40,8 +40,10 @@ static const S390FeatDef s390_features[] = {
FEAT_INIT("srs", S390_FEAT_TYPE_STFL, 9, "Sense-running-status facility"),
FEAT_INIT("csske", S390_FEAT_TYPE_STFL, 10, "Conditional-SSKE facility"),
FEAT_INIT("ctop", S390_FEAT_TYPE_STFL, 11, "Configuration-topology facility"),
+ FEAT_INIT("apqci", S390_FEAT_TYPE_STFL, 12, "Query AP Configuration Information facility"),
FEAT_INIT("ipter", S390_FEAT_TYPE_STFL, 13, "IPTE-range facility"),
FEAT_INIT("nonqks", S390_FEAT_TYPE_STFL, 14, "Nonquiescing key-setting facility"),
+ FEAT_INIT("apft", S390_FEAT_TYPE_STFL, 15, "AP Facilities Test facility"),
FEAT_INIT("etf2", S390_FEAT_TYPE_STFL, 16, "Extended-translation facility 2"),
FEAT_INIT("msa-base", S390_FEAT_TYPE_STFL, 17, "Message-security-assist facility (excluding subfunctions)"),
FEAT_INIT("ldisp", S390_FEAT_TYPE_STFL, 18, "Long-displacement facility"),
@@ -130,6 +132,7 @@ static const S390FeatDef s390_features[] = {
FEAT_INIT_MISC("dateh2", "DAT-enhancement facility 2"),
FEAT_INIT_MISC("cmm", "Collaborative-memory-management facility"),
+ FEAT_INIT_MISC("ap", "AP instructions installed"),
FEAT_INIT("plo-cl", S390_FEAT_TYPE_PLO, 0, "PLO Compare and load (32 bit in general registers)"),
FEAT_INIT("plo-clg", S390_FEAT_TYPE_PLO, 1, "PLO Compare and load (64 bit in parameter list)"),
diff --git a/target/s390x/cpu_features_def.h b/target/s390x/cpu_features_def.h
index ac2c947..5fc7e7b 100644
--- a/target/s390x/cpu_features_def.h
+++ b/target/s390x/cpu_features_def.h
@@ -27,8 +27,10 @@ typedef enum {
S390_FEAT_SENSE_RUNNING_STATUS,
S390_FEAT_CONDITIONAL_SSKE,
S390_FEAT_CONFIGURATION_TOPOLOGY,
+ S390_FEAT_AP_QUERY_CONFIG_INFO,
S390_FEAT_IPTE_RANGE,
S390_FEAT_NONQ_KEY_SETTING,
+ S390_FEAT_AP_FACILITIES_TEST,
S390_FEAT_EXTENDED_TRANSLATION_2,
S390_FEAT_MSA,
S390_FEAT_LONG_DISPLACEMENT,
@@ -119,6 +121,7 @@ typedef enum {
/* Misc */
S390_FEAT_DAT_ENH_2,
S390_FEAT_CMM,
+ S390_FEAT_AP,
/* PLO */
S390_FEAT_PLO_CL,
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index 9c469ff..a8722cd 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -782,6 +782,8 @@ static void check_consistency(const S390CPUModel *model)
{ S390_FEAT_PRNO_TRNG_QRTCR, S390_FEAT_MSA_EXT_5 },
{ S390_FEAT_PRNO_TRNG, S390_FEAT_MSA_EXT_5 },
{ S390_FEAT_SIE_KSS, S390_FEAT_SIE_F2 },
+ { S390_FEAT_AP_QUERY_CONFIG_INFO, S390_FEAT_AP },
+ { S390_FEAT_AP_FACILITIES_TEST, S390_FEAT_AP },
};
int i;
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index 5af042c..7302269 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -447,6 +447,9 @@ static uint16_t full_GEN12_GA1[] = {
S390_FEAT_ADAPTER_INT_SUPPRESSION,
S390_FEAT_EDAT_2,
S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2,
+ S390_FEAT_AP_QUERY_CONFIG_INFO,
+ S390_FEAT_AP_FACILITIES_TEST,
+ S390_FEAT_AP,
};
static uint16_t full_GEN12_GA2[] = {
--
1.8.3.1
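
The pairs added to check_consistency() above record that apqci and apft depend on the base ap feature. A standalone C sketch of such a dependency-table check (the enum values and helper names are invented for illustration; this is not QEMU's implementation):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Invented feature numbers for the sketch. */
    enum { FEAT_AP, FEAT_AP_QCI, FEAT_AP_FT, FEAT_MAX };

    struct dep { int feat; int requires; };

    static const struct dep deps[] = {
        { FEAT_AP_QCI, FEAT_AP },   /* apqci only makes sense with ap */
        { FEAT_AP_FT,  FEAT_AP },   /* apft only makes sense with ap  */
    };

    static void check_deps(const bool features[FEAT_MAX])
    {
        for (size_t i = 0; i < sizeof(deps) / sizeof(deps[0]); i++) {
            if (features[deps[i].feat] && !features[deps[i].requires]) {
                printf("warning: feature %d enabled without feature %d\n",
                       deps[i].feat, deps[i].requires);
            }
        }
    }

    int main(void)
    {
        bool features[FEAT_MAX] = { [FEAT_AP_QCI] = true };  /* apqci without ap */
        check_deps(features);
        return 0;
    }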


@ -1,89 +0,0 @@
From ef6a15cefa04a4f29d0d800d17caa9a37c40b05c Mon Sep 17 00:00:00 2001
From: Thomas Huth <thuth@redhat.com>
Date: Mon, 15 Oct 2018 10:19:28 +0100
Subject: s390x/kvm: enable AP instruction interpretation for guest
RH-Author: Thomas Huth <thuth@redhat.com>
Message-id: <1539598771-16223-4-git-send-email-thuth@redhat.com>
Patchwork-id: 82697
O-Subject: [RHEL-8 qemu-kvm PATCH 3/6] s390x/kvm: enable AP instruction interpretation for guest
Bugzilla: 1508142
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Jens Freimann <jfreimann@redhat.com>
From: Tony Krowiak <akrowiak@linux.ibm.com>
Let's use the KVM_SET_DEVICE_ATTR ioctl to enable hardware
interpretation of AP instructions executed on the guest.
If the S390_FEAT_AP feature is switched on for the guest,
AP instructions must be interpreted by default; otherwise,
they will be intercepted.
This attribute setting may be overridden by a device. For example,
a device may want to provide AP instructions to the guest (i.e.,
S390_FEAT_AP turned on), but it may want to emulate them. In this
case, the AP instructions executed on the guest must be
intercepted; so when the device is realized, it must disable
interpretation.
Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
Tested-by: Pierre Morel <pmorel@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Halil Pasic <pasic@linux.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Message-Id: <20181010170309.12045-4-akrowiak@linux.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 1d7db85b61cb9888b8ed8c8923343b468405b7a0)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
target/s390x/kvm.c | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index 71d90f2..d25e2e2 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -2290,11 +2290,26 @@ void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
error_setg(errp, "KVM: host CPU model could not be identified");
return;
}
+ /* for now, we can only provide the AP feature with HW support */
+ if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
+ KVM_S390_VM_CRYPTO_ENABLE_APIE)) {
+ set_bit(S390_FEAT_AP, model->features);
+ }
/* strip of features that are not part of the maximum model */
bitmap_and(model->features, model->features, model->def->full_feat,
S390_FEAT_MAX);
}
+static void kvm_s390_configure_apie(bool interpret)
+{
+ uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
+ KVM_S390_VM_CRYPTO_DISABLE_APIE;
+
+ if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
+ kvm_s390_set_attr(attr);
+ }
+}
+
void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
{
struct kvm_s390_vm_cpu_processor prop = {
@@ -2352,6 +2367,10 @@ void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
if (test_bit(S390_FEAT_CMM, model->features)) {
kvm_s390_enable_cmma();
}
+
+ if (test_bit(S390_FEAT_AP, model->features)) {
+ kvm_s390_configure_apie(true);
+ }
}
void kvm_s390_restart_interrupt(S390CPU *cpu)
--
1.8.3.1


@ -1,281 +0,0 @@
From a57558fc97a82853d0c5e1e190297f7677598d5a Mon Sep 17 00:00:00 2001
From: Thomas Huth <thuth@redhat.com>
Date: Mon, 15 Oct 2018 10:19:29 +0100
Subject: s390x/ap: base Adjunct Processor (AP) object model
RH-Author: Thomas Huth <thuth@redhat.com>
Message-id: <1539598771-16223-5-git-send-email-thuth@redhat.com>
Patchwork-id: 82695
O-Subject: [RHEL-8 qemu-kvm PATCH 4/6] s390x/ap: base Adjunct Processor (AP) object model
Bugzilla: 1508142
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Jens Freimann <jfreimann@redhat.com>
From: Tony Krowiak <akrowiak@linux.ibm.com>
Introduces the base object model for virtualizing AP devices.
Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
Tested-by: Pierre Morel <pmorel@linux.ibm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Message-Id: <20181010170309.12045-5-akrowiak@linux.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit a51b31535a8ec13997de29b357f7cc1dcd8a7f9c)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
MAINTAINERS | 12 +++++++
hw/s390x/Makefile.objs | 2 ++
hw/s390x/ap-bridge.c | 78 ++++++++++++++++++++++++++++++++++++++++++++
hw/s390x/ap-device.c | 38 +++++++++++++++++++++
hw/s390x/s390-virtio-ccw.c | 4 +++
include/hw/s390x/ap-bridge.h | 19 +++++++++++
include/hw/s390x/ap-device.h | 22 +++++++++++++
7 files changed, 175 insertions(+)
create mode 100644 hw/s390x/ap-bridge.c
create mode 100644 hw/s390x/ap-device.c
create mode 100644 include/hw/s390x/ap-bridge.h
create mode 100644 include/hw/s390x/ap-device.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 666e936..d5b3c18 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1184,6 +1184,18 @@ F: include/hw/s390x/s390-ccw.h
T: git git://github.com/cohuck/qemu.git s390-next
L: qemu-s390x@nongnu.org
+vfio-ap
+M: Christian Borntraeger <borntraeger@de.ibm.com>
+M: Tony Krowiak <akrowiak@linux.ibm.com>
+M: Halil Pasic <pasic@linux.ibm.com>
+M: Pierre Morel <pmorel@linux.ibm.com>
+S: Supported
+F: hw/s390x/ap-device.c
+F: hw/s390x/ap-bridge.c
+F: include/hw/s390x/ap-device.h
+F: include/hw/s390x/ap-bridge.h
+L: qemu-s390x@nongnu.org
+
vhost
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
diff --git a/hw/s390x/Makefile.objs b/hw/s390x/Makefile.objs
index 93282f7..add89b1 100644
--- a/hw/s390x/Makefile.objs
+++ b/hw/s390x/Makefile.objs
@@ -20,3 +20,5 @@ obj-$(CONFIG_TCG) += tod-qemu.o
obj-$(CONFIG_KVM) += s390-skeys-kvm.o
obj-$(CONFIG_KVM) += s390-stattrib-kvm.o
obj-y += s390-ccw.o
+obj-y += ap-device.o
+obj-y += ap-bridge.o
diff --git a/hw/s390x/ap-bridge.c b/hw/s390x/ap-bridge.c
new file mode 100644
index 0000000..3795d30
--- /dev/null
+++ b/hw/s390x/ap-bridge.c
@@ -0,0 +1,78 @@
+/*
+ * ap bridge
+ *
+ * Copyright 2018 IBM Corp.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/sysbus.h"
+#include "qemu/bitops.h"
+#include "hw/s390x/ap-bridge.h"
+#include "cpu.h"
+
+static char *ap_bus_get_dev_path(DeviceState *dev)
+{
+ /* at most one */
+ return g_strdup_printf("/1");
+}
+
+static void ap_bus_class_init(ObjectClass *oc, void *data)
+{
+ BusClass *k = BUS_CLASS(oc);
+
+ k->get_dev_path = ap_bus_get_dev_path;
+ /* More than one ap device does not make sense */
+ k->max_dev = 1;
+}
+
+static const TypeInfo ap_bus_info = {
+ .name = TYPE_AP_BUS,
+ .parent = TYPE_BUS,
+ .instance_size = 0,
+ .class_init = ap_bus_class_init,
+};
+
+void s390_init_ap(void)
+{
+ DeviceState *dev;
+
+ /* If no AP instructions then no need for AP bridge */
+ if (!s390_has_feat(S390_FEAT_AP)) {
+ return;
+ }
+
+ /* Create bridge device */
+ dev = qdev_create(NULL, TYPE_AP_BRIDGE);
+ object_property_add_child(qdev_get_machine(), TYPE_AP_BRIDGE,
+ OBJECT(dev), NULL);
+ qdev_init_nofail(dev);
+
+ /* Create bus on bridge device */
+ qbus_create(TYPE_AP_BUS, dev, TYPE_AP_BUS);
+ }
+
+static void ap_bridge_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+}
+
+static const TypeInfo ap_bridge_info = {
+ .name = TYPE_AP_BRIDGE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = 0,
+ .class_init = ap_bridge_class_init,
+};
+
+static void ap_register(void)
+{
+ type_register_static(&ap_bridge_info);
+ type_register_static(&ap_bus_info);
+}
+
+type_init(ap_register)
diff --git a/hw/s390x/ap-device.c b/hw/s390x/ap-device.c
new file mode 100644
index 0000000..f5ac8db
--- /dev/null
+++ b/hw/s390x/ap-device.c
@@ -0,0 +1,38 @@
+/*
+ * Adjunct Processor (AP) matrix device
+ *
+ * Copyright 2018 IBM Corp.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "hw/qdev.h"
+#include "hw/s390x/ap-device.h"
+
+static void ap_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "AP device class";
+ dc->hotpluggable = false;
+}
+
+static const TypeInfo ap_device_info = {
+ .name = AP_DEVICE_TYPE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(APDevice),
+ .class_size = sizeof(DeviceClass),
+ .class_init = ap_class_init,
+ .abstract = true,
+};
+
+static void ap_device_register(void)
+{
+ type_register_static(&ap_device_info);
+}
+
+type_init(ap_device_register)
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index cdf4558..a4b8b62 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -32,6 +32,7 @@
#include "ipl.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/css-bridge.h"
+#include "hw/s390x/ap-bridge.h"
#include "migration/register.h"
#include "cpu_models.h"
#include "hw/nmi.h"
@@ -263,6 +264,9 @@ static void ccw_init(MachineState *machine)
/* init the SIGP facility */
s390_init_sigp();
+ /* create AP bridge and bus(es) */
+ s390_init_ap();
+
/* get a BUS */
css_bus = virtual_css_bus_init();
s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
diff --git a/include/hw/s390x/ap-bridge.h b/include/hw/s390x/ap-bridge.h
new file mode 100644
index 0000000..470e439
--- /dev/null
+++ b/include/hw/s390x/ap-bridge.h
@@ -0,0 +1,19 @@
+/*
+ * ap bridge
+ *
+ * Copyright 2018 IBM Corp.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390X_AP_BRIDGE_H
+#define HW_S390X_AP_BRIDGE_H
+
+#define TYPE_AP_BRIDGE "ap-bridge"
+#define TYPE_AP_BUS "ap-bus"
+
+void s390_init_ap(void);
+
+#endif
diff --git a/include/hw/s390x/ap-device.h b/include/hw/s390x/ap-device.h
new file mode 100644
index 0000000..765e908
--- /dev/null
+++ b/include/hw/s390x/ap-device.h
@@ -0,0 +1,22 @@
+/*
+ * Adjunct Processor (AP) matrix device interfaces
+ *
+ * Copyright 2018 IBM Corp.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+#ifndef HW_S390X_AP_DEVICE_H
+#define HW_S390X_AP_DEVICE_H
+
+#define AP_DEVICE_TYPE "ap-device"
+
+typedef struct APDevice {
+ DeviceState parent_obj;
+} APDevice;
+
+#define AP_DEVICE(obj) \
+ OBJECT_CHECK(APDevice, (obj), AP_DEVICE_TYPE)
+
+#endif /* HW_S390X_AP_DEVICE_H */
--
1.8.3.1


@ -1,305 +0,0 @@
From 9f3a3325bb6859b1d3b46818a7d5b75c5d609f32 Mon Sep 17 00:00:00 2001
From: Thomas Huth <thuth@redhat.com>
Date: Mon, 15 Oct 2018 10:19:30 +0100
Subject: s390x/vfio: ap: Introduce VFIO AP device
RH-Author: Thomas Huth <thuth@redhat.com>
Message-id: <1539598771-16223-6-git-send-email-thuth@redhat.com>
Patchwork-id: 82700
O-Subject: [RHEL-8 qemu-kvm PATCH 5/6] s390x/vfio: ap: Introduce VFIO AP device
Bugzilla: 1508142
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Jens Freimann <jfreimann@redhat.com>
From: Tony Krowiak <akrowiak@linux.ibm.com>
Introduces a VFIO based AP device. The device is defined via
the QEMU command line by specifying:
-device vfio-ap,sysfsdev=<path-to-mediated-matrix-device>
There may be only one vfio-ap device configured for a guest.
The mediated matrix device is created by the VFIO AP device
driver by writing a UUID to a sysfs attribute file (see
docs/vfio-ap.txt). The mediated matrix device will be named
after the UUID. Symbolic links to the $uuid are created in
many places, so the path to the mediated matrix device $uuid
can be specified in any of the following ways:
/sys/devices/vfio_ap/matrix/$uuid
/sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/devices/$uuid
/sys/bus/mdev/devices/$uuid
/sys/bus/mdev/drivers/vfio_mdev/$uuid
When the vfio-ap device is realized, it acquires and opens the
VFIO iommu group to which the mediated matrix device is
bound. This causes a VFIO group notification event to be
signaled. The vfio_ap device driver's group notification
handler will get called, at which time the device driver
will configure the AP devices to which the guest will
be granted access.
Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
Tested-by: Pierre Morel <pmorel@linux.ibm.com>
Acked-by: Halil Pasic <pasic@linux.ibm.com>
Tested-by: Pierre Morel <pmorel@linux.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Message-Id: <20181010170309.12045-6-akrowiak@linux.ibm.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
[CH: added missing g_free and device category]
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 2fe2942cd6ddad8ddd40fe5d16d67599c28959d7)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
MAINTAINERS | 2 +
default-configs/s390x-softmmu.mak | 1 +
hw/vfio/Makefile.objs | 1 +
hw/vfio/ap.c | 181 ++++++++++++++++++++++++++++++++++++++
include/hw/vfio/vfio-common.h | 1 +
5 files changed, 186 insertions(+)
create mode 100644 hw/vfio/ap.c
diff --git a/MAINTAINERS b/MAINTAINERS
index d5b3c18..f2fa1b8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -88,6 +88,7 @@ F: hw/char/terminal3270.c
F: hw/intc/s390_flic.c
F: hw/intc/s390_flic_kvm.c
F: hw/s390x/
+F: hw/vfio/ap.c
F: hw/vfio/ccw.c
F: hw/watchdog/wdt_diag288.c
F: include/hw/s390x/
@@ -1194,6 +1195,7 @@ F: hw/s390x/ap-device.c
F: hw/s390x/ap-bridge.c
F: include/hw/s390x/ap-device.h
F: include/hw/s390x/ap-bridge.h
+F: hw/vfio/ap.c
L: qemu-s390x@nongnu.org
vhost
diff --git a/default-configs/s390x-softmmu.mak b/default-configs/s390x-softmmu.mak
index 8b2db3e..49a59fc 100644
--- a/default-configs/s390x-softmmu.mak
+++ b/default-configs/s390x-softmmu.mak
@@ -8,3 +8,4 @@ CONFIG_S390_FLIC_KVM=$(CONFIG_KVM)
# Disabled for Red Hat Enterprise Linux:
# CONFIG_VFIO_CCW=$(CONFIG_LINUX)
CONFIG_WDT_DIAG288=y
+CONFIG_VFIO_AP=$(CONFIG_LINUX)
diff --git a/hw/vfio/Makefile.objs b/hw/vfio/Makefile.objs
index d38205b..53b4cbe 100644
--- a/hw/vfio/Makefile.objs
+++ b/hw/vfio/Makefile.objs
@@ -5,4 +5,5 @@ obj-$(CONFIG_VFIO_CCW) += ccw.o
obj-$(CONFIG_VFIO_XGMAC) += calxeda-xgmac.o
obj-$(CONFIG_VFIO_AMD_XGBE) += amd-xgbe.o
obj-$(CONFIG_SOFTMMU) += spapr.o
+obj-$(CONFIG_VFIO_AP) += ap.o
endif
diff --git a/hw/vfio/ap.c b/hw/vfio/ap.c
new file mode 100644
index 0000000..3962bb7
--- /dev/null
+++ b/hw/vfio/ap.c
@@ -0,0 +1,181 @@
+/*
+ * VFIO based AP matrix device assignment
+ *
+ * Copyright 2018 IBM Corp.
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Halil Pasic <pasic@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include <linux/vfio.h>
+#include <sys/ioctl.h>
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/sysbus.h"
+#include "hw/vfio/vfio.h"
+#include "hw/vfio/vfio-common.h"
+#include "hw/s390x/ap-device.h"
+#include "qemu/error-report.h"
+#include "qemu/queue.h"
+#include "qemu/option.h"
+#include "qemu/config-file.h"
+#include "cpu.h"
+#include "kvm_s390x.h"
+#include "sysemu/sysemu.h"
+#include "hw/s390x/ap-bridge.h"
+#include "exec/address-spaces.h"
+
+#define VFIO_AP_DEVICE_TYPE "vfio-ap"
+
+typedef struct VFIOAPDevice {
+ APDevice apdev;
+ VFIODevice vdev;
+} VFIOAPDevice;
+
+#define VFIO_AP_DEVICE(obj) \
+ OBJECT_CHECK(VFIOAPDevice, (obj), VFIO_AP_DEVICE_TYPE)
+
+static void vfio_ap_compute_needs_reset(VFIODevice *vdev)
+{
+ vdev->needs_reset = false;
+}
+
+/*
+ * We don't need vfio_hot_reset_multi and vfio_eoi operations for
+ * vfio-ap device now.
+ */
+struct VFIODeviceOps vfio_ap_ops = {
+ .vfio_compute_needs_reset = vfio_ap_compute_needs_reset,
+};
+
+static void vfio_ap_put_device(VFIOAPDevice *vapdev)
+{
+ g_free(vapdev->vdev.name);
+ vfio_put_base_device(&vapdev->vdev);
+}
+
+static VFIOGroup *vfio_ap_get_group(VFIOAPDevice *vapdev, Error **errp)
+{
+ GError *gerror = NULL;
+ char *symlink, *group_path;
+ int groupid;
+
+ symlink = g_strdup_printf("%s/iommu_group", vapdev->vdev.sysfsdev);
+ group_path = g_file_read_link(symlink, &gerror);
+ g_free(symlink);
+
+ if (!group_path) {
+ error_setg(errp, "%s: no iommu_group found for %s: %s",
+ VFIO_AP_DEVICE_TYPE, vapdev->vdev.sysfsdev, gerror->message);
+ return NULL;
+ }
+
+ if (sscanf(basename(group_path), "%d", &groupid) != 1) {
+ error_setg(errp, "vfio: failed to read %s", group_path);
+ g_free(group_path);
+ return NULL;
+ }
+
+ g_free(group_path);
+
+ return vfio_get_group(groupid, &address_space_memory, errp);
+}
+
+static void vfio_ap_realize(DeviceState *dev, Error **errp)
+{
+ int ret;
+ char *mdevid;
+ Error *local_err = NULL;
+ VFIOGroup *vfio_group;
+ APDevice *apdev = AP_DEVICE(dev);
+ VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev);
+
+ vfio_group = vfio_ap_get_group(vapdev, &local_err);
+ if (!vfio_group) {
+ goto out_err;
+ }
+
+ vapdev->vdev.ops = &vfio_ap_ops;
+ vapdev->vdev.type = VFIO_DEVICE_TYPE_AP;
+ mdevid = basename(vapdev->vdev.sysfsdev);
+ vapdev->vdev.name = g_strdup_printf("%s", mdevid);
+ vapdev->vdev.dev = dev;
+
+ ret = vfio_get_device(vfio_group, mdevid, &vapdev->vdev, &local_err);
+ if (ret) {
+ goto out_get_dev_err;
+ }
+
+ return;
+
+out_get_dev_err:
+ vfio_ap_put_device(vapdev);
+ vfio_put_group(vfio_group);
+out_err:
+ error_propagate(errp, local_err);
+}
+
+static void vfio_ap_unrealize(DeviceState *dev, Error **errp)
+{
+ APDevice *apdev = AP_DEVICE(dev);
+ VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev);
+ VFIOGroup *group = vapdev->vdev.group;
+
+ vfio_ap_put_device(vapdev);
+ vfio_put_group(group);
+}
+
+static Property vfio_ap_properties[] = {
+ DEFINE_PROP_STRING("sysfsdev", VFIOAPDevice, vdev.sysfsdev),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vfio_ap_reset(DeviceState *dev)
+{
+ int ret;
+ APDevice *apdev = AP_DEVICE(dev);
+ VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev);
+
+ ret = ioctl(vapdev->vdev.fd, VFIO_DEVICE_RESET);
+ if (ret) {
+ error_report("%s: failed to reset %s device: %s", __func__,
+ vapdev->vdev.name, strerror(ret));
+ }
+}
+
+static const VMStateDescription vfio_ap_vmstate = {
+ .name = VFIO_AP_DEVICE_TYPE,
+ .unmigratable = 1,
+};
+
+static void vfio_ap_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->props = vfio_ap_properties;
+ dc->vmsd = &vfio_ap_vmstate;
+ dc->desc = "VFIO-based AP device assignment";
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->realize = vfio_ap_realize;
+ dc->unrealize = vfio_ap_unrealize;
+ dc->hotpluggable = false;
+ dc->reset = vfio_ap_reset;
+ dc->bus_type = TYPE_AP_BUS;
+}
+
+static const TypeInfo vfio_ap_info = {
+ .name = VFIO_AP_DEVICE_TYPE,
+ .parent = AP_DEVICE_TYPE,
+ .instance_size = sizeof(VFIOAPDevice),
+ .class_init = vfio_ap_class_init,
+};
+
+static void vfio_ap_type_init(void)
+{
+ type_register_static(&vfio_ap_info);
+}
+
+type_init(vfio_ap_type_init)
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index a903692..1389da4 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -37,6 +37,7 @@ enum {
VFIO_DEVICE_TYPE_PCI = 0,
VFIO_DEVICE_TYPE_PLATFORM = 1,
VFIO_DEVICE_TYPE_CCW = 2,
+ VFIO_DEVICE_TYPE_AP = 3,
};
typedef struct VFIOMmap {
--
1.8.3.1


@ -1,889 +0,0 @@
From 8f59c31a8b0c4cde4bc92126d7102c1be9da97d4 Mon Sep 17 00:00:00 2001
From: Thomas Huth <thuth@redhat.com>
Date: Mon, 15 Oct 2018 10:19:31 +0100
Subject: s390: doc: detailed specifications for AP virtualization
RH-Author: Thomas Huth <thuth@redhat.com>
Message-id: <1539598771-16223-7-git-send-email-thuth@redhat.com>
Patchwork-id: 82699
O-Subject: [RHEL-8 qemu-kvm PATCH 6/6] s390: doc: detailed specifications for AP virtualization
Bugzilla: 1508142
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Jens Freimann <jfreimann@redhat.com>
From: Tony Krowiak <akrowiak@linux.ibm.com>
This patch provides documentation describing the AP architecture and
design concepts behind the virtualization of AP devices. It also
includes an example of how to configure AP devices for exclusive
use of KVM guests.
Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
Reviewed-by: Pierre Morel <pmorel@linux.ibm.com>
Tested-by: Pierre Morel <pmorel@linux.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Message-Id: <20181010170309.12045-7-akrowiak@linux.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
(cherry picked from commit 694a8d703bfe06226a0574f5ec4af17a2b7060ef)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
MAINTAINERS | 2 +
docs/vfio-ap.txt | 825 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 827 insertions(+)
create mode 100644 docs/vfio-ap.txt
diff --git a/MAINTAINERS b/MAINTAINERS
index f2fa1b8..fdbfc04 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -96,6 +96,7 @@ F: include/hw/watchdog/wdt_diag288.h
F: pc-bios/s390-ccw/
F: pc-bios/s390-ccw.img
F: target/s390x/
+F: docs/vfio-ap.txt
K: ^Subject:.*(?i)s390x?
T: git git://github.com/cohuck/qemu.git s390-next
L: qemu-s390x@nongnu.org
@@ -1196,6 +1197,7 @@ F: hw/s390x/ap-bridge.c
F: include/hw/s390x/ap-device.h
F: include/hw/s390x/ap-bridge.h
F: hw/vfio/ap.c
+F: docs/vfio-ap.txt
L: qemu-s390x@nongnu.org
vhost
diff --git a/docs/vfio-ap.txt b/docs/vfio-ap.txt
new file mode 100644
index 0000000..1233968
--- /dev/null
+++ b/docs/vfio-ap.txt
@@ -0,0 +1,825 @@
+Adjunct Processor (AP) Device
+=============================
+
+Contents:
+=========
+* Introduction
+* AP Architectural Overview
+* Start Interpretive Execution (SIE) Instruction
+* AP Matrix Configuration on Linux Host
+* Starting a Linux Guest Configured with an AP Matrix
+* Example: Configure AP Matrices for Three Linux Guests
+
+Introduction:
+============
+The IBM Adjunct Processor (AP) Cryptographic Facility is comprised
+of three AP instructions and from 1 to 256 PCIe cryptographic adapter cards.
+These AP devices provide cryptographic functions to all CPUs assigned to a
+linux system running in an IBM Z system LPAR.
+
+On s390x, AP adapter cards are exposed via the AP bus. This document
+describes how those cards may be made available to KVM guests using the
+VFIO mediated device framework.
+
+AP Architectural Overview:
+=========================
+In order to understand the terminology used in the rest of this document, let's
+start with some definitions:
+
+* AP adapter
+
+ An AP adapter is an IBM Z adapter card that can perform cryptographic
+ functions. There can be from 0 to 256 adapters assigned to an LPAR depending
+ on the machine model. Adapters assigned to the LPAR in which a linux host is
+ running will be available to the linux host. Each adapter is identified by a
+ number from 0 to 255; however, the maximum adapter number allowed is
+ determined by machine model. When installed, an AP adapter is accessed by
+ AP instructions executed by any CPU.
+
+* AP domain
+
+ An adapter is partitioned into domains. Each domain can be thought of as
+ a set of hardware registers for processing AP instructions. An adapter can
+ hold up to 256 domains; however, the maximum domain number allowed is
+ determined by machine model. Each domain is identified by a number from 0 to
+ 255. Domains can be further classified into two types:
+
+ * Usage domains are domains that can be accessed directly to process AP
+ commands
+
+ * Control domains are domains that are accessed indirectly by AP
+ commands sent to a usage domain to control or change the domain; for
+ example, to set a secure private key for the domain.
+
+* AP Queue
+
+ An AP queue is the means by which an AP command-request message is sent to an
+ AP usage domain inside a specific AP. An AP queue is identified by a tuple
+ comprised of an AP adapter ID (APID) and an AP queue index (APQI). The
+ APQI corresponds to a given usage domain number within the adapter. This tuple
+ forms an AP Queue Number (APQN) uniquely identifying an AP queue. AP
+ instructions include a field containing the APQN to identify the AP queue to
+ which the AP command-request message is to be sent for processing.
+
+* AP Instructions:
+
+ There are three AP instructions:
+
+ * NQAP: to enqueue an AP command-request message to a queue
+ * DQAP: to dequeue an AP command-reply message from a queue
+ * PQAP: to administer the queues
+
+ AP instructions identify the domain that is targeted to process the AP
+ command; this must be one of the usage domains. An AP command may modify a
+ domain that is not one of the usage domains, but the modified domain
+ must be one of the control domains.
+
+Start Interpretive Execution (SIE) Instruction
+==============================================
+A KVM guest is started by executing the Start Interpretive Execution (SIE)
+instruction. The SIE state description is a control block that contains the
+state information for a KVM guest and is supplied as input to the SIE
+instruction. The SIE state description contains a satellite control block called
+the Crypto Control Block (CRYCB). The CRYCB contains three fields to identify
+the adapters, usage domains and control domains assigned to the KVM guest:
+
+* The AP Mask (APM) field is a bit mask that identifies the AP adapters assigned
+ to the KVM guest. Each bit in the mask, from left to right, corresponds to
+ an APID from 0-255. If a bit is set, the corresponding adapter is valid for
+ use by the KVM guest.
+
+* The AP Queue Mask (AQM) field is a bit mask identifying the AP usage domains
+ assigned to the KVM guest. Each bit in the mask, from left to right,
+ corresponds to an AP queue index (APQI) from 0-255. If a bit is set, the
+ corresponding queue is valid for use by the KVM guest.
+
+* The AP Domain Mask field is a bit mask that identifies the AP control domains
+ assigned to the KVM guest. The ADM bit mask controls which domains can be
+ changed by an AP command-request message sent to a usage domain from the
+ guest. Each bit in the mask, from left to right, corresponds to a domain from
+ 0-255. If a bit is set, the corresponding domain can be modified by an AP
+ command-request message sent to a usage domain.
+
+If you recall from the description of an AP Queue, AP instructions include
+an APQN to identify the AP adapter and AP queue to which an AP command-request
+message is to be sent (NQAP and PQAP instructions), or from which a
+command-reply message is to be received (DQAP instruction). The validity of an
+APQN is defined by the matrix calculated from the APM and AQM; it is the
+cross product of all assigned adapter numbers (APM) with all assigned queue
+indexes (AQM). For example, if adapters 1 and 2 and usage domains 5 and 6 are
+assigned to a guest, the APQNs (1,5), (1,6), (2,5) and (2,6) will be valid for
+the guest.
+
+The APQNs can provide secure key functionality - i.e., a private key is stored
+on the adapter card for each of its domains - so each APQN must be assigned to
+at most one guest or the linux host.
+
+ Example 1: Valid configuration:
+ ------------------------------
+ Guest1: adapters 1,2 domains 5,6
+ Guest2: adapter 1,2 domain 7
+
+ This is valid because both guests have a unique set of APQNs: Guest1 has
+ APQNs (1,5), (1,6), (2,5) and (2,6); Guest2 has APQNs (1,7) and (2,7).
+
+ Example 2: Valid configuration:
+ ------------------------------
+ Guest1: adapters 1,2 domains 5,6
+ Guest2: adapters 3,4 domains 5,6
+
+ This is also valid because both guests have a unique set of APQNs:
+ Guest1 has APQNs (1,5), (1,6), (2,5), (2,6);
+ Guest2 has APQNs (3,5), (3,6), (4,5), (4,6)
+
+ Example 3: Invalid configuration:
+ --------------------------------
+ Guest1: adapters 1,2 domains 5,6
+ Guest2: adapter 1 domains 6,7
+
+ This is an invalid configuration because both guests have access to
+ APQN (1,6).
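
To make the cross product above concrete, here is a small standalone C sketch (an editorial illustration with invented names, not part of the patch or of QEMU) that tests whether an APQN is valid for a guest and reports the overlap from Example 3:

    #include <stdbool.h>
    #include <stdio.h>

    /* A guest's AP matrix reduced to plain arrays for illustration. */
    struct guest {
        const char *name;
        int apids[4]; int n_apids;   /* assigned adapters (APM)      */
        int apqis[4]; int n_apqis;   /* assigned usage domains (AQM) */
    };

    /* An APQN is valid for a guest iff its APID and APQI are both assigned. */
    static bool has_apqn(const struct guest *g, int apid, int apqi)
    {
        bool apid_ok = false, apqi_ok = false;
        for (int i = 0; i < g->n_apids; i++) apid_ok |= (g->apids[i] == apid);
        for (int i = 0; i < g->n_apqis; i++) apqi_ok |= (g->apqis[i] == apqi);
        return apid_ok && apqi_ok;
    }

    int main(void)
    {
        /* Example 3 from the text: both guests can reach APQN (1,6). */
        struct guest g1 = { "Guest1", {1, 2}, 2, {5, 6}, 2 };
        struct guest g2 = { "Guest2", {1},    1, {6, 7}, 2 };

        for (int apid = 0; apid < 256; apid++) {
            for (int apqi = 0; apqi < 256; apqi++) {
                if (has_apqn(&g1, apid, apqi) && has_apqn(&g2, apid, apqi)) {
                    printf("invalid configuration: APQN (%d,%d) is shared\n",
                           apid, apqi);
                }
            }
        }
        return 0;
    }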
+
+AP Matrix Configuration on Linux Host:
+=====================================
+A linux system is a guest of the LPAR in which it is running and has access to
+the AP resources configured for the LPAR. The LPAR's AP matrix is
+configured via its Activation Profile which can be edited on the HMC. When the
+linux system is started, the AP bus will detect the AP devices assigned to the
+LPAR and create the following in sysfs:
+
+/sys/bus/ap
+... [devices]
+...... xx.yyyy
+...... ...
+...... cardxx
+...... ...
+
+Where:
+ cardxx is AP adapter number xx (in hex)
+....xx.yyyy is an APQN with xx specifying the APID and yyyy specifying the
+ APQI
+
+For example, if AP adapters 5 and 6 and domains 4, 71 (0x47), 171 (0xab) and
+255 (0xff) are configured for the LPAR, the sysfs representation on the linux
+host system would look like this:
+
+/sys/bus/ap
+... [devices]
+...... 05.0004
+...... 05.0047
+...... 05.00ab
+...... 05.00ff
+...... 06.0004
+...... 06.0047
+...... 06.00ab
+...... 06.00ff
+...... card05
+...... card06
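
The xx.yyyy entries above are simply the adapter ID and queue index printed in hexadecimal. A tiny standalone C sketch of that formatting (illustrative only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned int apid = 0x05;   /* adapter 5  */
        unsigned int apqi = 0xab;   /* domain 171 */
        char name[8];

        snprintf(name, sizeof(name), "%02x.%04x", apid, apqi);
        printf("%s\n", name);       /* prints 05.00ab */
        return 0;
    }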
+
+A set of default device drivers are also created to control each type of AP
+device that can be assigned to the LPAR on which a linux host is running:
+
+/sys/bus/ap
+... [drivers]
+...... [cex2acard] for Crypto Express 2/3 accelerator cards
+...... [cex2aqueue] for AP queues served by Crypto Express 2/3
+ accelerator cards
+...... [cex4card] for Crypto Express 4/5/6 accelerator and coprocessor
+ cards
+...... [cex4queue] for AP queues served by Crypto Express 4/5/6
+ accelerator and coprocessor cards
+...... [pcixcccard] for Crypto Express 2/3 coprocessor cards
+...... [pcixccqueue] for AP queues served by Crypto Express 2/3
+ coprocessor cards
+
+Binding AP devices to device drivers
+------------------------------------
+There are two sysfs files that specify bitmasks marking a subset of the APQN
+range as 'usable by the default AP queue device drivers' or 'not usable by the
+default device drivers' and thus available for use by the alternate device
+driver(s). The sysfs locations of the masks are:
+
+ /sys/bus/ap/apmask
+ /sys/bus/ap/aqmask
+
+ The 'apmask' is a 256-bit mask that identifies a set of AP adapter IDs
+ (APID). Each bit in the mask, from left to right (i.e., from most significant
+ to least significant bit in big endian order), corresponds to an APID from
+ 0-255. If a bit is set, the APID is marked as usable only by the default AP
+ queue device drivers; otherwise, the APID is usable by the vfio_ap
+ device driver.
+
+ The 'aqmask' is a 256-bit mask that identifies a set of AP queue indexes
+ (APQI). Each bit in the mask, from left to right (i.e., from most significant
+ to least significant bit in big endian order), corresponds to an APQI from
+ 0-255. If a bit is set, the APQI is marked as usable only by the default AP
+ queue device drivers; otherwise, the APQI is usable by the vfio_ap device
+ driver.
+
+ Take, for example, the following mask:
+
+ 0x7dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+
+ It indicates:
+
+ 1, 2, 3, 4, 5, and 7-255 belong to the default drivers' pool, and 0 and 6
+ belong to the vfio_ap device driver's pool.
+
+ The APQN of each AP queue device assigned to the linux host is checked by the
+ AP bus against the set of APQNs derived from the cross product of APIDs
+ and APQIs marked as usable only by the default AP queue device drivers. If a
+ match is detected, only the default AP queue device drivers will be probed;
+ otherwise, the vfio_ap device driver will be probed.
+
+ By default, the two masks are set to reserve all APQNs for use by the default
+ AP queue device drivers. There are two ways the default masks can be changed:
+
+ 1. The sysfs mask files can be edited by echoing a string into the
+ respective sysfs mask file in one of two formats:
+
+ * An absolute hex string starting with 0x - like "0x12345678" - sets
+ the mask. If the given string is shorter than the mask, it is padded
+ with 0s on the right; for example, specifying a mask value of 0x41 is
+ the same as specifying:
+
+ 0x4100000000000000000000000000000000000000000000000000000000000000
+
+ Keep in mind that the mask reads from left to right (i.e., most
+ significant to least significant bit in big endian order), so the mask
+ above identifies device numbers 1 and 7 (01000001).
+
+ If the string is longer than the mask, the operation is terminated with
+ an error (EINVAL).
+
+ * Individual bits in the mask can be switched on and off by specifying
+ each bit number to be switched in a comma separated list. Each bit
+ number string must be prepended with a plus ('+') or minus ('-') to indicate
+ the corresponding bit is to be switched on ('+') or off ('-'). Some
+ valid values are:
+
+ "+0" switches bit 0 on
+ "-13" switches bit 13 off
+ "+0x41" switches bit 65 on
+ "-0xff" switches bit 255 off
+
+ The following example:
+ +0,-6,+0x47,-0xf0
+
+ Switches bits 0 and 71 (0x47) on
+ Switches bits 6 and 240 (0xf0) off
+
+ Note that the bits not specified in the list remain as they were before
+ the operation.
+
+ 2. The masks can also be changed at boot time via parameters on the kernel
+ command line like this:
+
+ ap.apmask=0xffff ap.aqmask=0x40
+
+ This would create the following masks:
+
+ apmask:
+ 0xffff000000000000000000000000000000000000000000000000000000000000
+
+ aqmask:
+ 0x4000000000000000000000000000000000000000000000000000000000000000
+
+ Resulting in these two pools:
+
+ default drivers pool: adapter 0-15, domain 1
+ alternate drivers pool: adapter 16-255, domains 0, 2-255
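
The left-to-right (most-significant-bit-first) numbering used by these masks can be illustrated with a short standalone C sketch (an editorial example with invented names, not part of the patch or of the kernel): setting bits 1 and 7 of a 256-bit mask produces the 0x41... value discussed earlier.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MASK_BITS 256

    /* Bit 0 is the most significant bit of byte 0. */
    static void set_bit_msb0(uint8_t *mask, unsigned int bit)
    {
        mask[bit / 8] |= 0x80u >> (bit % 8);
    }

    int main(void)
    {
        uint8_t mask[MASK_BITS / 8];
        memset(mask, 0, sizeof(mask));

        set_bit_msb0(mask, 1);   /* device number 1 */
        set_bit_msb0(mask, 7);   /* device number 7 */

        printf("0x");
        for (size_t i = 0; i < sizeof(mask); i++) {
            printf("%02x", mask[i]);
        }
        printf("\n");            /* prints 0x41 followed by 62 zeros */
        return 0;
    }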
+
+Configuring an AP matrix for a linux guest.
+------------------------------------------
+The sysfs interfaces for configuring an AP matrix for a guest are built on the
+VFIO mediated device framework. To configure an AP matrix for a guest, a
+mediated matrix device must first be created for the /sys/devices/vfio_ap/matrix
+device. When the vfio_ap device driver is loaded, it registers with the VFIO
+mediated device framework. When the driver registers, the sysfs interfaces for
+creating mediated matrix devices are created:
+
+/sys/devices
+... [vfio_ap]
+......[matrix]
+......... [mdev_supported_types]
+............ [vfio_ap-passthrough]
+............... create
+............... [devices]
+
+A mediated AP matrix device is created by writing a UUID to the attribute file
+named 'create', for example:
+
+ uuidgen > create
+
+ or
+
+ echo $uuid > create
+
+When a mediated AP matrix device is created, a sysfs directory named after
+the UUID is created in the 'devices' subdirectory:
+
+/sys/devices
+... [vfio_ap]
+......[matrix]
+......... [mdev_supported_types]
+............ [vfio_ap-passthrough]
+............... create
+............... [devices]
+.................. [$uuid]
+
+There will also be three sets of attribute files created in the mediated
+matrix device's sysfs directory to configure an AP matrix for the
+KVM guest:
+
+/sys/devices
+... [vfio_ap]
+......[matrix]
+......... [mdev_supported_types]
+............ [vfio_ap-passthrough]
+............... create
+............... [devices]
+.................. [$uuid]
+..................... assign_adapter
+..................... assign_control_domain
+..................... assign_domain
+..................... matrix
+..................... unassign_adapter
+..................... unassign_control_domain
+..................... unassign_domain
+
+assign_adapter
+ To assign an AP adapter to the mediated matrix device, its APID is written
+ to the 'assign_adapter' file. This may be done multiple times to assign more
+ than one adapter. The APID may be specified using conventional semantics
+ as a decimal, hexadecimal, or octal number. For example, to assign adapters
+ 4, 5 and 16 to a mediated matrix device in decimal, hexadecimal and octal
+ respectively:
+
+ echo 4 > assign_adapter
+ echo 0x5 > assign_adapter
+ echo 020 > assign_adapter
+
+ In order to successfully assign an adapter:
+
+ * The adapter number specified must represent a value from 0 up to the
+ maximum adapter number allowed by the machine model. If an adapter number
+ higher than the maximum is specified, the operation will terminate with
+ an error (ENODEV).
+
+ * All APQNs that can be derived from the adapter ID being assigned and the
+ IDs of the previously assigned domains must be bound to the vfio_ap device
+ driver. If no domains have yet been assigned, then there must be at least
+ one APQN with the specified APID bound to the vfio_ap driver. If no such
+ APQNs are bound to the driver, the operation will terminate with an
+ error (EADDRNOTAVAIL).
+
+ No APQN that can be derived from the adapter ID and the IDs of the
+ previously assigned domains can be assigned to another mediated matrix
+ device. If an APQN is assigned to another mediated matrix device, the
+ operation will terminate with an error (EADDRINUSE).
+
+unassign_adapter
+ To unassign an AP adapter, its APID is written to the 'unassign_adapter'
+ file. This may also be done multiple times to unassign more than one adapter.
+
+assign_domain
+ To assign a usage domain, the domain number is written into the
+ 'assign_domain' file. This may be done multiple times to assign more than one
+ usage domain. The domain number is specified using conventional semantics as
+ a decimal, hexadecimal, or octal number. For example, to assign usage domains
+ 4, 8, and 71 to a mediated matrix device in decimal, hexadecimal and octal
+ respectively:
+
+ echo 4 > assign_domain
+ echo 0x8 > assign_domain
+ echo 0107 > assign_domain
+
+ In order to successfully assign a domain:
+
+ * The domain number specified must represent a value from 0 up to the
+ maximum domain number allowed by the machine model. If a domain number
+ higher than the maximum is specified, the operation will terminate with
+ an error (ENODEV).
+
+ * All APQNs that can be derived from the domain ID being assigned and the IDs
+ of the previously assigned adapters must be bound to the vfio_ap device
+ driver. If no adapters have yet been assigned, then there must be at least
+ one APQN with the specified APQI bound to the vfio_ap driver. If no such
+ APQNs are bound to the driver, the operation will terminate with an
+ error (EADDRNOTAVAIL).
+
+ No APQN that can be derived from the domain ID being assigned and the IDs
+ of the previously assigned adapters can be assigned to another mediated
+ matrix device. If an APQN is assigned to another mediated matrix device,
+ the operation will terminate with an error (EADDRINUSE).
+
+unassign_domain
+ To unassign a usage domain, the domain number is written into the
+ 'unassign_domain' file. This may be done multiple times to unassign more than
+ one usage domain.
+
+assign_control_domain
+ To assign a control domain, the domain number is written into the
+ 'assign_control_domain' file. This may be done multiple times to
+ assign more than one control domain. The domain number may be specified using
+ conventional semantics as a decimal, hexadecimal, or octal number. For
+ example, to assign control domains 4, 8, and 71 to a mediated matrix device
+ in decimal, hexadecimal and octal respectively:
+
+ echo 4 > assign_control_domain
+ echo 0x8 > assign_control_domain
+ echo 0107 > assign_control_domain
+
+ In order to successfully assign a control domain, the domain number
+ specified must represent a value from 0 up to the maximum domain number
+ allowed by the machine model. If a control domain number higher than the
+ maximum is specified, the operation will terminate with an error (ENODEV).
+
+unassign_control_domain
+ To unassign a control domain, the domain number is written into the
+ 'unassign_control_domain' file. This may be done multiple times to unassign more than
+ one control domain.
+
+Notes: Hot plug/unplug is not currently supported for mediated AP matrix
+devices, so no changes to the AP matrix will be allowed while a guest using
+the mediated matrix device is running. Attempts to assign an adapter,
+domain or control domain will be rejected and an error (EBUSY) returned.
+
+Starting a Linux Guest Configured with an AP Matrix:
+===================================================
+To provide a mediated matrix device for use by a guest, the following option
+must be specified on the QEMU command line:
+
+ -device vfio-ap,sysfsdev=$path-to-mdev
+
+The sysfsdev parameter specifies the path to the mediated matrix device.
+There are a number of ways to specify this path:
+
+/sys/devices/vfio_ap/matrix/$uuid
+/sys/bus/mdev/devices/$uuid
+/sys/bus/mdev/drivers/vfio_mdev/$uuid
+/sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/devices/$uuid
+
+When the linux guest is started, the guest will open the mediated
+matrix device's file descriptor to get information about the mediated matrix
+device. The vfio_ap device driver will update the APM, AQM, and ADM fields in
+the guest's CRYCB with the adapter, usage domain and control domains assigned
+via the mediated matrix device's sysfs attribute files. Programs running on the
+linux guest will then:
+
+1. Have direct access to the APQNs derived from the cross product of the AP
+ adapter numbers (APID) and queue indexes (APQI) specified in the APM and AQM
+ fields of the guest's CRYCB respectively. These APQNs identify the AP queues
+ that are valid for use by the guest; meaning, AP commands can be sent by the
+ guest to any of these queues for processing.
+
+2. Have authorization to process AP commands to change a control domain
+ identified in the ADM field of the guest's CRYCB. The AP command must be sent
+ to a valid APQN (see 1 above).
+
+CPU model features:
+
+Three CPU model features are available for controlling guest access to AP
+facilities:
+
+1. AP facilities feature
+
+ The AP facilities feature indicates that AP facilities are installed on the
+ guest. This feature will be exposed for use only if the AP facilities
+ are installed on the host system. The feature is s390-specific and is
+ represented as a parameter of the -cpu option on the QEMU command line:
+
+ qemu-system-s390x -cpu $model,ap=on|off
+
+ Where:
+
+ $model is the CPU model defined for the guest (defaults to the model of
+ the host system if not specified).
+
+ ap=on|off indicates whether AP facilities are installed (on) or not
+ (off). The default for CPU models zEC12 or newer
+ is ap=on. AP facilities must be installed on the guest if a
+ vfio-ap device (-device vfio-ap,sysfsdev=$path) is configured
+ for the guest, or the guest will fail to start.
+
+2. Query Configuration Information (QCI) facility
+
+ The QCI facility is used by the AP bus running on the guest to query the
+ configuration of the AP facilities. This facility will be available
+ only if the QCI facility is installed on the host system. The feature is
+ s390-specific and is represented as a parameter of the -cpu option on the
+ QEMU command line:
+
+ qemu-system-s390x -cpu $model,apqci=on|off
+
+ Where:
+
+ $model is the CPU model defined for the guest
+
+ apqci=on|off indicates whether the QCI facility is installed (on) or
+ not (off). The default for CPU models zEC12 or newer
+ is apqci=on; for older models, QCI will not be installed.
+
+ If QCI is installed (apqci=on) but AP facilities are not
+ (ap=off), an error message will be logged, but the guest
+ will be allowed to start. It makes no sense to have QCI
+ installed if the AP facilities are not; this is considered
+ an invalid configuration.
+
+ If the QCI facility is not installed, APQNs with an APQI
+ greater than 15 will not be detected by the AP bus
+ running on the guest.
+
+3. Adjunct Process Facility Test (APFT) facility
+
+ The APFT facility is used by the AP bus running on the guest to test the
+ AP facilities available for a given AP queue. This facility will be available
+ only if the APFT facility is installed on the host system. The feature is
+ s390-specific and is represented as a parameter of the -cpu option on the
+ QEMU command line:
+
+ qemu-system-s390x -cpu $model,apft=on|off
+
+ Where:
+
+ $model is the CPU model defined for the guest (defaults to the model of
+ the host system if not specified).
+
+ apft=on|off indicates whether the APFT facility is installed (on) or
+ not (off). The default for CPU models zEC12 and
+ newer is apft=on; for older models, APFT will not be
+ installed.
+
+ If APFT is installed (apft=on) but AP facilities are not
+ (ap=off), an error message will be logged, but the guest
+ will be allowed to start. It makes no sense to have APFT
+ installed if the AP facilities are not; this is considered
+ an invalid configuration.
+
+ It also makes no sense to turn APFT off because the AP bus
+ running on the guest will not detect CEX4 and newer devices
+ without it. Since only CEX4 and newer devices are supported
+ for guest usage, no AP devices can be made accessible to a
+ guest started without APFT installed.
+
+Example: Configure AP Matrices for Three Linux Guests:
+=====================================================
+Let's now provide an example to illustrate how KVM guests may be given
+access to AP facilities. For this example, we will show how to configure
+three guests such that executing the lszcrypt command on the guests would
+look like this:
+
+Guest1
+------
+CARD.DOMAIN TYPE MODE
+------------------------------
+05 CEX5C CCA-Coproc
+05.0004 CEX5C CCA-Coproc
+05.00ab CEX5C CCA-Coproc
+06 CEX5A Accelerator
+06.0004 CEX5A Accelerator
+06.00ab CEX5A Accelerator
+
+Guest2
+------
+CARD.DOMAIN TYPE MODE
+------------------------------
+05 CEX5A Accelerator
+05.0047 CEX5A Accelerator
+05.00ff CEX5A Accelerator
+
+Guest3
+------
+CARD.DOMAIN TYPE MODE
+------------------------------
+06 CEX5A Accelerator
+06.0047 CEX5A Accelerator
+06.00ff CEX5A Accelerator
+
+These are the steps:
+
+1. Install the vfio_ap module on the linux host. The dependency chain for the
+ vfio_ap module is:
+ * iommu
+ * s390
+ * zcrypt
+ * vfio
+ * vfio_mdev
+ * vfio_mdev_device
+ * KVM
+
+ To build the vfio_ap module, the kernel build must be configured with the
+ following Kconfig elements selected:
+ * IOMMU_SUPPORT
+ * S390
+ * ZCRYPT
+ * S390_AP_IOMMU
+ * VFIO
+ * VFIO_MDEV
+ * VFIO_MDEV_DEVICE
+ * KVM
+
+ If using make menuconfig select the following to build the vfio_ap module:
+ -> Device Drivers
+ -> IOMMU Hardware Support
+ select S390 AP IOMMU Support
+ -> VFIO Non-Privileged userspace driver framework
+ -> Mediated device driver framework
+ -> VFIO driver for Mediated devices
+ -> I/O subsystem
+ -> VFIO support for AP devices
+
+2. Secure the AP queues to be used by the three guests so that the host can not
+ access them. To secure the AP queues 05.0004, 05.0047, 05.00ab, 05.00ff,
+ 06.0004, 06.0047, 06.00ab, and 06.00ff for use by the vfio_ap device driver,
+ the corresponding APQNs must be removed from the default queue drivers pool
+ as follows:
+
+ echo -5,-6 > /sys/bus/ap/apmask
+
+ echo -4,-0x47,-0xab,-0xff > /sys/bus/ap/aqmask
+
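+ The effect of these writes can be verified by reading the masks back (an
+ illustrative check; the masks are reported as hexadecimal bit strings):
+
+ cat /sys/bus/ap/apmask
+ cat /sys/bus/ap/aqmask
+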
+ This will result in AP queues 05.0004, 05.0047, 05.00ab, 05.00ff, 06.0004,
+ 06.0047, 06.00ab, and 06.00ff getting bound to the vfio_ap device driver. The
+ sysfs directory for the vfio_ap device driver will now contain symbolic links
+ to the AP queue devices bound to it:
+
+ /sys/bus/ap
+ ... [drivers]
+ ...... [vfio_ap]
+ ......... [05.0004]
+ ......... [05.0047]
+ ......... [05.00ab]
+ ......... [05.00ff]
+ ......... [06.0004]
+ ......... [06.0047]
+ ......... [06.00ab]
+ ......... [06.00ff]
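+
+ The queues bound to the driver can also be listed directly (an illustrative
+ check):
+
+ ls /sys/bus/ap/drivers/vfio_ap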
+
+ Keep in mind that only type 10 and newer adapters (i.e., CEX4 and later)
+ can be bound to the vfio_ap device driver. This restriction keeps the
+ implementation simple: supporting older devices, which will go out of
+ service in the relatively near future and for which few systems remain
+ available for testing, would needlessly complicate the design.
+
+ The administrator, therefore, must take care to secure only AP queues that
+ can be bound to the vfio_ap device driver. The device type for a given AP
+ queue device can be read from the parent card's sysfs directory. For example,
+ to see the hardware type of the queue 05.0004:
+
+ cat /sys/bus/ap/devices/card05/hwtype
+
+ The hwtype must be 10 or higher (CEX4 or newer) in order to be bound to the
+ vfio_ap device driver.
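+
+ To check all cards at once, a small shell loop can be used (an illustrative
+ sketch assuming the sysfs layout shown above):
+
+ for card in /sys/bus/ap/devices/card*; do
+     echo "$card: hwtype $(cat $card/hwtype)"
+ done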
+
+3. Create the mediated devices needed to configure the AP matrixes for the
+ three guests and to provide an interface to the vfio_ap driver for
+ use by the guests:
+
+ /sys/devices/vfio_ap/matrix/
+ --- [mdev_supported_types]
+ ------ [vfio_ap-passthrough] (passthrough mediated matrix device type)
+ --------- create
+ --------- [devices]
+
+ To create the mediated devices for the three guests:
+
+ uuidgen > create
+ uuidgen > create
+ uuidgen > create
+
+ or
+
+ echo $uuid1 > create
+ echo $uuid2 > create
+ echo $uuid3 > create
+
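+ Note that the create attribute lives under the matrix device's
+ mdev_supported_types directory, so the first of these commands, written
+ with full paths, would look like this (an illustrative sketch):
+
+ uuid1=$(uuidgen)
+ echo $uuid1 > /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/create
+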
+ This will create three mediated devices in the [devices] subdirectory named
+ after the UUID used to create the mediated device. We'll call them $uuid1,
+ $uuid2 and $uuid3 and this is the sysfs directory structure after creation:
+
+ /sys/devices/vfio_ap/matrix/
+ --- [mdev_supported_types]
+ ------ [vfio_ap-passthrough]
+ --------- [devices]
+ ------------ [$uuid1]
+ --------------- assign_adapter
+ --------------- assign_control_domain
+ --------------- assign_domain
+ --------------- matrix
+ --------------- unassign_adapter
+ --------------- unassign_control_domain
+ --------------- unassign_domain
+
+ ------------ [$uuid2]
+ --------------- assign_adapter
+ --------------- assign_control_domain
+ --------------- assign_domain
+ --------------- matrix
+ --------------- unassign_adapter
+ --------------- unassign_control_domain
+ --------------- unassign_domain
+
+ ------------ [$uuid3]
+ --------------- assign_adapter
+ --------------- assign_control_domain
+ --------------- assign_domain
+ --------------- matrix
+ --------------- unassign_adapter
+ --------------- unassign_control_domain
+ --------------- unassign_domain
+
+4. The administrator now needs to configure the matrixes for the mediated
+ devices $uuid1 (for Guest1), $uuid2 (for Guest2) and $uuid3 (for Guest3).
+
+ This is how the matrix is configured for Guest1:
+
+ echo 5 > assign_adapter
+ echo 6 > assign_adapter
+ echo 4 > assign_domain
+ echo 0xab > assign_domain
+
+ Control domains can similarly be assigned using the assign_control_domain
+ sysfs file.
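+
+ For example, to also give Guest1 control of domain 0xab (an illustrative
+ value; the example configuration above does not require any control
+ domains):
+
+ echo 0xab > assign_control_domain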
+
+ If a mistake is made configuring an adapter, domain or control domain,
+ you can use the unassign_xxx interfaces to unassign the adapter, domain or
+ control domain.
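+
+ For example, to undo an accidental assignment of adapter 6 (illustrative
+ only):
+
+ echo 6 > unassign_adapter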
+
+ To display the matrix configuration for Guest1:
+
+ cat matrix
+
+ The output will display the APQNs in the format xx.yyyy, where xx is
+ the adapter number and yyyy is the domain number. The output for Guest1
+ will look like this:
+
+ 05.0004
+ 05.00ab
+ 06.0004
+ 06.00ab
+
+ This is how the matrix is configured for Guest2:
+
+ echo 5 > assign_adapter
+ echo 0x47 > assign_domain
+ echo 0xff > assign_domain
+
+ This is how the matrix is configured for Guest3:
+
+ echo 6 > assign_adapter
+ echo 0x47 > assign_domain
+ echo 0xff > assign_domain
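+
+ Reading the matrix files of $uuid2 and $uuid3 should then show, respectively:
+
+ 05.0047
+ 05.00ff
+
+ and
+
+ 06.0047
+ 06.00ff
+
+ which matches the lszcrypt output shown at the beginning of this example.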
+
+5. Start Guest1:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid1 ...
+
+6. Start Guest2:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid2 ...
+
+7. Start Guest3:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid3 ...
+
+When the guest is shut down, the mediated matrix devices may be removed.
+
+Using our example again, to remove the mediated matrix device $uuid1:
+
+ /sys/devices/vfio_ap/matrix/
+ --- [mdev_supported_types]
+ ------ [vfio_ap-passthrough]
+ --------- [devices]
+ ------------ [$uuid1]
+ --------------- remove
+
+
+ echo 1 > remove
+
+ This will remove all of the mdev matrix device's sysfs structures including
+ the mdev device itself. To recreate and reconfigure the mdev matrix device,
+ all of the steps starting with step 3 will have to be performed again. Note
+ that the remove will fail if a guest using the mdev is still running.
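+
+ After a successful removal, listing the [devices] directory (an illustrative
+ check) should no longer show $uuid1:
+
+ ls /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/devices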
+
+ It is not necessary to remove an mdev matrix device, but one may want to
+ remove it if no guest will use it during the remaining lifetime of the linux
+ host. If the mdev matrix device is removed, one may want to also reconfigure
+ the pool of adapters and queues reserved for use by the default drivers.
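+
+ For example, to return the adapters and queues secured in step 2 to the
+ default drivers pool (a sketch that assumes the same apmask/aqmask syntax
+ used in step 2, with '+' adding the bits back):
+
+ echo +5,+6 > /sys/bus/ap/apmask
+ echo +4,+0x47,+0xab,+0xff > /sys/bus/ap/aqmask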
+
+Limitations
+===========
+* The KVM/kernel interfaces do not provide a way to prevent restoring an APQN
+ to the default drivers pool of a queue that is still assigned to a mediated
+ device in use by a guest. It is incumbent upon the administrator to
+ ensure there is no mediated device in use by a guest to which the APQN is
+ assigned lest the host be given access to the private data of the AP queue
+ device, such as a private key configured specifically for the guest.
+
+* Dynamically modifying the AP matrix for a running guest (which would amount to
+ hot(un)plug of AP devices for the guest) is currently not supported.
+
+* Live guest migration is not supported for guests using AP devices.
--
1.8.3.1

View File

@ -1,89 +0,0 @@
From dbf0257cf3587d5580765cbd2040f370820fb5e3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= <marcandre.lureau@redhat.com>
Date: Tue, 2 Oct 2018 12:34:03 +0100
Subject: vnc: call sasl_server_init() only when required
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
RH-Author: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: <20181002123403.20747-2-marcandre.lureau@redhat.com>
Patchwork-id: 82356
O-Subject: [RHEL8/rhel qemu-kvm PATCH 1/1] vnc: call sasl_server_init() only when required
Bugzilla: 1609327
RH-Acked-by: Daniel P. Berrange <berrange@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>
VNC server is calling sasl_server_init() during startup of QEMU, even
if SASL auth has not been enabled.
This may create undesirable warnings like "Could not find keytab file:
/etc/qemu/krb5.tab" when the user didn't configure SASL on host and
started VNC server.
Instead, only initialize SASL when needed. Note that HMP/QMP "change
vnc" calls vnc_display_open() again, which will initialize SASL if
needed.
Fix assignment in if condition, while touching this code.
Related to:
https://bugzilla.redhat.com/show_bug.cgi?id=1609327
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-id: 20180907063634.359-1-marcandre.lureau@redhat.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
(cherry picked from commit b5dc0d7d565048fcf2767060261d8385805aced1)
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1609327
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18601393
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
Conflicts:
ui/vnc.c
Due to "qemu"->"qemu-kvm" rename.
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
---
ui/vnc.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/ui/vnc.c b/ui/vnc.c
index 050c421..b3fe7d7 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -3878,9 +3878,6 @@ void vnc_display_open(const char *id, Error **errp)
bool reverse = false;
const char *credid;
bool sasl = false;
-#ifdef CONFIG_VNC_SASL
- int saslErr;
-#endif
int acl = 0;
int lock_key_sync = 1;
int key_delay_ms;
@@ -4054,10 +4051,14 @@ void vnc_display_open(const char *id, Error **errp)
trace_vnc_auth_init(vd, 1, vd->ws_auth, vd->ws_subauth);
#ifdef CONFIG_VNC_SASL
- if ((saslErr = sasl_server_init(NULL, "qemu-kvm")) != SASL_OK) {
- error_setg(errp, "Failed to initialize SASL auth: %s",
- sasl_errstring(saslErr, NULL, NULL));
- goto fail;
+ if (sasl) {
+ int saslErr = sasl_server_init(NULL, "qemu-kvm");
+
+ if (saslErr != SASL_OK) {
+ error_setg(errp, "Failed to initialize SASL auth: %s",
+ sasl_errstring(saslErr, NULL, NULL));
+ goto fail;
+ }
}
#endif
vd->lock_key_sync = lock_key_sync;
--
1.8.3.1

View File

@ -1,52 +0,0 @@
From c10de200e291af4a6a5cb41ac10e1ae7a2b9c5b2 Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Wed, 10 Oct 2018 18:19:23 +0100
Subject: nbd/server: fix NBD_CMD_CACHE
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20181010181924.30470-2-jsnow@redhat.com>
Patchwork-id: 82576
O-Subject: [RHEL8/rhel qemu-kvm PATCH 1/2] nbd/server: fix NBD_CMD_CACHE
Bugzilla: 1636142
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
We should not go to structured-read branch on CACHE command, fix that.
Bug introduced in bc37b06a5cde24 "nbd/server: introduce NBD_CMD_CACHE"
with the whole feature and affects 3.0.0 release.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
CC: qemu-stable@nongnu.org
Message-Id: <20181003144738.70670-1-vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
[eblake: commit message typo fix]
Signed-off-by: Eric Blake <eblake@redhat.com>
(cherry picked from commit 2f454defc23e1be78f2a96bad2877ce7829f61b4)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
nbd/server.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/nbd/server.c b/nbd/server.c
index ea5fe0e..1ce3f44 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -2135,7 +2135,8 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
}
if (client->structured_reply && !(request->flags & NBD_CMD_FLAG_DF) &&
- request->len) {
+ request->len && request->type != NBD_CMD_CACHE)
+ {
return nbd_co_send_sparse_read(client, request->handle, request->from,
data, request->len, errp);
}
--
1.8.3.1

View File

@ -1,96 +0,0 @@
From 24022cbbfd2230d4781a079d1856e0315895c8ce Mon Sep 17 00:00:00 2001
From: John Snow <jsnow@redhat.com>
Date: Wed, 10 Oct 2018 18:19:24 +0100
Subject: nbd: fix NBD_FLAG_SEND_CACHE value
RH-Author: John Snow <jsnow@redhat.com>
Message-id: <20181010181924.30470-3-jsnow@redhat.com>
Patchwork-id: 82578
O-Subject: [RHEL8/rhel qemu-kvm PATCH 2/2] nbd: fix NBD_FLAG_SEND_CACHE value
Bugzilla: 1636142
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
From: "Denis V. Lunev" <den@openvz.org>
Commit bc37b06a5 added NBD_CMD_CACHE support, but used the wrong value
for NBD_FLAG_SEND_CACHE flag for negotiation. That commit picked bit 8,
which had already been assigned by the NBD specification to mean
NBD_FLAG_CAN_MULTI_CONN, and which was already implemented in the
Linux kernel as a part of stable userspace-kernel API since 4.10:
"bit 8, NBD_FLAG_CAN_MULTI_CONN: Indicates that the server operates
entirely without cache, or that the cache it uses is shared among all
connections to the given device. In particular, if this flag is
present, then the effects of NBD_CMD_FLUSH and NBD_CMD_FLAG_FUA
MUST be visible across all connections when the server sends its reply
to that command to the client. In the absence of this flag, clients
SHOULD NOT multiplex their commands over more than one connection to
the export.
...
bit 10, NBD_FLAG_SEND_CACHE: documents that the server understands
NBD_CMD_CACHE; however, note that server implementations exist
which support the command without advertising this bit, and
conversely that this bit does not guarantee that the command will
succeed or have an impact."
Consequences:
- a client trying to use NBD_CMD_CACHE per the NBD spec will not
see the feature as available from a qemu 3.0 server (not fatal,
clients already have to be prepared for caching to not exist)
- a client accidentally coded to the qemu 3.0 bit value instead
of following the spec may interpret NBD_CMD_CACHE as being available
when it is not (probably not fatal, the spec says the server should
gracefully fail unknown commands, and that clients of NBD_CMD_CACHE
should be prepared for failure even when the feature is advertised);
such clients are unlikely (perhaps only in unreleased Virtuozzo code),
and will disappear over time
- a client prepared to use multiple connections based on
NBD_FLAG_CAN_MULTI_CONN may cause data corruption when it assumes
that caching is consistent when in reality qemu 3.0 did not have
a consistent cache. Partially mitigated by using read-only
connections (where nothing needs to be flushed, so caching is
indeed consistent) or when using qemu-nbd with the default -e 1
(at most one client at a time); visible only when using -e 2 or
more for a writable export.
Thus the commit fixes negotiation flag in QEMU according to the
specification.
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
CC: Valery Vdovin <valery.vdovin@acronis.com>
CC: Eric Blake <eblake@redhat.com>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: qemu-stable@nongnu.org
Message-Id: <20181004100313.4253-1-den@openvz.org>
Reviewed-by: Eric Blake <eblake@redhat.com>
[eblake: enhance commit message, add defines for unimplemented flags]
Signed-off-by: Eric Blake <eblake@redhat.com>
(cherry picked from commit df91328adab8490367776d2b21b35d790a606120)
Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
include/block/nbd.h | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/include/block/nbd.h b/include/block/nbd.h
index 4638c83..a53b0cf 100644
--- a/include/block/nbd.h
+++ b/include/block/nbd.h
@@ -135,7 +135,9 @@ typedef struct NBDExtent {
#define NBD_FLAG_SEND_TRIM (1 << 5) /* Send TRIM (discard) */
#define NBD_FLAG_SEND_WRITE_ZEROES (1 << 6) /* Send WRITE_ZEROES */
#define NBD_FLAG_SEND_DF (1 << 7) /* Send DF (Do not Fragment) */
-#define NBD_FLAG_SEND_CACHE (1 << 8) /* Send CACHE (prefetch) */
+#define NBD_FLAG_CAN_MULTI_CONN (1 << 8) /* Multi-client cache consistent */
+#define NBD_FLAG_SEND_RESIZE (1 << 9) /* Send resize */
+#define NBD_FLAG_SEND_CACHE (1 << 10) /* Send CACHE (prefetch) */
/* New-style handshake (global) flags, sent from server to client, and
control what will happen during handshake phase. */
--
1.8.3.1

View File

@ -1,134 +0,0 @@
From ca570895f9825c8ed6691bb520341ac9e07bac5a Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:21:52 +0100
Subject: block/linux-aio: acquire AioContext before
qemu_laio_process_completions
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-14-kwolf@redhat.com>
Patchwork-id: 82603
O-Subject: [RHEL-8 qemu-kvm PATCH 23/44] block/linux-aio: acquire AioContext before qemu_laio_process_completions
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
From: Sergio Lopez <slp@redhat.com>
In qemu_laio_process_completions_and_submit, the AioContext is acquired
before the ioq_submit iteration and after qemu_laio_process_completions,
but the latter is not thread safe either.
This change avoids a number of random crashes when the Main Thread and
an IO Thread collide processing completions for the same AioContext.
This is an example of such crash:
- The IO Thread is trying to acquire the AioContext at aio_co_enter,
which evidences that it didn't lock it before:
Thread 3 (Thread 0x7fdfd8bd8700 (LWP 36743)):
#0 0x00007fdfe0dd542d in __lll_lock_wait () at ../nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S:135
#1 0x00007fdfe0dd0de6 in _L_lock_870 () at /lib64/libpthread.so.0
#2 0x00007fdfe0dd0cdf in __GI___pthread_mutex_lock (mutex=mutex@entry=0x5631fde0e6c0)
at ../nptl/pthread_mutex_lock.c:114
#3 0x00005631fc0603a7 in qemu_mutex_lock_impl (mutex=0x5631fde0e6c0, file=0x5631fc23520f "util/async.c", line=511) at util/qemu-thread-posix.c:66
#4 0x00005631fc05b558 in aio_co_enter (ctx=0x5631fde0e660, co=0x7fdfcc0c2b40) at util/async.c:493
#5 0x00005631fc05b5ac in aio_co_wake (co=<optimized out>) at util/async.c:478
#6 0x00005631fbfc51ad in qemu_laio_process_completion (laiocb=<optimized out>) at block/linux-aio.c:104
#7 0x00005631fbfc523c in qemu_laio_process_completions (s=s@entry=0x7fdfc0297670)
at block/linux-aio.c:222
#8 0x00005631fbfc5499 in qemu_laio_process_completions_and_submit (s=0x7fdfc0297670)
at block/linux-aio.c:237
#9 0x00005631fc05d978 in aio_dispatch_handlers (ctx=ctx@entry=0x5631fde0e660) at util/aio-posix.c:406
#10 0x00005631fc05e3ea in aio_poll (ctx=0x5631fde0e660, blocking=blocking@entry=true)
at util/aio-posix.c:693
#11 0x00005631fbd7ad96 in iothread_run (opaque=0x5631fde0e1c0) at iothread.c:64
#12 0x00007fdfe0dcee25 in start_thread (arg=0x7fdfd8bd8700) at pthread_create.c:308
#13 0x00007fdfe0afc34d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:113
- The Main Thread is also processing completions from the same
AioContext, and crashes due to failed assertion at util/iov.c:78:
Thread 1 (Thread 0x7fdfeb5eac80 (LWP 36740)):
#0 0x00007fdfe0a391f7 in __GI_raise (sig=sig@entry=6) at ../nptl/sysdeps/unix/sysv/linux/raise.c:56
#1 0x00007fdfe0a3a8e8 in __GI_abort () at abort.c:90
#2 0x00007fdfe0a32266 in __assert_fail_base (fmt=0x7fdfe0b84e68 "%s%s%s:%u: %s%sAssertion `%s' failed.\n%n", assertion=assertion@entry=0x5631fc238ccb "offset == 0", file=file@entry=0x5631fc23698e "util/iov.c", line=line@entry=78, function=function@entry=0x5631fc236adc <__PRETTY_FUNCTION__.15220> "iov_memset")
at assert.c:92
#3 0x00007fdfe0a32312 in __GI___assert_fail (assertion=assertion@entry=0x5631fc238ccb "offset == 0", file=file@entry=0x5631fc23698e "util/iov.c", line=line@entry=78, function=function@entry=0x5631fc236adc <__PRETTY_FUNCTION__.15220> "iov_memset") at assert.c:101
#4 0x00005631fc065287 in iov_memset (iov=<optimized out>, iov_cnt=<optimized out>, offset=<optimized out>, offset@entry=65536, fillc=fillc@entry=0, bytes=15515191315812405248) at util/iov.c:78
#5 0x00005631fc065a63 in qemu_iovec_memset (qiov=<optimized out>, offset=offset@entry=65536, fillc=fillc@entry=0, bytes=<optimized out>) at util/iov.c:410
#6 0x00005631fbfc5178 in qemu_laio_process_completion (laiocb=0x7fdd920df630) at block/linux-aio.c:88
#7 0x00005631fbfc523c in qemu_laio_process_completions (s=s@entry=0x7fdfc0297670)
at block/linux-aio.c:222
#8 0x00005631fbfc5499 in qemu_laio_process_completions_and_submit (s=0x7fdfc0297670)
at block/linux-aio.c:237
#9 0x00005631fbfc54ed in qemu_laio_poll_cb (opaque=<optimized out>) at block/linux-aio.c:272
#10 0x00005631fc05d85e in run_poll_handlers_once (ctx=ctx@entry=0x5631fde0e660) at util/aio-posix.c:497
#11 0x00005631fc05e2ca in aio_poll (blocking=false, ctx=0x5631fde0e660) at util/aio-posix.c:574
#12 0x00005631fc05e2ca in aio_poll (ctx=0x5631fde0e660, blocking=blocking@entry=false)
at util/aio-posix.c:604
#13 0x00005631fbfcb8a3 in bdrv_do_drained_begin (ignore_parent=<optimized out>, recursive=<optimized out>, bs=<optimized out>) at block/io.c:273
#14 0x00005631fbfcb8a3 in bdrv_do_drained_begin (bs=0x5631fe8b6200, recursive=<optimized out>, parent=0x0, ignore_bds_parents=<optimized out>, poll=<optimized out>) at block/io.c:390
#15 0x00005631fbfbcd2e in blk_drain (blk=0x5631fe83ac80) at block/block-backend.c:1590
#16 0x00005631fbfbe138 in blk_remove_bs (blk=blk@entry=0x5631fe83ac80) at block/block-backend.c:774
#17 0x00005631fbfbe3d6 in blk_unref (blk=0x5631fe83ac80) at block/block-backend.c:401
#18 0x00005631fbfbe3d6 in blk_unref (blk=0x5631fe83ac80) at block/block-backend.c:449
#19 0x00005631fbfc9a69 in commit_complete (job=0x5631fe8b94b0, opaque=0x7fdfcc1bb080)
at block/commit.c:92
#20 0x00005631fbf7d662 in job_defer_to_main_loop_bh (opaque=0x7fdfcc1b4560) at job.c:973
#21 0x00005631fc05ad41 in aio_bh_poll (bh=0x7fdfcc01ad90) at util/async.c:90
#22 0x00005631fc05ad41 in aio_bh_poll (ctx=ctx@entry=0x5631fddffdb0) at util/async.c:118
#23 0x00005631fc05e210 in aio_dispatch (ctx=0x5631fddffdb0) at util/aio-posix.c:436
#24 0x00005631fc05ac1e in aio_ctx_dispatch (source=<optimized out>, callback=<optimized out>, user_data=<optimized out>) at util/async.c:261
#25 0x00007fdfeaae44c9 in g_main_context_dispatch (context=0x5631fde00140) at gmain.c:3201
#26 0x00007fdfeaae44c9 in g_main_context_dispatch (context=context@entry=0x5631fde00140) at gmain.c:3854
#27 0x00005631fc05d503 in main_loop_wait () at util/main-loop.c:215
#28 0x00005631fc05d503 in main_loop_wait (timeout=<optimized out>) at util/main-loop.c:238
#29 0x00005631fc05d503 in main_loop_wait (nonblocking=nonblocking@entry=0) at util/main-loop.c:497
#30 0x00005631fbd81412 in main_loop () at vl.c:1866
#31 0x00005631fbc18ff3 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>)
at vl.c:4647
- A closer examination shows that s->io_q.in_flight appears to have
gone backwards:
(gdb) frame 7
#7 0x00005631fbfc523c in qemu_laio_process_completions (s=s@entry=0x7fdfc0297670)
at block/linux-aio.c:222
222 qemu_laio_process_completion(laiocb);
(gdb) p s
$2 = (LinuxAioState *) 0x7fdfc0297670
(gdb) p *s
$3 = {aio_context = 0x5631fde0e660, ctx = 0x7fdfeb43b000, e = {rfd = 33, wfd = 33}, io_q = {plugged = 0,
in_queue = 0, in_flight = 4294967280, blocked = false, pending = {sqh_first = 0x0,
sqh_last = 0x7fdfc0297698}}, completion_bh = 0x7fdfc0280ef0, event_idx = 21, event_max = 241}
(gdb) p/x s->io_q.in_flight
$4 = 0xfffffff0
Signed-off-by: Sergio Lopez <slp@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit e091f0e905a4481f347913420f327d427f18d9d4)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/linux-aio.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 19eb922..217ce60 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -234,9 +234,9 @@ static void qemu_laio_process_completions(LinuxAioState *s)
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
+ aio_context_acquire(s->aio_context);
qemu_laio_process_completions(s);
- aio_context_acquire(s->aio_context);
if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
ioq_submit(s);
}
--
1.8.3.1

View File

@ -1,78 +0,0 @@
From faa3d5106cb296858227cc240e045ca16cb28c81 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:21:53 +0100
Subject: util/async: use qemu_aio_coroutine_enter in co_schedule_bh_cb
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-15-kwolf@redhat.com>
Patchwork-id: 82604
O-Subject: [RHEL-8 qemu-kvm PATCH 24/44] util/async: use qemu_aio_coroutine_enter in co_schedule_bh_cb
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
From: Sergio Lopez <slp@redhat.com>
AIO Coroutines shouldn't by managed by an AioContext different than the
one assigned when they are created. aio_co_enter avoids entering a
coroutine from a different AioContext, calling aio_co_schedule instead.
Scheduled coroutines are then entered by co_schedule_bh_cb using
qemu_coroutine_enter, which just calls qemu_aio_coroutine_enter with the
current AioContext obtained with qemu_get_current_aio_context.
Eventually, co->ctx will be set to the AioContext passed as an argument
to qemu_aio_coroutine_enter.
This means that, if an IO Thread's AioContext is being processed by the
Main Thread (due to aio_poll being called with a BDS AioContext, as it
happens in AIO_WAIT_WHILE among other places), the AioContext from some
coroutines may be wrongly replaced with the one from the Main Thread.
This is the root cause behind some crashes, mainly triggered by the
drain code at block/io.c. The most common are these abort and failed
assertion:
util/async.c:aio_co_schedule
456 if (scheduled) {
457 fprintf(stderr,
458 "%s: Co-routine was already scheduled in '%s'\n",
459 __func__, scheduled);
460 abort();
461 }
util/qemu-coroutine-lock.c:
286 assert(mutex->holder == self);
But it's also known to cause random errors at different locations, and
even SIGSEGV with broken coroutine backtraces.
By using qemu_aio_coroutine_enter directly in co_schedule_bh_cb, we can
pass the correct AioContext as an argument, making sure co->ctx is not
wrongly altered.
Signed-off-by: Sergio Lopez <slp@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 6808ae0417131f8dbe7b051256dff7a16634dc1d)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
util/async.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/util/async.c b/util/async.c
index 05979f8..c10642a 100644
--- a/util/async.c
+++ b/util/async.c
@@ -400,7 +400,7 @@ static void co_schedule_bh_cb(void *opaque)
/* Protected by write barrier in qemu_aio_coroutine_enter */
atomic_set(&co->scheduled, NULL);
- qemu_coroutine_enter(co);
+ qemu_aio_coroutine_enter(ctx, co);
aio_context_release(ctx);
}
}
--
1.8.3.1

View File

@ -1,105 +0,0 @@
From f78998e365809f77ed146ee2afdcf132b12c838c Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:21:54 +0100
Subject: job: Fix nested aio_poll() hanging in job_txn_apply
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-16-kwolf@redhat.com>
Patchwork-id: 82605
O-Subject: [RHEL-8 qemu-kvm PATCH 25/44] job: Fix nested aio_poll() hanging in job_txn_apply
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
From: Fam Zheng <famz@redhat.com>
All callers have acquired ctx already. Doing that again results in
aio_poll() hang. This fixes the problem that a BDRV_POLL_WHILE() in the
callback cannot make progress because ctx is recursively locked, for
example, when drive-backup finishes.
There are two callers of job_finalize():
fam@lemon:~/work/qemu [master]$ git grep -w -A1 '^\s*job_finalize'
blockdev.c: job_finalize(&job->job, errp);
blockdev.c- aio_context_release(aio_context);
--
job-qmp.c: job_finalize(job, errp);
job-qmp.c- aio_context_release(aio_context);
--
tests/test-blockjob.c: job_finalize(&job->job, &error_abort);
tests/test-blockjob.c- assert(job->job.status == JOB_STATUS_CONCLUDED);
Ignoring the test, it's easy to see both callers to job_finalize (and
job_do_finalize) have acquired the context.
Cc: qemu-stable@nongnu.org
Reported-by: Gu Nini <ngu@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 49880165a44f26dc84651858750facdee31f2513)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
job.c | 18 +++++-------------
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/job.c b/job.c
index bb322de..82b4692 100644
--- a/job.c
+++ b/job.c
@@ -136,21 +136,13 @@ static void job_txn_del_job(Job *job)
}
}
-static int job_txn_apply(JobTxn *txn, int fn(Job *), bool lock)
+static int job_txn_apply(JobTxn *txn, int fn(Job *))
{
- AioContext *ctx;
Job *job, *next;
int rc = 0;
QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
- if (lock) {
- ctx = job->aio_context;
- aio_context_acquire(ctx);
- }
rc = fn(job);
- if (lock) {
- aio_context_release(ctx);
- }
if (rc) {
break;
}
@@ -780,11 +772,11 @@ static void job_do_finalize(Job *job)
assert(job && job->txn);
/* prepare the transaction to complete */
- rc = job_txn_apply(job->txn, job_prepare, true);
+ rc = job_txn_apply(job->txn, job_prepare);
if (rc) {
job_completed_txn_abort(job);
} else {
- job_txn_apply(job->txn, job_finalize_single, true);
+ job_txn_apply(job->txn, job_finalize_single);
}
}
@@ -830,10 +822,10 @@ static void job_completed_txn_success(Job *job)
assert(other_job->ret == 0);
}
- job_txn_apply(txn, job_transition_to_pending, false);
+ job_txn_apply(txn, job_transition_to_pending);
/* If no jobs need manual finalization, automatically do so */
- if (job_txn_apply(txn, job_needs_finalize, false) == 0) {
+ if (job_txn_apply(txn, job_needs_finalize) == 0) {
job_do_finalize(job);
}
}
--
1.8.3.1

View File

@ -1,55 +0,0 @@
From bb58f00a6c09bd1fe9af6dabe9ea173adc406d7b Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:21:55 +0100
Subject: job: Fix missing locking due to mismerge
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-17-kwolf@redhat.com>
Patchwork-id: 82607
O-Subject: [RHEL-8 qemu-kvm PATCH 26/44] job: Fix missing locking due to mismerge
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
job_completed() had a problem with double locking that was recently
fixed independently by two different commits:
"job: Fix nested aio_poll() hanging in job_txn_apply"
"jobs: add exit shim"
One fix removed the first aio_context_acquire(), the other fix removed
the other one. Now we have a bug again and the code is run without any
locking.
Add it back in one of the places.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
(cherry picked from commit d1756c780b7879fb64e41135feac781d84a1f995)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
job.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/job.c b/job.c
index 82b4692..5c4e84f 100644
--- a/job.c
+++ b/job.c
@@ -847,7 +847,11 @@ static void job_completed(Job *job)
static void job_exit(void *opaque)
{
Job *job = (Job *)opaque;
+ AioContext *ctx = job->aio_context;
+
+ aio_context_acquire(ctx);
job_completed(job);
+ aio_context_release(ctx);
}
/**
--
1.8.3.1

View File

@ -1,161 +0,0 @@
From ac751d8909fa4b734fab48e27c0213df48ffd76b Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:21:56 +0100
Subject: blockjob: Wake up BDS when job becomes idle
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-18-kwolf@redhat.com>
Patchwork-id: 82610
O-Subject: [RHEL-8 qemu-kvm PATCH 27/44] blockjob: Wake up BDS when job becomes idle
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
In the context of draining a BDS, the .drained_poll callback of block
jobs is called. If this returns true (i.e. there is still some activity
pending), the drain operation may call aio_poll() with blocking=true to
wait for completion.
As soon as the pending activity is completed and the job finally arrives
in a quiescent state (i.e. its coroutine either yields with busy=false
or terminates), the block job must notify the aio_poll() loop to wake
up, otherwise we get a deadlock if both are running in different
threads.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 34dc97b9a0e592bc466bdb0bbfe45d77304a72b6)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
blockjob.c | 18 ++++++++++++++++++
include/block/blockjob.h | 13 +++++++++++++
include/qemu/job.h | 3 +++
job.c | 7 +++++++
4 files changed, 41 insertions(+)
diff --git a/blockjob.c b/blockjob.c
index be5903a..8d27e8e 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -221,6 +221,22 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
return 0;
}
+void block_job_wakeup_all_bdrv(BlockJob *job)
+{
+ GSList *l;
+
+ for (l = job->nodes; l; l = l->next) {
+ BdrvChild *c = l->data;
+ bdrv_wakeup(c->bs);
+ }
+}
+
+static void block_job_on_idle(Notifier *n, void *opaque)
+{
+ BlockJob *job = opaque;
+ block_job_wakeup_all_bdrv(job);
+}
+
bool block_job_is_internal(BlockJob *job)
{
return (job->job.id == NULL);
@@ -419,6 +435,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
job->finalize_completed_notifier.notify = block_job_event_completed;
job->pending_notifier.notify = block_job_event_pending;
job->ready_notifier.notify = block_job_event_ready;
+ job->idle_notifier.notify = block_job_on_idle;
notifier_list_add(&job->job.on_finalize_cancelled,
&job->finalize_cancelled_notifier);
@@ -426,6 +443,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
&job->finalize_completed_notifier);
notifier_list_add(&job->job.on_pending, &job->pending_notifier);
notifier_list_add(&job->job.on_ready, &job->ready_notifier);
+ notifier_list_add(&job->job.on_idle, &job->idle_notifier);
error_setg(&job->blocker, "block device is in use by block job: %s",
job_type_str(&job->job));
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index 32c00b7..2290bbb 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -70,6 +70,9 @@ typedef struct BlockJob {
/** Called when the job transitions to READY */
Notifier ready_notifier;
+ /** Called when the job coroutine yields or terminates */
+ Notifier idle_notifier;
+
/** BlockDriverStates that are involved in this block job */
GSList *nodes;
} BlockJob;
@@ -119,6 +122,16 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
void block_job_remove_all_bdrv(BlockJob *job);
/**
+ * block_job_wakeup_all_bdrv:
+ * @job: The block job
+ *
+ * Calls bdrv_wakeup() for all BlockDriverStates that have been added to the
+ * job. This function is to be called whenever child_job_drained_poll() would
+ * go from true to false to notify waiting drain requests.
+ */
+void block_job_wakeup_all_bdrv(BlockJob *job);
+
+/**
* block_job_set_speed:
* @job: The job to set the speed for.
* @speed: The new value
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 5cb0681..b4a784d 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -156,6 +156,9 @@ typedef struct Job {
/** Notifiers called when the job transitions to READY */
NotifierList on_ready;
+ /** Notifiers called when the job coroutine yields or terminates */
+ NotifierList on_idle;
+
/** Element of the list of jobs */
QLIST_ENTRY(Job) job_list;
diff --git a/job.c b/job.c
index 5c4e84f..48a767c 100644
--- a/job.c
+++ b/job.c
@@ -402,6 +402,11 @@ static void job_event_ready(Job *job)
notifier_list_notify(&job->on_ready, job);
}
+static void job_event_idle(Job *job)
+{
+ notifier_list_notify(&job->on_idle, job);
+}
+
void job_enter_cond(Job *job, bool(*fn)(Job *job))
{
if (!job_started(job)) {
@@ -447,6 +452,7 @@ static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
timer_mod(&job->sleep_timer, ns);
}
job->busy = false;
+ job_event_idle(job);
job_unlock();
qemu_coroutine_yield();
@@ -865,6 +871,7 @@ static void coroutine_fn job_co_entry(void *opaque)
assert(job && job->driver && job->driver->run);
job_pause_point(job);
job->ret = job->driver->run(job, &job->err);
+ job_event_idle(job);
job->deferred_to_main_loop = true;
aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
}
--
1.8.3.1

View File

@ -1,64 +0,0 @@
From 0e651f939d3fd65071a8edc8090a777bdb45b921 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:21:57 +0100
Subject: aio-wait: Increase num_waiters even in home thread
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-19-kwolf@redhat.com>
Patchwork-id: 82609
O-Subject: [RHEL-8 qemu-kvm PATCH 28/44] aio-wait: Increase num_waiters even in home thread
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
Even if AIO_WAIT_WHILE() is called in the home context of the
AioContext, we still want to allow the condition to change depending on
other threads as long as they kick the AioWait. Specifically, block jobs
can be running in an I/O thread and should then be able to kick a drain
in the main loop context.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit 486574483aba988c83b20e7d3f1ccd50c4c333d8)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
include/block/aio-wait.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index c85a62f..600fad1 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -76,6 +76,8 @@ typedef struct {
bool waited_ = false; \
AioWait *wait_ = (wait); \
AioContext *ctx_ = (ctx); \
+ /* Increment wait_->num_waiters before evaluating cond. */ \
+ atomic_inc(&wait_->num_waiters); \
if (ctx_ && in_aio_context_home_thread(ctx_)) { \
while ((cond)) { \
aio_poll(ctx_, true); \
@@ -84,8 +86,6 @@ typedef struct {
} else { \
assert(qemu_get_current_aio_context() == \
qemu_get_aio_context()); \
- /* Increment wait_->num_waiters before evaluating cond. */ \
- atomic_inc(&wait_->num_waiters); \
while ((cond)) { \
if (ctx_) { \
aio_context_release(ctx_); \
@@ -96,8 +96,8 @@ typedef struct {
} \
waited_ = true; \
} \
- atomic_dec(&wait_->num_waiters); \
} \
+ atomic_dec(&wait_->num_waiters); \
waited_; })
/**
--
1.8.3.1

View File

@ -1,208 +0,0 @@
From 6d374393478f0d57ec8cd338342687d043565662 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:21:58 +0100
Subject: test-bdrv-drain: Drain with block jobs in an I/O thread
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-20-kwolf@redhat.com>
Patchwork-id: 82608
O-Subject: [RHEL-8 qemu-kvm PATCH 29/44] test-bdrv-drain: Drain with block jobs in an I/O thread
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
This extends the existing drain test with a block job to include
variants where the block job runs in a different AioContext.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit f62c172959cd2b6de4dd8ba782e855d64d94764b)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/test-bdrv-drain.c | 92 +++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 86 insertions(+), 6 deletions(-)
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 9bcb3c7..3cf3ba3 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -174,6 +174,28 @@ static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
}
}
+static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
+{
+ if (drain_type != BDRV_DRAIN_ALL) {
+ aio_context_acquire(bdrv_get_aio_context(bs));
+ }
+ do_drain_begin(drain_type, bs);
+ if (drain_type != BDRV_DRAIN_ALL) {
+ aio_context_release(bdrv_get_aio_context(bs));
+ }
+}
+
+static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
+{
+ if (drain_type != BDRV_DRAIN_ALL) {
+ aio_context_acquire(bdrv_get_aio_context(bs));
+ }
+ do_drain_end(drain_type, bs);
+ if (drain_type != BDRV_DRAIN_ALL) {
+ aio_context_release(bdrv_get_aio_context(bs));
+ }
+}
+
static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
{
BlockBackend *blk;
@@ -785,11 +807,13 @@ BlockJobDriver test_job_driver = {
},
};
-static void test_blockjob_common(enum drain_type drain_type)
+static void test_blockjob_common(enum drain_type drain_type, bool use_iothread)
{
BlockBackend *blk_src, *blk_target;
BlockDriverState *src, *target;
BlockJob *job;
+ IOThread *iothread = NULL;
+ AioContext *ctx;
int ret;
src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
@@ -797,21 +821,31 @@ static void test_blockjob_common(enum drain_type drain_type)
blk_src = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
blk_insert_bs(blk_src, src, &error_abort);
+ if (use_iothread) {
+ iothread = iothread_new();
+ ctx = iothread_get_aio_context(iothread);
+ blk_set_aio_context(blk_src, ctx);
+ } else {
+ ctx = qemu_get_aio_context();
+ }
+
target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
&error_abort);
blk_target = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
blk_insert_bs(blk_target, target, &error_abort);
+ aio_context_acquire(ctx);
job = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL,
0, 0, NULL, NULL, &error_abort);
block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
job_start(&job->job);
+ aio_context_release(ctx);
g_assert_cmpint(job->job.pause_count, ==, 0);
g_assert_false(job->job.paused);
g_assert_true(job->job.busy); /* We're in job_sleep_ns() */
- do_drain_begin(drain_type, src);
+ do_drain_begin_unlocked(drain_type, src);
if (drain_type == BDRV_DRAIN_ALL) {
/* bdrv_drain_all() drains both src and target */
@@ -822,7 +856,14 @@ static void test_blockjob_common(enum drain_type drain_type)
g_assert_true(job->job.paused);
g_assert_false(job->job.busy); /* The job is paused */
- do_drain_end(drain_type, src);
+ do_drain_end_unlocked(drain_type, src);
+
+ if (use_iothread) {
+ /* paused is reset in the I/O thread, wait for it */
+ while (job->job.paused) {
+ aio_poll(qemu_get_aio_context(), false);
+ }
+ }
g_assert_cmpint(job->job.pause_count, ==, 0);
g_assert_false(job->job.paused);
@@ -841,32 +882,64 @@ static void test_blockjob_common(enum drain_type drain_type)
do_drain_end(drain_type, target);
+ if (use_iothread) {
+ /* paused is reset in the I/O thread, wait for it */
+ while (job->job.paused) {
+ aio_poll(qemu_get_aio_context(), false);
+ }
+ }
+
g_assert_cmpint(job->job.pause_count, ==, 0);
g_assert_false(job->job.paused);
g_assert_true(job->job.busy); /* We're in job_sleep_ns() */
+ aio_context_acquire(ctx);
ret = job_complete_sync(&job->job, &error_abort);
g_assert_cmpint(ret, ==, 0);
+ if (use_iothread) {
+ blk_set_aio_context(blk_src, qemu_get_aio_context());
+ }
+ aio_context_release(ctx);
+
blk_unref(blk_src);
blk_unref(blk_target);
bdrv_unref(src);
bdrv_unref(target);
+
+ if (iothread) {
+ iothread_join(iothread);
+ }
}
static void test_blockjob_drain_all(void)
{
- test_blockjob_common(BDRV_DRAIN_ALL);
+ test_blockjob_common(BDRV_DRAIN_ALL, false);
}
static void test_blockjob_drain(void)
{
- test_blockjob_common(BDRV_DRAIN);
+ test_blockjob_common(BDRV_DRAIN, false);
}
static void test_blockjob_drain_subtree(void)
{
- test_blockjob_common(BDRV_SUBTREE_DRAIN);
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, false);
+}
+
+static void test_blockjob_iothread_drain_all(void)
+{
+ test_blockjob_common(BDRV_DRAIN_ALL, true);
+}
+
+static void test_blockjob_iothread_drain(void)
+{
+ test_blockjob_common(BDRV_DRAIN, true);
+}
+
+static void test_blockjob_iothread_drain_subtree(void)
+{
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, true);
}
@@ -1337,6 +1410,13 @@ int main(int argc, char **argv)
g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
test_blockjob_drain_subtree);
+ g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
+ test_blockjob_iothread_drain_all);
+ g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
+ test_blockjob_iothread_drain);
+ g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree",
+ test_blockjob_iothread_drain_subtree);
+
g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
--
1.8.3.1

View File

@ -1,86 +0,0 @@
From 99172abebcedfb48ca06d4c1bd0cd16372449600 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:21:59 +0100
Subject: test-blockjob: Acquire AioContext around job_cancel_sync()
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-21-kwolf@redhat.com>
Patchwork-id: 82606
O-Subject: [RHEL-8 qemu-kvm PATCH 30/44] test-blockjob: Acquire AioContext around job_cancel_sync()
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
All callers in QEMU proper hold the AioContext lock when calling
job_finish_sync(). test-blockjob should do the same when it calls the
function indirectly through job_cancel_sync().
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit 30c070a547322a5e41ce129d540bca3653b1a9c8)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
include/qemu/job.h | 6 ++++++
tests/test-blockjob.c | 6 ++++++
2 files changed, 12 insertions(+)
diff --git a/include/qemu/job.h b/include/qemu/job.h
index b4a784d..63c60ef 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -524,6 +524,8 @@ void job_user_cancel(Job *job, bool force, Error **errp);
*
* Returns the return value from the job if the job actually completed
* during the call, or -ECANCELED if it was canceled.
+ *
+ * Callers must hold the AioContext lock of job->aio_context.
*/
int job_cancel_sync(Job *job);
@@ -541,6 +543,8 @@ void job_cancel_sync_all(void);
* function).
*
* Returns the return value from the job.
+ *
+ * Callers must hold the AioContext lock of job->aio_context.
*/
int job_complete_sync(Job *job, Error **errp);
@@ -566,6 +570,8 @@ void job_dismiss(Job **job, Error **errp);
*
* Returns 0 if the job is successfully completed, -ECANCELED if the job was
* cancelled before completing, and -errno in other error cases.
+ *
+ * Callers must hold the AioContext lock of job->aio_context.
*/
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp);
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index de4c1c2..652d1e8 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -223,6 +223,10 @@ static void cancel_common(CancelJob *s)
BlockJob *job = &s->common;
BlockBackend *blk = s->blk;
JobStatus sts = job->job.status;
+ AioContext *ctx;
+
+ ctx = job->job.aio_context;
+ aio_context_acquire(ctx);
job_cancel_sync(&job->job);
if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
@@ -232,6 +236,8 @@ static void cancel_common(CancelJob *s)
assert(job->job.status == JOB_STATUS_NULL);
job_unref(&job->job);
destroy_blk(blk);
+
+ aio_context_release(ctx);
}
static void test_cancel_created(void)
--
1.8.3.1

View File

@ -1,77 +0,0 @@
From 3f3282c8ffa29e3dbcf58618beefb36afe8ba79b Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:00 +0100
Subject: job: Use AIO_WAIT_WHILE() in job_finish_sync()
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-22-kwolf@redhat.com>
Patchwork-id: 82612
O-Subject: [RHEL-8 qemu-kvm PATCH 31/44] job: Use AIO_WAIT_WHILE() in job_finish_sync()
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
job_finish_sync() needs to release the AioContext lock of the job before
calling aio_poll(). Otherwise, callbacks called by aio_poll() would
possibly take the lock a second time and run into a deadlock with a
nested AIO_WAIT_WHILE() call.
Also, job_drain() without aio_poll() isn't necessarily enough to make
progress on a job, it could depend on bottom halves to be executed.
Combine both open-coded while loops into a single AIO_WAIT_WHILE() call
that solves both of these problems.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit de0fbe64806321fc3e6399bfab360553db87a41d)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
job.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/job.c b/job.c
index 48a767c..fa74558 100644
--- a/job.c
+++ b/job.c
@@ -29,6 +29,7 @@
#include "qemu/job.h"
#include "qemu/id.h"
#include "qemu/main-loop.h"
+#include "block/aio-wait.h"
#include "trace-root.h"
#include "qapi/qapi-events-job.h"
@@ -962,6 +963,7 @@ void job_complete(Job *job, Error **errp)
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
{
Error *local_err = NULL;
+ AioWait dummy_wait = {};
int ret;
job_ref(job);
@@ -974,14 +976,10 @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
job_unref(job);
return -EBUSY;
}
- /* job_drain calls job_enter, and it should be enough to induce progress
- * until the job completes or moves to the main thread. */
- while (!job->deferred_to_main_loop && !job_is_completed(job)) {
- job_drain(job);
- }
- while (!job_is_completed(job)) {
- aio_poll(qemu_get_aio_context(), true);
- }
+
+ AIO_WAIT_WHILE(&dummy_wait, job->aio_context,
+ (job_drain(job), !job_is_completed(job)));
+
ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
job_unref(job);
return ret;
--
1.8.3.1

View File

@ -1,59 +0,0 @@
From b9c555343b6567159effe1b3eb736fd1e02257bd Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:01 +0100
Subject: test-bdrv-drain: Test AIO_WAIT_WHILE() in completion callback
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-23-kwolf@redhat.com>
Patchwork-id: 82611
O-Subject: [RHEL-8 qemu-kvm PATCH 32/44] test-bdrv-drain: Test AIO_WAIT_WHILE() in completion callback
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
This is a regression test for a deadlock that occurred in block job
completion callbacks (via job_defer_to_main_loop) because the AioContext
lock was taken twice: once in job_finish_sync() and then again in
job_defer_to_main_loop_bh(). This would cause AIO_WAIT_WHILE() to hang.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit ae23dde9dd486e57e152a0ebc9802caddedc45fc)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/test-bdrv-drain.c | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 3cf3ba3..05f3b55 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -774,6 +774,15 @@ typedef struct TestBlockJob {
bool should_complete;
} TestBlockJob;
+static int test_job_prepare(Job *job)
+{
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
+
+ /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
+ blk_flush(s->common.blk);
+ return 0;
+}
+
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
@@ -804,6 +813,7 @@ BlockJobDriver test_job_driver = {
.drain = block_job_drain,
.run = test_job_run,
.complete = test_job_complete,
+ .prepare = test_job_prepare,
},
};
--
1.8.3.1

View File

@ -1,96 +0,0 @@
From 51c1069568d78941554c70f9084531c279899c83 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:02 +0100
Subject: block: Add missing locking in bdrv_co_drain_bh_cb()
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-24-kwolf@redhat.com>
Patchwork-id: 82613
O-Subject: [RHEL-8 qemu-kvm PATCH 33/44] block: Add missing locking in bdrv_co_drain_bh_cb()
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
bdrv_do_drained_begin/end() assume that they are called with the
AioContext lock of bs held. If we call drain functions from a coroutine
with the AioContext lock held, we yield and schedule a BH to move out of
coroutine context. This means that the lock for the home context of the
coroutine is released and must be re-acquired in the bottom half.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit aa1361d54aac43094b98024b8b6c804eb6e41661)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/io.c | 15 +++++++++++++++
include/qemu/coroutine.h | 5 +++++
util/qemu-coroutine.c | 5 +++++
3 files changed, 25 insertions(+)
diff --git a/block/io.c b/block/io.c
index 7100344..914ba78 100644
--- a/block/io.c
+++ b/block/io.c
@@ -288,6 +288,18 @@ static void bdrv_co_drain_bh_cb(void *opaque)
BlockDriverState *bs = data->bs;
if (bs) {
+ AioContext *ctx = bdrv_get_aio_context(bs);
+ AioContext *co_ctx = qemu_coroutine_get_aio_context(co);
+
+ /*
+ * When the coroutine yielded, the lock for its home context was
+ * released, so we need to re-acquire it here. If it explicitly
+ * acquired a different context, the lock is still held and we don't
+ * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
+ */
+ if (ctx == co_ctx) {
+ aio_context_acquire(ctx);
+ }
bdrv_dec_in_flight(bs);
if (data->begin) {
bdrv_do_drained_begin(bs, data->recursive, data->parent,
@@ -296,6 +308,9 @@ static void bdrv_co_drain_bh_cb(void *opaque)
bdrv_do_drained_end(bs, data->recursive, data->parent,
data->ignore_bds_parents);
}
+ if (ctx == co_ctx) {
+ aio_context_release(ctx);
+ }
} else {
assert(data->begin);
bdrv_drain_all_begin();
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index 6f8a487..9801e7f 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -90,6 +90,11 @@ void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co);
void coroutine_fn qemu_coroutine_yield(void);
/**
+ * Get the AioContext of the given coroutine
+ */
+AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co);
+
+/**
* Get the currently executing coroutine
*/
Coroutine *coroutine_fn qemu_coroutine_self(void);
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index 1ba4191..2295928 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -198,3 +198,8 @@ bool qemu_coroutine_entered(Coroutine *co)
{
return co->caller;
}
+
+AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co)
+{
+ return co->ctx;
+}
--
1.8.3.1

View File

@ -1,66 +0,0 @@
From ea3026a59a3772f84697af9b62b6272cfb41f40c Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:03 +0100
Subject: block-backend: Add .drained_poll callback
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-25-kwolf@redhat.com>
Patchwork-id: 82614
O-Subject: [RHEL-8 qemu-kvm PATCH 34/44] block-backend: Add .drained_poll callback
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
A bdrv_drain operation must ensure that all parents are quiesced, this
includes BlockBackends. Otherwise, callbacks called by requests that are
completed on the BDS layer, but not quite yet on the BlockBackend layer
could still create new requests.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit fe5258a503a87e69be37c9ac48799e293809386e)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/block-backend.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/block/block-backend.c b/block/block-backend.c
index f2f75a9..2b837d1 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -121,6 +121,7 @@ static void blk_root_inherit_options(int *child_flags, QDict *child_options,
abort();
}
static void blk_root_drained_begin(BdrvChild *child);
+static bool blk_root_drained_poll(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);
static void blk_root_change_media(BdrvChild *child, bool load);
@@ -294,6 +295,7 @@ static const BdrvChildRole child_root = {
.get_parent_desc = blk_root_get_parent_desc,
.drained_begin = blk_root_drained_begin,
+ .drained_poll = blk_root_drained_poll,
.drained_end = blk_root_drained_end,
.activate = blk_root_activate,
@@ -2192,6 +2194,13 @@ static void blk_root_drained_begin(BdrvChild *child)
}
}
+static bool blk_root_drained_poll(BdrvChild *child)
+{
+ BlockBackend *blk = child->opaque;
+ assert(blk->quiesce_counter);
+ return !!blk->in_flight;
+}
+
static void blk_root_drained_end(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
--
1.8.3.1
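
The .drained_poll callback added here answers a single question for the drain loop: does this parent still have requests in flight? A minimal stand-alone C sketch of that polling contract, with hypothetical names rather than the QEMU API:

#include <stdbool.h>
#include <stdio.h>

typedef struct Parent {
    int in_flight;                 /* outstanding requests on this parent */
} Parent;

/* true means "still busy, the drain loop has to keep waiting" */
static bool parent_drained_poll(const Parent *p)
{
    return p->in_flight > 0;
}

static void drain_all(Parent *parents, int n)
{
    bool busy;
    do {
        busy = false;
        for (int i = 0; i < n; i++) {
            /* stand-in for letting one unit of pending work complete */
            if (parents[i].in_flight > 0) {
                parents[i].in_flight--;
            }
            busy |= parent_drained_poll(&parents[i]);
        }
    } while (busy);
}

int main(void)
{
    Parent parents[2] = { { .in_flight = 2 }, { .in_flight = 1 } };
    drain_all(parents, 2);
    printf("all parents quiescent: %d %d\n",
           parents[0].in_flight, parents[1].in_flight);
    return 0;
}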

View File

@ -1,67 +0,0 @@
From 21a2ef76c6aa33f0058d149b1bfdde1d27ba1df4 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:04 +0100
Subject: block-backend: Fix potential double blk_delete()
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-26-kwolf@redhat.com>
Patchwork-id: 82615
O-Subject: [RHEL-8 qemu-kvm PATCH 35/44] block-backend: Fix potential double blk_delete()
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
blk_unref() first decreases the refcount of the BlockBackend and calls
blk_delete() if the refcount reaches zero. Requests can still be in
flight at this point; they are only drained during blk_delete():
At this point, arbitrary callbacks can run. If any callback takes a
temporary BlockBackend reference, it will first increase the refcount to
1 and then decrease it to 0 again, triggering another blk_delete(). This
will cause a use-after-free crash in the outer blk_delete().
Fix it by draining the BlockBackend before decreasing the refcount to 0.
Assert in blk_ref() that it never takes the first refcount (which would
mean that the BlockBackend is already being deleted).
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 5ca9d21bd1c8eeb578d0964e31bd03d47c25773d)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/block-backend.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/block/block-backend.c b/block/block-backend.c
index 2b837d1..94046f0 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -436,6 +436,7 @@ int blk_get_refcnt(BlockBackend *blk)
*/
void blk_ref(BlockBackend *blk)
{
+ assert(blk->refcnt > 0);
blk->refcnt++;
}
@@ -448,7 +449,13 @@ void blk_unref(BlockBackend *blk)
{
if (blk) {
assert(blk->refcnt > 0);
- if (!--blk->refcnt) {
+ if (blk->refcnt > 1) {
+ blk->refcnt--;
+ } else {
+ blk_drain(blk);
+ /* blk_drain() cannot resurrect blk, nobody held a reference */
+ assert(blk->refcnt == 1);
+ blk->refcnt = 0;
blk_delete(blk);
}
}
--
1.8.3.1
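
The fix follows a general refcounting rule: drain pending work while the object still holds its last reference, so callbacks that take short-lived references cannot trigger a nested delete. A self-contained C sketch of the same rule, with hypothetical names (not QEMU code):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Obj {
    int refcnt;
    int pending;                    /* pretend in-flight work */
} Obj;

static void obj_unref(Obj *o);

static void obj_ref(Obj *o)
{
    assert(o->refcnt > 0);          /* never take the first reference again */
    o->refcnt++;
}

static void obj_drain(Obj *o)
{
    while (o->pending > 0) {
        o->pending--;
        /* a completion callback may take a temporary reference */
        obj_ref(o);
        obj_unref(o);
    }
}

static void obj_delete(Obj *o)
{
    printf("deleting object\n");
    free(o);
}

static void obj_unref(Obj *o)
{
    assert(o->refcnt > 0);
    if (o->refcnt > 1) {
        o->refcnt--;
    } else {
        obj_drain(o);               /* callbacks run while refcnt is still 1 */
        assert(o->refcnt == 1);     /* drain cannot resurrect the object */
        o->refcnt = 0;
        obj_delete(o);
    }
}

int main(void)
{
    Obj *o = calloc(1, sizeof(*o));
    o->refcnt = 1;
    o->pending = 3;
    obj_unref(o);                   /* drains, then deletes exactly once */
    return 0;
}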

View File

@ -1,74 +0,0 @@
From 91ae719381f75ed3554b0c5e1d8bf58583a9208f Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:05 +0100
Subject: block-backend: Decrease in_flight only after callback
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-27-kwolf@redhat.com>
Patchwork-id: 82617
O-Subject: [RHEL-8 qemu-kvm PATCH 36/44] block-backend: Decrease in_flight only after callback
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
Request callbacks can do pretty much anything, including operations that
will yield from the coroutine (such as draining the backend). In that
case, a decreased in_flight would be visible to other code and could
lead to a drain completing while the callback hasn't actually completed
yet.
Note that reordering these operations forbids calling drain directly
inside an AIO callback. As Paolo explains, indirectly calling it is
okay:
- Calling it through a coroutine is okay, because then
bdrv_drained_begin() goes through bdrv_co_yield_to_drain() and you
have in_flight=2 when bdrv_co_yield_to_drain() yields, then soon
in_flight=1 when the aio_co_wake() in the AIO callback completes, then
in_flight=0 after the bottom half starts.
- Calling it through a bottom half would be okay too, as long as the AIO
callback remembers to do inc_in_flight/dec_in_flight just like
bdrv_co_yield_to_drain() and bdrv_co_drain_bh_cb() do
A few more important cases that come to mind:
- A coroutine that yields because of I/O is okay, with a sequence
similar to bdrv_co_yield_to_drain().
- A coroutine that yields with no I/O pending will correctly decrease
in_flight to zero before yielding.
- Calling more AIO from the callback won't overflow the counter just
because of mutual recursion, because AIO functions always yield at
least once before invoking the callback.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 46aaf2a566e364a62315219255099cbf1c9b990d)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/block-backend.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/block/block-backend.c b/block/block-backend.c
index 94046f0..9a3e060 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1341,8 +1341,8 @@ static const AIOCBInfo blk_aio_em_aiocb_info = {
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
if (acb->has_returned) {
- blk_dec_in_flight(acb->rwco.blk);
acb->common.cb(acb->common.opaque, acb->rwco.ret);
+ blk_dec_in_flight(acb->rwco.blk);
qemu_aio_unref(acb);
}
}
--
1.8.3.1
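
The one-line reordering implements a simple invariant: a request stays visible to drain (in_flight > 0) until its completion callback has returned. Shown as a stand-alone C sketch with hypothetical names:

#include <stdio.h>

typedef struct Req {
    int *in_flight;
    void (*cb)(void *opaque, int ret);
    void *opaque;
    int ret;
} Req;

static void req_complete(Req *req)
{
    /* callback first: it may itself start a drain or issue new requests */
    req->cb(req->opaque, req->ret);
    (*req->in_flight)--;            /* only now does the request stop counting */
}

static void print_cb(void *opaque, int ret)
{
    printf("request for %s completed with %d\n", (const char *)opaque, ret);
}

int main(void)
{
    int in_flight = 1;
    Req req = { &in_flight, print_cb, "disk0", 0 };
    req_complete(&req);
    printf("in flight after completion: %d\n", in_flight);
    return 0;
}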

View File

@ -1,104 +0,0 @@
From bc17446b1e7c9578a3e3079173891c93998dfa00 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:06 +0100
Subject: blockjob: Lie better in child_job_drained_poll()
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-28-kwolf@redhat.com>
Patchwork-id: 82616
O-Subject: [RHEL-8 qemu-kvm PATCH 37/44] blockjob: Lie better in child_job_drained_poll()
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
Block jobs claim in .drained_poll() that they are in a quiescent state
as soon as job->deferred_to_main_loop is true. This is obviously wrong,
they still have a completion BH to run. We only get away with this
because commit 91af091f923 added an unconditional aio_poll(false) to the
drain functions, but this is bypassing the regular drain mechanisms.
However, just removing this and telling that the job is still active
doesn't work either: The completion callbacks themselves call drain
functions (directly, or indirectly with bdrv_reopen), so they would
deadlock then.
As a better lie, tell that the job is active as long as the BH is
pending, but falsely call it quiescent from the point in the BH when the
completion callback is called. At this point, nested drain calls won't
deadlock because they ignore the job, and outer drains will wait for the
job to really reach a quiescent state because the callback is already
running.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit b5a7a0573530698ee448b063ac01d485e30446bd)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
blockjob.c | 2 +-
include/qemu/job.h | 3 +++
job.c | 11 ++++++++++-
3 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/blockjob.c b/blockjob.c
index 8d27e8e..617d86f 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -164,7 +164,7 @@ static bool child_job_drained_poll(BdrvChild *c)
/* An inactive or completed job doesn't have any pending requests. Jobs
* with !job->busy are either already paused or have a pause point after
* being reentered, so no job driver code will run before they pause. */
- if (!job->busy || job_is_completed(job) || job->deferred_to_main_loop) {
+ if (!job->busy || job_is_completed(job)) {
return false;
}
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 63c60ef..9e7cd1e 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -76,6 +76,9 @@ typedef struct Job {
* Set to false by the job while the coroutine has yielded and may be
* re-entered by job_enter(). There may still be I/O or event loop activity
* pending. Accessed under block_job_mutex (in blockjob.c).
+ *
+ * When the job is deferred to the main loop, busy is true as long as the
+ * bottom half is still pending.
*/
bool busy;
diff --git a/job.c b/job.c
index fa74558..00a1cd1 100644
--- a/job.c
+++ b/job.c
@@ -857,7 +857,16 @@ static void job_exit(void *opaque)
AioContext *ctx = job->aio_context;
aio_context_acquire(ctx);
+
+ /* This is a lie, we're not quiescent, but still doing the completion
+ * callbacks. However, completion callbacks tend to involve operations that
+ * drain block nodes, and if .drained_poll still returned true, we would
+ * deadlock. */
+ job->busy = false;
+ job_event_idle(job);
+
job_completed(job);
+
aio_context_release(ctx);
}
@@ -872,8 +881,8 @@ static void coroutine_fn job_co_entry(void *opaque)
assert(job && job->driver && job->driver->run);
job_pause_point(job);
job->ret = job->driver->run(job, &job->err);
- job_event_idle(job);
job->deferred_to_main_loop = true;
+ job->busy = true;
aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
}
--
1.8.3.1
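
The interplay of busy, the deferred bottom half, and .drained_poll reduces to a small state machine: busy stays set while the completion BH is pending, and is cleared only once the completion handler itself runs, so nested drains started from that handler do not wait on the job while outer drains still do. A self-contained C sketch of that state machine, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

typedef struct Job {
    bool busy;                      /* job code running or completion BH pending */
    bool completed;
} Job;

/* true means "not quiescent yet, drain must keep waiting" */
static bool job_drained_poll(const Job *j)
{
    return j->busy && !j->completed;
}

static void job_exit_bh(Job *j)
{
    /* The lie: claim quiescence before running completion callbacks,
     * because those callbacks may drain and would otherwise wait on us. */
    j->busy = false;
    printf("nested drain during completion sees drained_poll=%d\n",
           job_drained_poll(j));
    j->completed = true;            /* the actual completion work */
}

int main(void)
{
    Job j = { .busy = false, .completed = false };

    j.busy = true;                  /* deferred to main loop: BH still pending */
    printf("outer drain while BH pending sees drained_poll=%d\n",
           job_drained_poll(&j));

    job_exit_bh(&j);
    printf("after completion: drained_poll=%d\n", job_drained_poll(&j));
    return 0;
}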

View File

@ -1,64 +0,0 @@
From ce7a9c21d6a43b736d5aa2041acbd5d1edca0070 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:07 +0100
Subject: block: Remove aio_poll() in bdrv_drain_poll variants
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-29-kwolf@redhat.com>
Patchwork-id: 82619
O-Subject: [RHEL-8 qemu-kvm PATCH 38/44] block: Remove aio_poll() in bdrv_drain_poll variants
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
bdrv_drain_poll_top_level() was buggy because it didn't release the
AioContext lock of the node to be drained before calling aio_poll().
This way, callbacks called by aio_poll() would possibly take the lock a
second time and run into a deadlock with a nested AIO_WAIT_WHILE() call.
However, it turns out that the aio_poll() call isn't actually needed any
more. It was introduced in commit 91af091f923, which is effectively
reverted by this patch. The cases it was supposed to fix are now covered
by bdrv_drain_poll(), which waits for block jobs to reach a quiescent
state.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 4cf077b59fc73eec29f8b7d082919dbb278bdc86)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block/io.c | 8 --------
1 file changed, 8 deletions(-)
diff --git a/block/io.c b/block/io.c
index 914ba78..8b81ff3 100644
--- a/block/io.c
+++ b/block/io.c
@@ -268,10 +268,6 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
BdrvChild *ignore_parent)
{
- /* Execute pending BHs first and check everything else only after the BHs
- * have executed. */
- while (aio_poll(bs->aio_context, false));
-
return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}
@@ -511,10 +507,6 @@ static bool bdrv_drain_all_poll(void)
BlockDriverState *bs = NULL;
bool result = false;
- /* Execute pending BHs first (may modify the graph) and check everything
- * else only after the BHs have executed. */
- while (aio_poll(qemu_get_aio_context(), false));
-
/* bdrv_drain_poll() can't make changes to the graph and we are holding the
* main AioContext lock, so iterating bdrv_next_all_states() is safe. */
while ((bs = bdrv_next_all_states(bs))) {
--
1.8.3.1

View File

@ -1,63 +0,0 @@
From 6c315602205e494dd084a4692a06c16b0e233875 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:08 +0100
Subject: test-bdrv-drain: Test nested poll in bdrv_drain_poll_top_level()
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-30-kwolf@redhat.com>
Patchwork-id: 82618
O-Subject: [RHEL-8 qemu-kvm PATCH 39/44] test-bdrv-drain: Test nested poll in bdrv_drain_poll_top_level()
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
This is a regression test for a deadlock that could occur in callbacks
called from the aio_poll() in bdrv_drain_poll_top_level(). The
AioContext lock wasn't released and therefore would be taken a second
time in the callback. This would cause a possible AIO_WAIT_WHILE() in
the callback to hang.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit ecc1a5c790cf2c7732cb9755ca388c2fe108d1a1)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/test-bdrv-drain.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 05f3b55..f4b57f7 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -636,6 +636,17 @@ static void test_iothread_aio_cb(void *opaque, int ret)
qemu_event_set(&done_event);
}
+static void test_iothread_main_thread_bh(void *opaque)
+{
+ struct test_iothread_data *data = opaque;
+
+ /* Test that the AioContext is not yet locked in a random BH that is
+ * executed during drain, otherwise this would deadlock. */
+ aio_context_acquire(bdrv_get_aio_context(data->bs));
+ bdrv_flush(data->bs);
+ aio_context_release(bdrv_get_aio_context(data->bs));
+}
+
/*
* Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
* The request involves a BH on iothread 2 before it can complete.
@@ -705,6 +716,8 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
aio_context_acquire(ctx_a);
}
+ aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);
+
/* The request is running on the IOThread a. Draining its block device
* will make sure that it has completed as far as the BDS is concerned,
* but the drain in this thread can continue immediately after
--
1.8.3.1

View File

@ -1,85 +0,0 @@
From 287d4267dcb2d5f262dba7f6e7f35dcd294b622a Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:09 +0100
Subject: job: Avoid deadlocks in job_completed_txn_abort()
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-31-kwolf@redhat.com>
Patchwork-id: 82622
O-Subject: [RHEL-8 qemu-kvm PATCH 40/44] job: Avoid deadlocks in job_completed_txn_abort()
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
Amongst other things, job_finalize_single() calls the .prepare/.commit/.abort
callbacks of the individual job driver. Recently, their use was adapted
for all block jobs so that they now involve code calling AIO_WAIT_WHILE().
Such code must be called under the AioContext lock for the
respective job, but without holding any other AioContext lock.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 644f3a29bd4974aefd46d2adb5062d86063c8a50)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
job.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/job.c b/job.c
index 00a1cd1..0b02186 100644
--- a/job.c
+++ b/job.c
@@ -718,6 +718,7 @@ static void job_cancel_async(Job *job, bool force)
static void job_completed_txn_abort(Job *job)
{
+ AioContext *outer_ctx = job->aio_context;
AioContext *ctx;
JobTxn *txn = job->txn;
Job *other_job;
@@ -731,23 +732,26 @@ static void job_completed_txn_abort(Job *job)
txn->aborting = true;
job_txn_ref(txn);
- /* We are the first failed job. Cancel other jobs. */
- QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
- ctx = other_job->aio_context;
- aio_context_acquire(ctx);
- }
+ /* We can only hold the single job's AioContext lock while calling
+ * job_finalize_single() because the finalization callbacks can involve
+ * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. */
+ aio_context_release(outer_ctx);
/* Other jobs are effectively cancelled by us, set the status for
* them; this job, however, may or may not be cancelled, depending
* on the caller, so leave it. */
QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
if (other_job != job) {
+ ctx = other_job->aio_context;
+ aio_context_acquire(ctx);
job_cancel_async(other_job, false);
+ aio_context_release(ctx);
}
}
while (!QLIST_EMPTY(&txn->jobs)) {
other_job = QLIST_FIRST(&txn->jobs);
ctx = other_job->aio_context;
+ aio_context_acquire(ctx);
if (!job_is_completed(other_job)) {
assert(job_is_cancelled(other_job));
job_finish_sync(other_job, NULL, NULL);
@@ -756,6 +760,8 @@ static void job_completed_txn_abort(Job *job)
aio_context_release(ctx);
}
+ aio_context_acquire(outer_ctx);
+
job_txn_unref(txn);
}
--
1.8.3.1
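
The locking change boils down to a rule: never hold more than one job's AioContext lock at a time; drop the caller's own lock first, then take each other job's lock only around the call that needs it, and restore the caller's lock at the end. A minimal sketch of that shape with plain pthread mutexes and hypothetical names:

#include <pthread.h>
#include <stdio.h>

typedef struct Job {
    pthread_mutex_t lock;
    const char *name;
} Job;

static void cancel_job(Job *j)
{
    printf("cancelling %s\n", j->name);
}

static void abort_txn(Job *self, Job **others, int n)
{
    pthread_mutex_unlock(&self->lock);      /* never hold two job locks at once */
    for (int i = 0; i < n; i++) {
        pthread_mutex_lock(&others[i]->lock);
        cancel_job(others[i]);              /* may wait on the job's own context */
        pthread_mutex_unlock(&others[i]->lock);
    }
    pthread_mutex_lock(&self->lock);        /* restore the caller's expectation */
}

int main(void)
{
    Job a = { PTHREAD_MUTEX_INITIALIZER, "job-a" };
    Job b = { PTHREAD_MUTEX_INITIALIZER, "job-b" };
    Job *others[] = { &b };

    pthread_mutex_lock(&a.lock);            /* caller enters with its own lock held */
    abort_txn(&a, others, 1);
    pthread_mutex_unlock(&a.lock);
    return 0;
}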

View File

@ -1,241 +0,0 @@
From 10fbd3c89739a1879f47f2a2256831ce5e1ae7ad Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:10 +0100
Subject: test-bdrv-drain: AIO_WAIT_WHILE() in job .commit/.abort
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-32-kwolf@redhat.com>
Patchwork-id: 82620
O-Subject: [RHEL-8 qemu-kvm PATCH 41/44] test-bdrv-drain: AIO_WAIT_WHILE() in job .commit/.abort
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
This adds tests for calling AIO_WAIT_WHILE() in the .commit and .abort
callbacks. Both reasons why .abort could be called for a single job are
tested: Either .run or .prepare could return an error.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit d49725af46a7710cde02cc120b7f1e485154b483)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/test-bdrv-drain.c | 116 +++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 104 insertions(+), 12 deletions(-)
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index f4b57f7..d6202b2 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -784,6 +784,8 @@ static void test_iothread_drain_subtree(void)
typedef struct TestBlockJob {
BlockJob common;
+ int run_ret;
+ int prepare_ret;
bool should_complete;
} TestBlockJob;
@@ -793,7 +795,23 @@ static int test_job_prepare(Job *job)
/* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
blk_flush(s->common.blk);
- return 0;
+ return s->prepare_ret;
+}
+
+static void test_job_commit(Job *job)
+{
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
+
+ /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
+ blk_flush(s->common.blk);
+}
+
+static void test_job_abort(Job *job)
+{
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
+
+ /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
+ blk_flush(s->common.blk);
}
static int coroutine_fn test_job_run(Job *job, Error **errp)
@@ -809,7 +827,7 @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
job_pause_point(&s->common.job);
}
- return 0;
+ return s->run_ret;
}
static void test_job_complete(Job *job, Error **errp)
@@ -827,14 +845,24 @@ BlockJobDriver test_job_driver = {
.run = test_job_run,
.complete = test_job_complete,
.prepare = test_job_prepare,
+ .commit = test_job_commit,
+ .abort = test_job_abort,
},
};
-static void test_blockjob_common(enum drain_type drain_type, bool use_iothread)
+enum test_job_result {
+ TEST_JOB_SUCCESS,
+ TEST_JOB_FAIL_RUN,
+ TEST_JOB_FAIL_PREPARE,
+};
+
+static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
+ enum test_job_result result)
{
BlockBackend *blk_src, *blk_target;
BlockDriverState *src, *target;
BlockJob *job;
+ TestBlockJob *tjob;
IOThread *iothread = NULL;
AioContext *ctx;
int ret;
@@ -858,9 +886,23 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread)
blk_insert_bs(blk_target, target, &error_abort);
aio_context_acquire(ctx);
- job = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL,
- 0, 0, NULL, NULL, &error_abort);
+ tjob = block_job_create("job0", &test_job_driver, NULL, src,
+ 0, BLK_PERM_ALL,
+ 0, 0, NULL, NULL, &error_abort);
+ job = &tjob->common;
block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
+
+ switch (result) {
+ case TEST_JOB_SUCCESS:
+ break;
+ case TEST_JOB_FAIL_RUN:
+ tjob->run_ret = -EIO;
+ break;
+ case TEST_JOB_FAIL_PREPARE:
+ tjob->prepare_ret = -EIO;
+ break;
+ }
+
job_start(&job->job);
aio_context_release(ctx);
@@ -918,7 +960,7 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread)
aio_context_acquire(ctx);
ret = job_complete_sync(&job->job, &error_abort);
- g_assert_cmpint(ret, ==, 0);
+ g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
if (use_iothread) {
blk_set_aio_context(blk_src, qemu_get_aio_context());
@@ -937,32 +979,68 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread)
static void test_blockjob_drain_all(void)
{
- test_blockjob_common(BDRV_DRAIN_ALL, false);
+ test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
}
static void test_blockjob_drain(void)
{
- test_blockjob_common(BDRV_DRAIN, false);
+ test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
}
static void test_blockjob_drain_subtree(void)
{
- test_blockjob_common(BDRV_SUBTREE_DRAIN, false);
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_SUCCESS);
+}
+
+static void test_blockjob_error_drain_all(void)
+{
+ test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
+ test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
+}
+
+static void test_blockjob_error_drain(void)
+{
+ test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
+ test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
+}
+
+static void test_blockjob_error_drain_subtree(void)
+{
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_RUN);
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}
static void test_blockjob_iothread_drain_all(void)
{
- test_blockjob_common(BDRV_DRAIN_ALL, true);
+ test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
}
static void test_blockjob_iothread_drain(void)
{
- test_blockjob_common(BDRV_DRAIN, true);
+ test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
}
static void test_blockjob_iothread_drain_subtree(void)
{
- test_blockjob_common(BDRV_SUBTREE_DRAIN, true);
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_SUCCESS);
+}
+
+static void test_blockjob_iothread_error_drain_all(void)
+{
+ test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
+ test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
+}
+
+static void test_blockjob_iothread_error_drain(void)
+{
+ test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
+ test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
+}
+
+static void test_blockjob_iothread_error_drain_subtree(void)
+{
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_RUN);
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}
@@ -1433,6 +1511,13 @@ int main(int argc, char **argv)
g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
test_blockjob_drain_subtree);
+ g_test_add_func("/bdrv-drain/blockjob/error/drain_all",
+ test_blockjob_error_drain_all);
+ g_test_add_func("/bdrv-drain/blockjob/error/drain",
+ test_blockjob_error_drain);
+ g_test_add_func("/bdrv-drain/blockjob/error/drain_subtree",
+ test_blockjob_error_drain_subtree);
+
g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
test_blockjob_iothread_drain_all);
g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
@@ -1440,6 +1525,13 @@ int main(int argc, char **argv)
g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree",
test_blockjob_iothread_drain_subtree);
+ g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all",
+ test_blockjob_iothread_error_drain_all);
+ g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain",
+ test_blockjob_iothread_error_drain);
+ g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_subtree",
+ test_blockjob_iothread_error_drain_subtree);
+
g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
--
1.8.3.1

View File

@ -1,69 +0,0 @@
From 1eaa60bc24cb3fecba8da61f21c44e6f4c9ee4c1 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:11 +0100
Subject: test-bdrv-drain: Fix outdated comments
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-33-kwolf@redhat.com>
Patchwork-id: 82621
O-Subject: [RHEL-8 qemu-kvm PATCH 42/44] test-bdrv-drain: Fix outdated comments
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
Commit 89bd030533e changed the test case from using job_sleep_ns() to
using qemu_co_sleep_ns() instead. Also, block_job_sleep_ns() became
job_sleep_ns() in commit 5d43e86e11f.
In both cases, some comments in the test case were not updated. Do that
now.
Reported-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
(cherry picked from commit 5599c162c3bec2bc8f0123e4d5802a70d9984b3b)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/test-bdrv-drain.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index d6202b2..7e7ba9b 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -820,9 +820,9 @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
job_transition_to_ready(&s->common.job);
while (!s->should_complete) {
- /* Avoid block_job_sleep_ns() because it marks the job as !busy. We
- * want to emulate some actual activity (probably some I/O) here so
- * that drain has to wait for this acitivity to stop. */
+ /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
+ * emulate some actual activity (probably some I/O) here so that drain
+ * has to wait for this activity to stop. */
qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
job_pause_point(&s->common.job);
}
@@ -908,7 +908,7 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
g_assert_cmpint(job->job.pause_count, ==, 0);
g_assert_false(job->job.paused);
- g_assert_true(job->job.busy); /* We're in job_sleep_ns() */
+ g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
do_drain_begin_unlocked(drain_type, src);
@@ -956,7 +956,7 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
g_assert_cmpint(job->job.pause_count, ==, 0);
g_assert_false(job->job.paused);
- g_assert_true(job->job.busy); /* We're in job_sleep_ns() */
+ g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
aio_context_acquire(ctx);
ret = job_complete_sync(&job->job, &error_abort);
--
1.8.3.1

View File

@ -1,367 +0,0 @@
From ea2355d819127ace6195e1d007bc305a49e7d465 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:12 +0100
Subject: block: Use a single global AioWait
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-34-kwolf@redhat.com>
Patchwork-id: 82623
O-Subject: [RHEL-8 qemu-kvm PATCH 43/44] block: Use a single global AioWait
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
When draining a block node, we recurse to its parent and, for subtree
drains, also to its children. A single AIO_WAIT_WHILE() is then used to
wait for bdrv_drain_poll() to become true, which depends on all of the
nodes we recursed to. However, if the respective child or parent becomes
quiescent and calls bdrv_wakeup(), only the AioWait of the child/parent
is checked, while AIO_WAIT_WHILE() depends on the AioWait of the
original node.
Fix this by using a single AioWait for all callers of AIO_WAIT_WHILE().
This may mean that the draining thread gets a few more unnecessary
wakeups because an unrelated operation got completed, but we already
wake it up when something _could_ have changed rather than only if it
has certainly changed.
Apart from that, drain is a slow path anyway. In theory it would be
possible to use wakeups more selectively and still correctly, but the
gains are likely not worth the additional complexity. In fact, this
patch is a nice simplification for some places in the code.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit cfe29d8294e06420e15d4938421ae006c8ac49e7)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
block.c | 5 -----
block/block-backend.c | 11 ++++-------
block/io.c | 7 ++-----
blockjob.c | 13 +------------
include/block/aio-wait.h | 22 +++++++++++-----------
include/block/block.h | 6 +-----
include/block/block_int.h | 3 ---
include/block/blockjob.h | 10 ----------
job.c | 3 +--
util/aio-wait.c | 11 ++++++-----
10 files changed, 26 insertions(+), 65 deletions(-)
diff --git a/block.c b/block.c
index 39f373e..9b55956 100644
--- a/block.c
+++ b/block.c
@@ -4865,11 +4865,6 @@ AioContext *bdrv_get_aio_context(BlockDriverState *bs)
return bs ? bs->aio_context : qemu_get_aio_context();
}
-AioWait *bdrv_get_aio_wait(BlockDriverState *bs)
-{
- return bs ? &bs->wait : NULL;
-}
-
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co)
{
aio_co_enter(bdrv_get_aio_context(bs), co);
diff --git a/block/block-backend.c b/block/block-backend.c
index 9a3e060..723ab5a 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -88,7 +88,6 @@ struct BlockBackend {
* Accessed with atomic ops.
*/
unsigned int in_flight;
- AioWait wait;
};
typedef struct BlockBackendAIOCB {
@@ -1300,7 +1299,7 @@ static void blk_inc_in_flight(BlockBackend *blk)
static void blk_dec_in_flight(BlockBackend *blk)
{
atomic_dec(&blk->in_flight);
- aio_wait_kick(&blk->wait);
+ aio_wait_kick();
}
static void error_callback_bh(void *opaque)
@@ -1601,9 +1600,8 @@ void blk_drain(BlockBackend *blk)
}
/* We may have -ENOMEDIUM completions in flight */
- AIO_WAIT_WHILE(&blk->wait,
- blk_get_aio_context(blk),
- atomic_mb_read(&blk->in_flight) > 0);
+ AIO_WAIT_WHILE(blk_get_aio_context(blk),
+ atomic_mb_read(&blk->in_flight) > 0);
if (bs) {
bdrv_drained_end(bs);
@@ -1622,8 +1620,7 @@ void blk_drain_all(void)
aio_context_acquire(ctx);
/* We may have -ENOMEDIUM completions in flight */
- AIO_WAIT_WHILE(&blk->wait, ctx,
- atomic_mb_read(&blk->in_flight) > 0);
+ AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);
aio_context_release(ctx);
}
diff --git a/block/io.c b/block/io.c
index 8b81ff3..bd9d688 100644
--- a/block/io.c
+++ b/block/io.c
@@ -38,8 +38,6 @@
/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
-static AioWait drain_all_aio_wait;
-
static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags);
@@ -557,7 +555,7 @@ void bdrv_drain_all_begin(void)
}
/* Now poll the in-flight requests */
- AIO_WAIT_WHILE(&drain_all_aio_wait, NULL, bdrv_drain_all_poll());
+ AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
while ((bs = bdrv_next_all_states(bs))) {
bdrv_drain_assert_idle(bs);
@@ -713,8 +711,7 @@ void bdrv_inc_in_flight(BlockDriverState *bs)
void bdrv_wakeup(BlockDriverState *bs)
{
- aio_wait_kick(bdrv_get_aio_wait(bs));
- aio_wait_kick(&drain_all_aio_wait);
+ aio_wait_kick();
}
void bdrv_dec_in_flight(BlockDriverState *bs)
diff --git a/blockjob.c b/blockjob.c
index 617d86f..06f2429 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -221,20 +221,9 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
return 0;
}
-void block_job_wakeup_all_bdrv(BlockJob *job)
-{
- GSList *l;
-
- for (l = job->nodes; l; l = l->next) {
- BdrvChild *c = l->data;
- bdrv_wakeup(c->bs);
- }
-}
-
static void block_job_on_idle(Notifier *n, void *opaque)
{
- BlockJob *job = opaque;
- block_job_wakeup_all_bdrv(job);
+ aio_wait_kick();
}
bool block_job_is_internal(BlockJob *job)
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index 600fad1..afd0ff7 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -30,14 +30,15 @@
/**
* AioWait:
*
- * An object that facilitates synchronous waiting on a condition. The main
- * loop can wait on an operation running in an IOThread as follows:
+ * An object that facilitates synchronous waiting on a condition. A single
+ * global AioWait object (global_aio_wait) is used internally.
+ *
+ * The main loop can wait on an operation running in an IOThread as follows:
*
- * AioWait *wait = ...;
* AioContext *ctx = ...;
* MyWork work = { .done = false };
* schedule_my_work_in_iothread(ctx, &work);
- * AIO_WAIT_WHILE(wait, ctx, !work.done);
+ * AIO_WAIT_WHILE(ctx, !work.done);
*
* The IOThread must call aio_wait_kick() to notify the main loop when
* work.done changes:
@@ -46,7 +47,7 @@
* {
* ...
* work.done = true;
- * aio_wait_kick(wait);
+ * aio_wait_kick();
* }
*/
typedef struct {
@@ -54,9 +55,10 @@ typedef struct {
unsigned num_waiters;
} AioWait;
+extern AioWait global_aio_wait;
+
/**
* AIO_WAIT_WHILE:
- * @wait: the aio wait object
* @ctx: the aio context, or NULL if multiple aio contexts (for which the
* caller does not hold a lock) are involved in the polling condition.
* @cond: wait while this conditional expression is true
@@ -72,9 +74,9 @@ typedef struct {
* wait on conditions between two IOThreads since that could lead to deadlock,
* go via the main loop instead.
*/
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({ \
+#define AIO_WAIT_WHILE(ctx, cond) ({ \
bool waited_ = false; \
- AioWait *wait_ = (wait); \
+ AioWait *wait_ = &global_aio_wait; \
AioContext *ctx_ = (ctx); \
/* Increment wait_->num_waiters before evaluating cond. */ \
atomic_inc(&wait_->num_waiters); \
@@ -102,14 +104,12 @@ typedef struct {
/**
* aio_wait_kick:
- * @wait: the aio wait object that should re-evaluate its condition
- *
* Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During
* synchronous operations performed in an IOThread, the main thread lets the
* IOThread's event loop run, waiting for the operation to complete. A
* aio_wait_kick() call will wake up the main thread.
*/
-void aio_wait_kick(AioWait *wait);
+void aio_wait_kick(void);
/**
* aio_wait_bh_oneshot:
diff --git a/include/block/block.h b/include/block/block.h
index 4e0871a..4edc1e8 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -410,13 +410,9 @@ void bdrv_drain_all_begin(void);
void bdrv_drain_all_end(void);
void bdrv_drain_all(void);
-/* Returns NULL when bs == NULL */
-AioWait *bdrv_get_aio_wait(BlockDriverState *bs);
-
#define BDRV_POLL_WHILE(bs, cond) ({ \
BlockDriverState *bs_ = (bs); \
- AIO_WAIT_WHILE(bdrv_get_aio_wait(bs_), \
- bdrv_get_aio_context(bs_), \
+ AIO_WAIT_WHILE(bdrv_get_aio_context(bs_), \
cond); })
int bdrv_pdiscard(BdrvChild *child, int64_t offset, int bytes);
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 4000d2a..92ecbd8 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -794,9 +794,6 @@ struct BlockDriverState {
unsigned int in_flight;
unsigned int serialising_in_flight;
- /* Kicked to signal main loop when a request completes. */
- AioWait wait;
-
/* counter for nested bdrv_io_plug.
* Accessed with atomic ops.
*/
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index 2290bbb..ede0bd8 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -122,16 +122,6 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
void block_job_remove_all_bdrv(BlockJob *job);
/**
- * block_job_wakeup_all_bdrv:
- * @job: The block job
- *
- * Calls bdrv_wakeup() for all BlockDriverStates that have been added to the
- * job. This function is to be called whenever child_job_drained_poll() would
- * go from true to false to notify waiting drain requests.
- */
-void block_job_wakeup_all_bdrv(BlockJob *job);
-
-/**
* block_job_set_speed:
* @job: The job to set the speed for.
* @speed: The new value
diff --git a/job.c b/job.c
index 0b02186..ed4da6f 100644
--- a/job.c
+++ b/job.c
@@ -978,7 +978,6 @@ void job_complete(Job *job, Error **errp)
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
{
Error *local_err = NULL;
- AioWait dummy_wait = {};
int ret;
job_ref(job);
@@ -992,7 +991,7 @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
return -EBUSY;
}
- AIO_WAIT_WHILE(&dummy_wait, job->aio_context,
+ AIO_WAIT_WHILE(job->aio_context,
(job_drain(job), !job_is_completed(job)));
ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
diff --git a/util/aio-wait.c b/util/aio-wait.c
index b8a8f86..b487749 100644
--- a/util/aio-wait.c
+++ b/util/aio-wait.c
@@ -26,21 +26,22 @@
#include "qemu/main-loop.h"
#include "block/aio-wait.h"
+AioWait global_aio_wait;
+
static void dummy_bh_cb(void *opaque)
{
/* The point is to make AIO_WAIT_WHILE()'s aio_poll() return */
}
-void aio_wait_kick(AioWait *wait)
+void aio_wait_kick(void)
{
/* The barrier (or an atomic op) is in the caller. */
- if (atomic_read(&wait->num_waiters)) {
+ if (atomic_read(&global_aio_wait.num_waiters)) {
aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
}
}
typedef struct {
- AioWait wait;
bool done;
QEMUBHFunc *cb;
void *opaque;
@@ -54,7 +55,7 @@ static void aio_wait_bh(void *opaque)
data->cb(data->opaque);
data->done = true;
- aio_wait_kick(&data->wait);
+ aio_wait_kick();
}
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
@@ -67,5 +68,5 @@ void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
aio_bh_schedule_oneshot(ctx, aio_wait_bh, &data);
- AIO_WAIT_WHILE(&data.wait, ctx, !data.done);
+ AIO_WAIT_WHILE(ctx, !data.done);
}
--
1.8.3.1
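
The new interface is essentially a single global wait object plus an unconditional kick: every waiter re-checks its own condition after each wakeup, and workers kick whenever something might have changed, accepting a few spurious wakeups in exchange for the simpler API. A self-contained sketch of that scheme using plain pthreads and hypothetical names:

#include <pthread.h>
#include <stdio.h>

typedef struct GlobalWait {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int num_waiters;
} GlobalWait;

static GlobalWait global_wait = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static int work_done;               /* the condition this waiter cares about */

/* Workers call this whenever their condition might have changed. */
static void wait_kick(void)
{
    pthread_mutex_lock(&global_wait.lock);
    if (global_wait.num_waiters > 0) {
        pthread_cond_broadcast(&global_wait.cond);
    }
    pthread_mutex_unlock(&global_wait.lock);
}

static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&global_wait.lock);
    work_done = 1;
    pthread_mutex_unlock(&global_wait.lock);
    wait_kick();                    /* wake every waiter; each re-checks its condition */
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_mutex_lock(&global_wait.lock);
    global_wait.num_waiters++;
    pthread_create(&t, NULL, worker, NULL);
    while (!work_done) {            /* WAIT_WHILE(!work_done) */
        pthread_cond_wait(&global_wait.cond, &global_wait.lock);
    }
    global_wait.num_waiters--;
    pthread_mutex_unlock(&global_wait.lock);

    pthread_join(t, NULL);
    printf("condition satisfied\n");
    return 0;
}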

View File

@ -1,198 +0,0 @@
From f31ce5e7d486c860d44cb103b672f81de9bc537c Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:13 +0100
Subject: test-bdrv-drain: Test draining job source child and parent
RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-35-kwolf@redhat.com>
Patchwork-id: 82624
O-Subject: [RHEL-8 qemu-kvm PATCH 44/44] test-bdrv-drain: Test draining job source child and parent
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>
For the block job drain test, don't only test draining the source and
the target node, but create a backing chain for the source
(source_backing <- source <- source_overlay) and test draining each of
the nodes in it.
When using iothreads, the source node (and therefore the job) is in a
different AioContext than the drain, which happens from the main
thread. This way, the main thread waits in AIO_WAIT_WHILE() for the
iothread to make progress, and aio_wait_kick() is required to notify it.
The test validates that calling bdrv_wakeup() for a child or a parent
node will actually notify AIO_WAIT_WHILE() instead of letting it hang.
Increase the sleep time a bit (to 1 ms) because the test case is racy
and with the shorter sleep, it didn't reproduce the bug it is supposed
to test for me under 'rr record -n'.
This was because bdrv_drain_invoke_entry() (in the main thread) was only
called after the job had already reached the pause point, so we got a
bdrv_dec_in_flight() from the main thread and the additional
aio_wait_kick() when the job becomes idle (that we really wanted to test
here) wasn't even necessary any more to make progress.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit d8b3afd597d54e496809b05ac39ac29a5799664f)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
tests/test-bdrv-drain.c | 77 ++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 69 insertions(+), 8 deletions(-)
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 7e7ba9b..8641b54 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -786,6 +786,7 @@ typedef struct TestBlockJob {
BlockJob common;
int run_ret;
int prepare_ret;
+ bool running;
bool should_complete;
} TestBlockJob;
@@ -818,12 +819,17 @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
{
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
+ /* We are running the actual job code past the pause point in
+ * job_co_entry(). */
+ s->running = true;
+
job_transition_to_ready(&s->common.job);
while (!s->should_complete) {
/* Avoid job_sleep_ns() because it marks the job as !busy. We want to
* emulate some actual activity (probably some I/O) here so that drain
* has to wait for this activity to stop. */
- qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
+ qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);
+
job_pause_point(&s->common.job);
}
@@ -856,11 +862,19 @@ enum test_job_result {
TEST_JOB_FAIL_PREPARE,
};
-static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
- enum test_job_result result)
+enum test_job_drain_node {
+ TEST_JOB_DRAIN_SRC,
+ TEST_JOB_DRAIN_SRC_CHILD,
+ TEST_JOB_DRAIN_SRC_PARENT,
+};
+
+static void test_blockjob_common_drain_node(enum drain_type drain_type,
+ bool use_iothread,
+ enum test_job_result result,
+ enum test_job_drain_node drain_node)
{
BlockBackend *blk_src, *blk_target;
- BlockDriverState *src, *target;
+ BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
BlockJob *job;
TestBlockJob *tjob;
IOThread *iothread = NULL;
@@ -869,8 +883,32 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
&error_abort);
+ src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
+ BDRV_O_RDWR, &error_abort);
+ src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
+ BDRV_O_RDWR, &error_abort);
+
+ bdrv_set_backing_hd(src_overlay, src, &error_abort);
+ bdrv_unref(src);
+ bdrv_set_backing_hd(src, src_backing, &error_abort);
+ bdrv_unref(src_backing);
+
blk_src = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
- blk_insert_bs(blk_src, src, &error_abort);
+ blk_insert_bs(blk_src, src_overlay, &error_abort);
+
+ switch (drain_node) {
+ case TEST_JOB_DRAIN_SRC:
+ drain_bs = src;
+ break;
+ case TEST_JOB_DRAIN_SRC_CHILD:
+ drain_bs = src_backing;
+ break;
+ case TEST_JOB_DRAIN_SRC_PARENT:
+ drain_bs = src_overlay;
+ break;
+ default:
+ g_assert_not_reached();
+ }
if (use_iothread) {
iothread = iothread_new();
@@ -906,11 +944,21 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
job_start(&job->job);
aio_context_release(ctx);
+ if (use_iothread) {
+ /* job_co_entry() is run in the I/O thread, wait for the actual job
+ * code to start (we don't want to catch the job in the pause point in
+ * job_co_entry(). */
+ while (!tjob->running) {
+ aio_poll(qemu_get_aio_context(), false);
+ }
+ }
+
g_assert_cmpint(job->job.pause_count, ==, 0);
g_assert_false(job->job.paused);
+ g_assert_true(tjob->running);
g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
- do_drain_begin_unlocked(drain_type, src);
+ do_drain_begin_unlocked(drain_type, drain_bs);
if (drain_type == BDRV_DRAIN_ALL) {
/* bdrv_drain_all() drains both src and target */
@@ -921,7 +969,7 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
g_assert_true(job->job.paused);
g_assert_false(job->job.busy); /* The job is paused */
- do_drain_end_unlocked(drain_type, src);
+ do_drain_end_unlocked(drain_type, drain_bs);
if (use_iothread) {
/* paused is reset in the I/O thread, wait for it */
@@ -969,7 +1017,7 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
blk_unref(blk_src);
blk_unref(blk_target);
- bdrv_unref(src);
+ bdrv_unref(src_overlay);
bdrv_unref(target);
if (iothread) {
@@ -977,6 +1025,19 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
}
}
+static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
+ enum test_job_result result)
+{
+ test_blockjob_common_drain_node(drain_type, use_iothread, result,
+ TEST_JOB_DRAIN_SRC);
+ test_blockjob_common_drain_node(drain_type, use_iothread, result,
+ TEST_JOB_DRAIN_SRC_CHILD);
+ if (drain_type == BDRV_SUBTREE_DRAIN) {
+ test_blockjob_common_drain_node(drain_type, use_iothread, result,
+ TEST_JOB_DRAIN_SRC_PARENT);
+ }
+}
+
static void test_blockjob_drain_all(void)
{
test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
--
1.8.3.1

Some files were not shown because too many files have changed in this diff.