systemd/0098-test-answer-2nd-mdadm-create-question-for-compat-wit.patch
Jan Macku e20fafc72a systemd-257-3
Resolves: RHEL-44417, RHEL-71409, RHEL-72798
2025-01-08 09:25:36 +01:00

From b2320ced3873981f1215eddb597cfa4aad5bd1b6 Mon Sep 17 00:00:00 2001
From: Luca Boccassi <luca.boccassi@gmail.com>
Date: Sun, 22 Dec 2024 13:31:36 +0000
Subject: [PATCH] test: answer 2nd mdadm --create question for compat with new
version

The new version of mdadm now asks a second question, so send 'y' twice
to it in the test scripts:
[ 5.253483] TEST-64-UDEV-STORAGE.sh[684]: + echo y
[ 5.254412] TEST-64-UDEV-STORAGE.sh[685]: + mdadm --create /dev/md/mdmirror --name mdmirror --uuid aaaaaaaa:bbbbbbbb:cccccccc:00000001 /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm0 /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm1 -v -f --level=1 --raid-devices=2
[ 5.254759] TEST-64-UDEV-STORAGE.sh[685]: To optimalize recovery speed, it is recommended to enable write-indent bitmap, do you want to enable it now? [y/N]? mdadm: Note: this array has metadata at the start and
[ 5.255085] TEST-64-UDEV-STORAGE.sh[685]: may not be suitable as a boot device. If you plan to
[ 5.255418] TEST-64-UDEV-STORAGE.sh[685]: store '/boot' on this device please ensure that
[ 5.255745] TEST-64-UDEV-STORAGE.sh[685]: your boot-loader understands md/v1.x metadata, or use
[ 5.256285] TEST-64-UDEV-STORAGE.sh[685]: --metadata=0.90
[ 5.256672] TEST-64-UDEV-STORAGE.sh[685]: mdadm: size set to 64512K
[ 5.257063] TEST-64-UDEV-STORAGE.sh[685]: Continue creating array [y/N]? mdadm: create aborted.

This is backward compatible with the older version, which asks just one
question.
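
A minimal sketch of why the extra answer is harmless (the device names
below are illustrative, not the ones used by the tests): the old mdadm
reads a single answer from stdin and never touches the rest of the pipe,
while the new mdadm reads both, so two answers satisfy either version:

    # Two 'y' answers on stdin; old mdadm consumes the first and ignores
    # the second, new mdadm consumes both (illustrative devices).
    printf 'y\ny\n' | mdadm --create /dev/md/example --name example \
        /dev/loop0 /dev/loop1 -v -f --level=1 --raid-devices=2
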
(cherry picked from commit 16406420ea449b75e70a7dced05d7b98bc0f5376)
---
test/units/TEST-64-UDEV-STORAGE.sh | 8 ++++----
test/units/TEST-74-AUX-UTILS.bootctl.sh | 4 ++--
2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/test/units/TEST-64-UDEV-STORAGE.sh b/test/units/TEST-64-UDEV-STORAGE.sh
index 24c1adaeda..01c3dd68ea 100755
--- a/test/units/TEST-64-UDEV-STORAGE.sh
+++ b/test/units/TEST-64-UDEV-STORAGE.sh
@@ -1080,7 +1080,7 @@ testcase_mdadm_basic() {
"/dev/disk/by-label/$part_name" # ext4 partition
)
# Create a simple RAID 1 with an ext4 filesystem
- echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..1} -v -f --level=1 --raid-devices=2
+ printf 'y\ny\n' | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..1} -v -f --level=1 --raid-devices=2
udevadm wait --settle --timeout=30 "$raid_dev"
# udevd does not lock md devices, hence we need to trigger uevent after creating filesystem.
mkfs.ext4 -L "$part_name" "$raid_dev"
@@ -1111,7 +1111,7 @@ testcase_mdadm_basic() {
"/dev/disk/by-label/$part_name" # ext4 partition
)
# Create a simple RAID 5 with an ext4 filesystem
- echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..2} -v -f --level=5 --raid-devices=3
+ printf 'y\ny\n' | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..2} -v -f --level=5 --raid-devices=3
udevadm wait --settle --timeout=30 "$raid_dev"
mkfs.ext4 -L "$part_name" "$raid_dev"
udevadm trigger --settle "$raid_dev"
@@ -1152,7 +1152,7 @@ testcase_mdadm_basic() {
"/dev/disk/by-id/md-uuid-$uuid-part3"
)
# Create a simple RAID 10 with an ext4 filesystem
- echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..3} -v -f --level=10 --raid-devices=4
+ printf 'y\ny\n' | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..3} -v -f --level=10 --raid-devices=4
udevadm wait --settle --timeout=30 "$raid_dev"
# Partition the raid device
# Here, 'udevadm lock' is meaningless, as udevd does not lock MD devices.
@@ -1208,7 +1208,7 @@ testcase_mdadm_lvm() {
"/dev/disk/by-label/$part_name" # ext4 partition
)
# Create a RAID 10 with LVM + ext4
- echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadmlvm{0..3} -v -f --level=10 --raid-devices=4
+ printf 'y\ny\n' | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadmlvm{0..3} -v -f --level=10 --raid-devices=4
udevadm wait --settle --timeout=30 "$raid_dev"
# Create an LVM on the MD
lvm pvcreate -y "$raid_dev"
diff --git a/test/units/TEST-74-AUX-UTILS.bootctl.sh b/test/units/TEST-74-AUX-UTILS.bootctl.sh
index 46fd5d1f2d..650c289aca 100755
--- a/test/units/TEST-74-AUX-UTILS.bootctl.sh
+++ b/test/units/TEST-74-AUX-UTILS.bootctl.sh
@@ -215,9 +215,9 @@ EOF
udevadm settle
- echo y | mdadm --create /dev/md/raid-esp --name "raid-esp" "${LOOPDEV1}p1" "${LOOPDEV2}p1" -v -f --level=1 --raid-devices=2
+ printf 'y\ny\n' | mdadm --create /dev/md/raid-esp --name "raid-esp" "${LOOPDEV1}p1" "${LOOPDEV2}p1" -v -f --level=1 --raid-devices=2
mkfs.vfat /dev/md/raid-esp
- echo y | mdadm --create /dev/md/raid-root --name "raid-root" "${LOOPDEV1}p2" "${LOOPDEV2}p2" -v -f --level=1 --raid-devices=2
+ printf 'y\ny\n' | mdadm --create /dev/md/raid-root --name "raid-root" "${LOOPDEV1}p2" "${LOOPDEV2}p2" -v -f --level=1 --raid-devices=2
mkfs.ext4 /dev/md/raid-root
mkfs.btrfs -f -M -d raid1 -m raid1 -L "raid-boot" "${LOOPDEV1}p3" "${LOOPDEV2}p3"