From 7ad55258322c98a62c6a56599e2fc4fef52c66b1 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Tue, 5 Oct 2021 05:40:54 -0400 Subject: [PATCH] import rear-2.6-3.el8 --- .gitignore | 2 +- .rear.metadata | 2 +- SOURCES/rear-asciidoc.patch | 584 +++++ SOURCES/rear-bz1631183.patch | 275 --- SOURCES/rear-bz1639705.patch | 1487 ------------- SOURCES/rear-bz1652828-bz1652853.patch | 156 -- SOURCES/rear-bz1653214.patch | 17 - SOURCES/rear-bz1655956.patch | 35 - SOURCES/rear-bz1659137.patch | 91 - SOURCES/rear-bz1663515.patch | 17 - SOURCES/rear-bz1672938.patch | 94 - SOURCES/rear-bz1685166.patch | 40 - SOURCES/rear-bz1692575.patch | 33 - SOURCES/rear-bz1693608.patch | 113 - SOURCES/rear-bz1700807.patch | 12 - SOURCES/rear-bz1711123.patch | 87 - SOURCES/rear-bz1726982.patch | 17 - SOURCES/rear-bz1729502-ppc64-iso-level.patch | 37 - SOURCES/rear-bz1732328.patch | 119 - SOURCES/rear-bz1737042.patch | 194 -- SOURCES/rear-bz1743303-rubrik.patch | 354 --- SOURCES/rear-bz1747468.patch | 112 + SOURCES/rear-bz1832394.patch | 185 +- SOURCES/rear-bz1843809-skip-longhorn.patch | 60 - SOURCES/rear-bz1882060.patch | 14 - SOURCES/rear-bz1930662.patch | 693 ++++++ SOURCES/rear-bz1945869.patch | 274 +++ SOURCES/rear-bz1958247.patch | 2040 ++++++++++++++++++ SOURCES/rear-bz1983013.patch | 68 + SOURCES/rear-bz1993296.patch | 34 + SOURCES/rear-rhbz1610638.patch | 85 - SOURCES/rear-rhbz1610647.patch | 50 - SOURCES/rear-sfdc02343208.patch | 17 - SOURCES/rear-sfdc02772301.patch | 8 +- SPECS/rear.spec | 109 +- 35 files changed, 4013 insertions(+), 3502 deletions(-) create mode 100644 SOURCES/rear-asciidoc.patch delete mode 100644 SOURCES/rear-bz1631183.patch delete mode 100644 SOURCES/rear-bz1639705.patch delete mode 100644 SOURCES/rear-bz1652828-bz1652853.patch delete mode 100644 SOURCES/rear-bz1653214.patch delete mode 100644 SOURCES/rear-bz1655956.patch delete mode 100644 SOURCES/rear-bz1659137.patch delete mode 100644 SOURCES/rear-bz1663515.patch delete mode 100644 SOURCES/rear-bz1672938.patch delete mode 100644 SOURCES/rear-bz1685166.patch delete mode 100644 SOURCES/rear-bz1692575.patch delete mode 100644 SOURCES/rear-bz1693608.patch delete mode 100644 SOURCES/rear-bz1700807.patch delete mode 100644 SOURCES/rear-bz1711123.patch delete mode 100644 SOURCES/rear-bz1726982.patch delete mode 100644 SOURCES/rear-bz1729502-ppc64-iso-level.patch delete mode 100644 SOURCES/rear-bz1732328.patch delete mode 100644 SOURCES/rear-bz1737042.patch delete mode 100644 SOURCES/rear-bz1743303-rubrik.patch create mode 100644 SOURCES/rear-bz1747468.patch delete mode 100644 SOURCES/rear-bz1843809-skip-longhorn.patch delete mode 100644 SOURCES/rear-bz1882060.patch create mode 100644 SOURCES/rear-bz1930662.patch create mode 100644 SOURCES/rear-bz1945869.patch create mode 100644 SOURCES/rear-bz1958247.patch create mode 100644 SOURCES/rear-bz1983013.patch create mode 100644 SOURCES/rear-bz1993296.patch delete mode 100644 SOURCES/rear-rhbz1610638.patch delete mode 100644 SOURCES/rear-rhbz1610647.patch delete mode 100644 SOURCES/rear-sfdc02343208.patch diff --git a/.gitignore b/.gitignore index 38668b3..5d90595 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/rear-2.4.tar.gz +SOURCES/rear-2.6.tar.gz diff --git a/.rear.metadata b/.rear.metadata index 68e7cba..a5f7fb3 100644 --- a/.rear.metadata +++ b/.rear.metadata @@ -1 +1 @@ -9f6037ea582635ed78f1dffaa8a7cc5cdc7db37a SOURCES/rear-2.4.tar.gz +13c23ad59254438ffcd0cde6400fd991cbfe194e SOURCES/rear-2.6.tar.gz diff --git a/SOURCES/rear-asciidoc.patch b/SOURCES/rear-asciidoc.patch new 
file mode 100644 index 0000000..d224eb7 --- /dev/null +++ b/SOURCES/rear-asciidoc.patch @@ -0,0 +1,584 @@ +diff --git a/doc/user-guide/12-BLOCKCLONE.adoc b/doc/user-guide/12-BLOCKCLONE.adoc +index 061f0f49..2d4e0ed1 100644 +--- a/doc/user-guide/12-BLOCKCLONE.adoc ++++ b/doc/user-guide/12-BLOCKCLONE.adoc +@@ -40,17 +40,17 @@ First we need to set some global options in _local.conf_, + In our small example backups will be stored in _/mnt/rear_ directory + on BACKUP_URL NFS server. + +-``` ++-------------------------- + # cat local.conf + OUTPUT=ISO + BACKUP=NETFS + BACKUP_OPTIONS="nfsvers=3,nolock" + BACKUP_URL=nfs:///mnt/rear +-``` ++-------------------------- + + Now we will define variables that will apply only for targeted block device + +-``` ++-------------------------- + # cat alien.conf + BACKUP=BLOCKCLONE # Define BLOCKCLONE as backup method + BACKUP_PROG_ARCHIVE="alien" # Name of image file +@@ -66,16 +66,16 @@ BLOCKCLONE_MBR_FILE="alien_boot_strap.img" # Output filename for b + BLOCKCLONE_PARTITIONS_CONF_FILE="alien_partitions.conf" # Output filename for partition configuration + + BLOCKCLONE_ALLOW_MOUNTED="yes" # Device can be mounted during backup (default NO) +-``` ++-------------------------- + + ==== Running backup + Save partitions configuration, bootstrap code and create actual backup of /dev/sdc1 +-``` ++-------------------------- + # rear -C alien mkbackuponly +-``` ++-------------------------- + + ==== Running restore from ReaR restore/recovery system +-``` ++-------------------------- + # rear -C alien restoreonly + Restore alien.dd.img to device: [/dev/sdc1] # User is always prompted for restore destination + Device /dev/sdc1 was not found. # If destination does not exist ReaR will try to create it (or fail if BLOCKCLONE_SAVE_MBR_DEV was not set during backup) +@@ -102,7 +102,7 @@ Device Boot Start End Sectors Size Id Type + The partition table has been altered. + Calling ioctl() to re-read partition table. + Syncing disks. +-``` ++-------------------------- + + ==== Summary + In first example we have run backup of /dev/sdc1 partition and stored it on NFS +@@ -127,37 +127,37 @@ In next example we will do backup/restore using BLOCKCLONE and `ntfsclone` + of Linux (installed on /dev/sda) and Windows 10 (installed on /dev/sdb). + + TIP: You can locate right disk devices using `df` and `os-prober` +-``` ++-------------------------- + # df -h /boot + Filesystem Size Used Avail Use% Mounted on + /dev/sda1 10G 4.9G 5.2G 49% / # Linux is most probably installed on /dev/sda + + # os-prober + /dev/sdb1:Windows 10 (loader):Windows:chain # Windows 10 is most probably installed on /dev/sdb +-``` ++-------------------------- + + First we will configure some ReaR backup global options + (similar to link:12-BLOCKCLONE.adoc#1-backuprestore-of-arbitrary-block-device-with-blockclone-and-dd-on-nfs-server[first example] + we will do backup/restore with help of NFS server). + +-``` ++-------------------------- + # cat local.conf + OUTPUT=ISO + BACKUP=NETFS + BACKUP_OPTIONS="nfsvers=3,nolock" + BACKUP_URL=nfs:///mnt/rear + REQUIRED_PROGS+=( ntfsclone ) +-``` ++-------------------------- + + Now we will define backup parameters for Linux. 
+ +-``` ++-------------------------- + # cat base_os.conf + this_file_name=$( basename ${BASH_SOURCE[0]} ) + LOGFILE="$LOG_DIR/rear-$HOSTNAME-$WORKFLOW-${this_file_name%.*}.log" + BACKUP_PROG_ARCHIVE="backup-${this_file_name%.*}" + BACKUP_PROG_EXCLUDE+=( '/media/*' ) +-``` ++-------------------------- + + Our Windows 10 is by default installed on two separate partitions + (partition 1 for boot data and partition 2 for disk C:), +@@ -165,7 +165,7 @@ Our Windows 10 is by default installed on two separate partitions + + Windows boot partition: + +-``` ++-------------------------- + # cat windows_boot.conf + BACKUP=BLOCKCLONE + BACKUP_PROG_ARCHIVE="windows_boot" +@@ -179,10 +179,10 @@ BLOCKCLONE_PROG_OPTS="--quiet" + BLOCKCLONE_SAVE_MBR_DEV="/dev/sdb" + BLOCKCLONE_MBR_FILE="windows_boot_strap.img" + BLOCKCLONE_PARTITIONS_CONF_FILE="windows_partitions.conf" +-``` ++-------------------------- + + Windows data partition (disk C:\): +-``` ++-------------------------- + # cat windows_data.conf + BACKUP=BLOCKCLONE + BACKUP_PROG_ARCHIVE="windows_data" +@@ -196,35 +196,35 @@ BLOCKCLONE_PROG_OPTS="--quiet" + BLOCKCLONE_SAVE_MBR_DEV="/dev/sdb" + BLOCKCLONE_MBR_FILE="windows_boot_strap.img" + BLOCKCLONE_PARTITIONS_CONF_FILE="windows_partitions.conf" +-``` ++-------------------------- + + ==== Running backup + First we will create backup of Linux. `mkbackup` command will create bootable + ISO image with ReaR rescue/recovery system that will be later used for + booting broken system and consecutive recovery. +-``` ++-------------------------- + # rear -C base_os mkbackup +-``` ++-------------------------- + + Now we create backup of Windows 10 boot partition. Command `mkbackuponly` + will ensure that only partition data and partition layout will be saved + (ReaR rescue/recovery system will not be created which is exactly what we want). +-``` ++-------------------------- + # rear -C windows_boot mkbackuponly +-``` ++-------------------------- + + Similarly, we create backup of Windows 10 data partition (disk C:\) +-``` ++-------------------------- + # rear -C windows_data mkbackuponly +-``` ++-------------------------- + + ==== Running restore from ReaR restore/recovery system + As a first step after ReaR rescue/recovery system booted, + we will recover Linux. This step will recover all Linux file systems, + OS data and bootloader. Windows disk will remain untouched. +-``` ++-------------------------- + # rear -C base_os recover +-``` ++-------------------------- + + In second step will recover Windows 10 boot partition. During this step ReaR + will detect that destination partition is not present and ask us for device +@@ -234,25 +234,25 @@ In second step will recover Windows 10 boot partition. During this step ReaR + partition(s) configuration (currently mounted under _/mnt/local_) will + remain untouched. Before starting Windows 10 recovery we should identify + right disk for recovery, as mentioned earlier disk size could be a good start. +-``` ++-------------------------- + # fdisk -l /dev/sdb + Disk /dev/sdb: 50 GiB, 53687091200 bytes, 104857600 sectors +-``` ++-------------------------- + + _/dev/sdb_ looks to be right destination, so we can proceed with restore. +-``` ++-------------------------- + # rear -C windows_boot restoreonly + Restore windows_boot.img to device: [/dev/sdb1] + Device /dev/sdb1 was not found. + Restore partition layout to (^c to abort): [/dev/sdb] + Checking that no-one is using this disk right now ... OK + ... 
+-``` ++-------------------------- + + Last step is to recover Windows 10 OS data (C:\). + Partitions on _/dev/sdb_ were already created in previous step, + hence ReaR will skip prompt for restoring partition layout. +-``` ++-------------------------- + # rear -C windows_data restoreonly + Restore windows_data.img to device: [/dev/sdb2] + Ntfsclone image version: 10.1 +@@ -263,7 +263,7 @@ Space in use : 9396 MB (27.8%) + Offset to image data : 56 (0x38) bytes + Restoring NTFS from image ... + ... +-``` ++-------------------------- + + At this stage Linux together with Windows 10 is successfully restored. + +@@ -286,7 +286,7 @@ In this example we will do backup/restore using BLOCKCLONE and `ntfsclone` + Backups will be stored on NFS server. + + First we set global ReaR options +-``` ++-------------------------- + # cat local.conf + OUTPUT=ISO + BACKUP=NETFS +@@ -300,23 +300,23 @@ BLOCKCLONE_SAVE_MBR_DEV="/dev/sda" + BLOCKCLONE_MBR_FILE="boot_strap.img" + BLOCKCLONE_PARTITIONS_CONF_FILE="partitions.conf" + +-``` ++-------------------------- + + IMPORTANT: BLOCKCLONE_STRICT_PARTITIONING is mandatory if backing up + Linux / Windows that shares one disk. Not using this option might result to + unbootable Windows 10 installation. + + Linux configuration +-``` ++-------------------------- + # cat base_os.conf + this_file_name=$( basename ${BASH_SOURCE[0]} ) + LOGFILE="$LOG_DIR/rear-$HOSTNAME-$WORKFLOW-${this_file_name%.*}.log" + BACKUP_PROG_ARCHIVE="backup-${this_file_name%.*}" + BACKUP_PROG_EXCLUDE+=( '/media/*' ) +-``` ++-------------------------- + + Windows 10 boot partition configuration +-``` ++-------------------------- + # cat windows_boot.conf + BACKUP=BLOCKCLONE + +@@ -328,10 +328,10 @@ BLOCKCLONE_PROG=ntfsclone + BLOCKCLONE_PROG_OPTS="--quiet" + + BLOCKCLONE_SOURCE_DEV="/dev/sda1" +-``` ++-------------------------- + + Windows 10 data partition configuration +-``` ++-------------------------- + # cat windows_data.conf + BACKUP=BLOCKCLONE + BACKUP_PROG_ARCHIVE="windows_data" +@@ -342,42 +342,42 @@ BLOCKCLONE_PROG=ntfsclone + BLOCKCLONE_PROG_OPTS="--quiet" + + BLOCKCLONE_SOURCE_DEV="/dev/sda2" +-``` ++-------------------------- + + ==== Running backup + + Backup of Linux +-``` ++-------------------------- + # rear -C base_os mkbackup +-``` ++-------------------------- + + Backup of Windows 10 boot partition +-``` ++-------------------------- + # rear -C windows_boot mkbackuponly +-``` ++-------------------------- + + Backup of Windows 10 data partition +-``` ++-------------------------- + # rear -C windows_data mkbackuponly +-``` ++-------------------------- + + ==== Running restore from ReaR restore/recovery system + Restore Linux +-``` ++-------------------------- + # rear -C base_os recover +-``` ++-------------------------- + + During this step ReaR will also create both Windows 10 partitions + + Restore Windows 10 data partition +-``` ++-------------------------- + # rear -C windows_data restoreonly +-``` ++-------------------------- + + Restore Windows 10 boot partition +-``` ++-------------------------- + # rear -C windows_boot restoreonly +-``` ++-------------------------- + + === 4. Backup/restore of Linux / Windows 10 dual boot setup sharing same disk with USB as destination + +@@ -389,7 +389,7 @@ In this example we will do backup/restore using BLOCKCLONE and `ntfsclone` + Backups will be stored on USB disk drive (_/dev/sdb_ in this example). 
+ + Global options +-``` ++-------------------------- + # cat local.conf + OUTPUT=USB + BACKUP=NETFS +@@ -407,10 +407,10 @@ BLOCKCLONE_SAVE_MBR_DEV="/dev/sda" + + BLOCKCLONE_MBR_FILE="boot_strap.img" + BLOCKCLONE_PARTITIONS_CONF_FILE="partitions.conf" +-``` ++-------------------------- + + Options used during Linux backup/restore. +-``` ++-------------------------- + # cat local.conf + OUTPUT=USB + BACKUP=NETFS +@@ -428,14 +428,14 @@ BLOCKCLONE_SAVE_MBR_DEV="/dev/sda" + + BLOCKCLONE_MBR_FILE="boot_strap.img" + BLOCKCLONE_PARTITIONS_CONF_FILE="partitions.conf" +-``` ++-------------------------- + + IMPORTANT: USB_SUFFIX option is mandatory as it avoids ReaR to hold every + backup in separate directory, this behavior is essential for BLOCKCLONE + backup method to work correctly. + + Windows boot partition options +-``` ++-------------------------- + # cat windows_boot.conf + BACKUP=BLOCKCLONE + +@@ -447,10 +447,10 @@ BLOCKCLONE_PROG=ntfsclone + BLOCKCLONE_PROG_OPTS="--quiet" + + BLOCKCLONE_SOURCE_DEV="/dev/sda1" +-``` ++-------------------------- + + Windows data partition options +-``` ++-------------------------- + # cat windows_data.conf + BACKUP=BLOCKCLONE + BACKUP_PROG_ARCHIVE="windows_data" +@@ -461,11 +461,11 @@ BLOCKCLONE_PROG=ntfsclone + BLOCKCLONE_PROG_OPTS="--quiet" + + BLOCKCLONE_SOURCE_DEV="/dev/sda2" +-``` ++-------------------------- + + ==== Running backup + First we need to format target USB device, with `rear format` command +-``` ++-------------------------- + # rear -v format /dev/sdb + Relax-and-Recover 2.00 / Git + Using log file: /var/log/rear/rear-centosd.log +@@ -477,15 +477,15 @@ Creating ReaR data partition up to 100% of '/dev/sdb' + Setting 'boot' flag on /dev/sdb + Creating ext3 filesystem with label 'REAR-000' on '/dev/sdb1' + Adjusting filesystem parameters on '/dev/sdb1' +-``` ++-------------------------- + + Backup of Linux +-``` ++-------------------------- + # rear -C base_os mkbackup +-``` ++-------------------------- + + Backup of Windows 10 boot partition +-``` ++-------------------------- + # rear -C windows_boot mkbackuponly + NTFS volume version: 3.1 + Cluster size : 4096 bytes +@@ -496,10 +496,10 @@ Accounting clusters ... + Space in use : 338 MB (64.4%) + Saving NTFS to image ... + Syncing ... +-``` ++-------------------------- + + Backup of Windows 10 data partition +-``` ++-------------------------- + # rear -C windows_data mkbackuponly + NTFS volume version: 3.1 + Cluster size : 4096 bytes +@@ -510,7 +510,7 @@ Accounting clusters ... + Space in use : 9833 MB (54.3%) + Saving NTFS to image ... + Syncing ... +-``` ++-------------------------- + + ==== Running restore from ReaR restore/recovery system + For sake of this demonstration I've purposely used ReaR's rescue/recovery media +@@ -519,7 +519,7 @@ For sake of this demonstration I've purposely used ReaR's rescue/recovery media + demonstrate possibility of ReaR to recover backup to arbitrary disk. + + As first step Linux will be restored, this will create all the partitions + needed, even those used by Windows 10. +-``` ++-------------------------- + RESCUE centosd:~ # rear -C base_os recover + Relax-and-Recover 2.00 / Git + Using log file: /var/log/rear/rear-centosd.log +@@ -541,13 +541,13 @@ Original disk /dev/sda does not exist in the target system. Please choose an app + 2) /dev/sdb + 3) Do not map disk. + #? +-``` ++-------------------------- + + Now ReaR recover command stops as it detected that disk layout is not identical. 
+ As our desired restore target is _/dev/sdb_ we choose right disk and continue + recovery. ReaR will ask to check created restore scripts, but this is not + needed in our scenario. +-``` ++-------------------------- + #? 2 + 2017-01-25 20:54:01 Disk /dev/sdb chosen as replacement for /dev/sda. + Disk /dev/sdb chosen as replacement for /dev/sda. +@@ -607,11 +607,11 @@ Skip installing GRUB Legacy boot loader because GRUB 2 is installed (grub-probe + Installing GRUB2 boot loader + Finished recovering your system. You can explore it under '/mnt/local'. + Saving /var/log/rear/rear-centosd.log as /var/log/rear/rear-centosd-recover-base_os.log +-``` ++-------------------------- + + Now we have Linux part restored, GRUB installed and partitions created, hence + we can continue with Windows 10 boot partition recovery. +-``` ++-------------------------- + RESCUE centosd:~ # rear -C windows_boot restoreonly + Restore windows_boot.nc.img to device: [/dev/sda1] /dev/sdb1 + Ntfsclone image version: 10.1 +@@ -622,12 +622,12 @@ Space in use : 338 MB (64.4%) + Offset to image data : 56 (0x38) bytes + Restoring NTFS from image ... + Syncing ... +-``` ++-------------------------- + + Similarly to Linux restore, we were prompted for restore destination, which + is /dev/sdb1 in our case. + + As the last step we will recover Windows 10 data partition +-``` ++-------------------------- + RESCUE centosd:~ # rear -C windows_data restoreonly + Restore windows_data.nc.img to device: [/dev/sda2] /dev/sdb2 + Ntfsclone image version: 10.1 +@@ -638,7 +638,7 @@ Space in use : 9867 MB (54.5%) + Offset to image data : 56 (0x38) bytes + Restoring NTFS from image ... + Syncing ... +-``` ++-------------------------- + + Again after restoreonly command is launched, ReaR prompts for restore + destination. + +@@ -662,25 +662,25 @@ The _BLOCKCLONE_TRY_UNMOUNT_ is important here: it will attempt to unmount the + run the risk that the data may be inconsistent. + + Global options +-``` ++-------------------------- + # cat site.conf + OUTPUT=ISO + KEEP_OLD_OUTPUT_COPY=1 + BACKUP_URL="nfs:///Stations_bkup/rear/" +-``` ++-------------------------- + + Options used for the base OS backup: +-``` ++-------------------------- + # cat base_system.conf + this_file_name=$( basename ${BASH_SOURCE[0]} ) + LOGFILE="$LOG_DIR/rear-$HOSTNAME-$WORKFLOW-${this_file_name%.*}.log" + BACKUP_PROG_EXCLUDE+=( '/products/*' ) + BACKUP_PROG_ARCHIVE="backup-${this_file_name%.*}" + BACKUP=NETFS +-``` ++-------------------------- + + Options used to take the encrypted filesystem image: +-``` ++-------------------------- + this_file_name=$( basename ${BASH_SOURCE[0]} ) + LOGFILE="$LOG_DIR/rear-$HOSTNAME-$WORKFLOW-${this_file_name%.*}.log" + BACKUP=BLOCKCLONE +@@ -694,18 +694,18 @@ BLOCKCLONE_SOURCE_DEV="/dev/vg00/lvol4" + + BLOCKCLONE_ALLOW_MOUNTED="yes" + BLOCKCLONE_TRY_UNMOUNT="yes" +-``` ++-------------------------- + + ==== Running backup + Base OS backup: +-``` ++-------------------------- + # rear -C base_system mkbackup +-``` ++-------------------------- + + Create image of encrypted filesystem: +-``` ++-------------------------- + # rear -C products_backup mkbackuponly +-``` ++-------------------------- + + ==== Running restore from ReaR restore/recovery system + First recover the base OS. This will create all the partitions needed, including +@@ -713,7 +713,7 @@ First recover the base OS. This will create all the partitions needed, including + As illustrated below, you will be prompted to chose a new encryption passphrase. 
+ Please provide one, but you need not care about its value as it will get overwritten + during the next phase: +-``` ++-------------------------- + RESCUE pc-pan:~ # rear -C base_system.conf recover + [...] + Please enter the password for LUKS device cr_vg00-lvol4 (/dev/mapper/vg00-lvol4): +@@ -724,7 +724,7 @@ Creating filesystem of type xfs with mount point /products on /dev/mapper/cr_vg0 + Mounting filesystem /products + Disk layout created. + [...] +-``` ++-------------------------- + + Now we can proceed and restore the encrypted filesystem image. The target filesystem + will have been mounted by ReaR during the previous phase, but this will be +@@ -732,12 +732,12 @@ Now we can proceed and restore the encrypted filesystem image. The target filesy + to "yes". + + As illustrated below, you will be prompted for the target block device to use. + Confirm by pressing Enter or type in another value: +-``` ++-------------------------- + RESCUE pc-pan:~ # rear -C products_backup.conf restoreonly + [...] + Restore backup-products_backup.dd.img to device: [/dev/vg00/lvol4] + [...] +-``` ++-------------------------- + + Please note that the target device will not be re-mounted by the script at the end + of the restore phase. If needed, this should be done manually. + +diff --git a/doc/user-guide/16-Rubrik-CDM.adoc b/doc/user-guide/16-Rubrik-CDM.adoc +index 41f37d20..3ac23b7b 100644 +--- a/doc/user-guide/16-Rubrik-CDM.adoc ++++ b/doc/user-guide/16-Rubrik-CDM.adoc +@@ -84,7 +84,7 @@ To make CentOS v8.0 work the following line was needed: + == Test Matrix + + .Test Matrix +-[%header,format=csv] ++[options="header",format="csv"] + |=== + Operating System,DHCP,Static IP,Virtual,Physical,LVM Root Disk,Plain Root Disk,EXT3,EXT4,XFS,BTRFS,Original Cluster,Replication Cluster + CentOS 7.3,,pass,Pass,,Pass,,,,Pass,,Pass, diff --git a/SOURCES/rear-bz1631183.patch b/SOURCES/rear-bz1631183.patch deleted file mode 100644 index 7c0ad0f..0000000 --- a/SOURCES/rear-bz1631183.patch +++ /dev/null @@ -1,275 +0,0 @@ -diff --git a/usr/share/rear/backup/NETFS/default/500_make_backup.sh b/usr/share/rear/backup/NETFS/default/500_make_backup.sh -index 47266910..7170cda6 100644 ---- a/usr/share/rear/backup/NETFS/default/500_make_backup.sh -+++ b/usr/share/rear/backup/NETFS/default/500_make_backup.sh -@@ -67,49 +67,99 @@ else - SPLIT_COMMAND="dd of=$backuparchive" - fi - -+# Used by "tar" method to record which pipe command failed -+FAILING_BACKUP_PROG_FILE="$TMP_DIR/failing_backup_prog" -+FAILING_BACKUP_PROG_RC_FILE="$TMP_DIR/failing_backup_prog_rc" -+ - LogPrint "Creating $BACKUP_PROG archive '$backuparchive'" - ProgressStart "Preparing archive operation" - ( - case "$(basename ${BACKUP_PROG})" in -- # tar compatible programs here -- (tar) -- set_tar_features -- Log $BACKUP_PROG $TAR_OPTIONS --sparse --block-number --totals --verbose \ -- --no-wildcards-match-slash --one-file-system \ -- --ignore-failed-read "${BACKUP_PROG_OPTIONS[@]}" \ -- $BACKUP_PROG_CREATE_NEWER_OPTIONS \ -- ${BACKUP_PROG_BLOCKS:+-b $BACKUP_PROG_BLOCKS} "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \ -- -X $TMP_DIR/backup-exclude.txt -C / -c -f - \ -- $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE \| $BACKUP_PROG_CRYPT_OPTIONS BACKUP_PROG_CRYPT_KEY \| $SPLIT_COMMAND -- $BACKUP_PROG $TAR_OPTIONS --sparse --block-number --totals --verbose \ -- --no-wildcards-match-slash --one-file-system \ -- --ignore-failed-read "${BACKUP_PROG_OPTIONS[@]}" \ -- $BACKUP_PROG_CREATE_NEWER_OPTIONS \ -- ${BACKUP_PROG_BLOCKS:+-b $BACKUP_PROG_BLOCKS} 
"${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \ -- -X $TMP_DIR/backup-exclude.txt -C / -c -f - \ -- $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE | $BACKUP_PROG_CRYPT_OPTIONS $BACKUP_PROG_CRYPT_KEY | $SPLIT_COMMAND -- ;; -- (rsync) -- # make sure that the target is a directory -- mkdir -p $v "$backuparchive" >&2 -- Log $BACKUP_PROG --verbose "${BACKUP_RSYNC_OPTIONS[@]}" --one-file-system --delete \ -- --exclude-from=$TMP_DIR/backup-exclude.txt --delete-excluded \ -- $(cat $TMP_DIR/backup-include.txt) "$backuparchive" -- $BACKUP_PROG --verbose "${BACKUP_RSYNC_OPTIONS[@]}" --one-file-system --delete \ -- --exclude-from=$TMP_DIR/backup-exclude.txt --delete-excluded \ -- $(cat $TMP_DIR/backup-include.txt) "$backuparchive" >&2 -- ;; -- (*) -- Log "Using unsupported backup program '$BACKUP_PROG'" -- Log $BACKUP_PROG "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \ -- $BACKUP_PROG_OPTIONS_CREATE_ARCHIVE $TMP_DIR/backup-exclude.txt \ -- "${BACKUP_PROG_OPTIONS[@]}" $backuparchive \ -- $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE > $backuparchive -- $BACKUP_PROG "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \ -- $BACKUP_PROG_OPTIONS_CREATE_ARCHIVE $TMP_DIR/backup-exclude.txt \ -- "${BACKUP_PROG_OPTIONS[@]}" $backuparchive \ -- $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE > $backuparchive -- ;; -+ # tar compatible programs here -+ (tar) -+ set_tar_features -+ Log $BACKUP_PROG $TAR_OPTIONS --sparse --block-number --totals --verbose \ -+ --no-wildcards-match-slash --one-file-system \ -+ --ignore-failed-read "${BACKUP_PROG_OPTIONS[@]}" \ -+ $BACKUP_PROG_CREATE_NEWER_OPTIONS \ -+ ${BACKUP_PROG_BLOCKS:+-b $BACKUP_PROG_BLOCKS} "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \ -+ -X $TMP_DIR/backup-exclude.txt -C / -c -f - \ -+ $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE \| $BACKUP_PROG_CRYPT_OPTIONS BACKUP_PROG_CRYPT_KEY \| $SPLIT_COMMAND -+ -+ # Variable used to record the short name of piped commands in case of -+ # error, e.g. ( "tar" "cat" "dd" ) in case of unencrypted and unsplit backup. -+ backup_prog_shortnames=( -+ "$(basename $(echo "$BACKUP_PROG" | awk '{ print $1 }'))" -+ "$(basename $(echo "$BACKUP_PROG_CRYPT_OPTIONS" | awk '{ print $1 }'))" -+ "$(basename $(echo "$SPLIT_COMMAND" | awk '{ print $1 }'))" -+ ) -+ for index in ${!backup_prog_shortnames[@]} ; do -+ [ -n "${backup_prog_shortnames[$index]}" ] || BugError "No computed shortname for pipe component $index" -+ done -+ -+ $BACKUP_PROG $TAR_OPTIONS --sparse --block-number --totals --verbose \ -+ --no-wildcards-match-slash --one-file-system \ -+ --ignore-failed-read "${BACKUP_PROG_OPTIONS[@]}" \ -+ $BACKUP_PROG_CREATE_NEWER_OPTIONS \ -+ ${BACKUP_PROG_BLOCKS:+-b $BACKUP_PROG_BLOCKS} \ -+ "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \ -+ -X $TMP_DIR/backup-exclude.txt -C / -c -f - \ -+ $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE | \ -+ \ -+ $BACKUP_PROG_CRYPT_OPTIONS $BACKUP_PROG_CRYPT_KEY | \ -+ \ -+ $SPLIT_COMMAND -+ pipes_rc=( ${PIPESTATUS[@]} ) -+ -+ # Exit code logic: -+ # - never return rc=1 (this is reserved for "tar" warning about modified files) -+ # - process exit code in pipe's reverse order -+ # - if last command failed (e.g. "dd"), return an error -+ # - otherwise if previous command failed (e.g. "encrypt"), return an error -+ # ... -+ # - otherwise return "tar" exit code -+ # -+ # When an error occurs, record the program name in $FAILING_BACKUP_PROG_FILE -+ # and real exit code in $FAILING_BACKUP_PROG_RC_FILE. 
-+ -+ let index=${#pipes_rc[@]}-1 -+ while [ $index -ge 0 ] ; do -+ rc=${pipes_rc[$index]} -+ if [ $rc -ne 0 ] ; then -+ echo "${backup_prog_shortnames[$index]}" > $FAILING_BACKUP_PROG_FILE -+ echo "$rc" > $FAILING_BACKUP_PROG_RC_FILE -+ if [ $rc -eq 1 ] && [ "${backup_prog_shortnames[$index]}" != "tar" ] ; then -+ rc=2 -+ fi -+ exit $rc -+ fi -+ # This pipe command succeeded, check the previous one -+ let index-- -+ done -+ # This was a success -+ exit 0 -+ ;; -+ (rsync) -+ # make sure that the target is a directory -+ mkdir -p $v "$backuparchive" >&2 -+ Log $BACKUP_PROG --verbose "${BACKUP_RSYNC_OPTIONS[@]}" --one-file-system --delete \ -+ --exclude-from=$TMP_DIR/backup-exclude.txt --delete-excluded \ -+ $(cat $TMP_DIR/backup-include.txt) "$backuparchive" -+ $BACKUP_PROG --verbose "${BACKUP_RSYNC_OPTIONS[@]}" --one-file-system --delete \ -+ --exclude-from=$TMP_DIR/backup-exclude.txt --delete-excluded \ -+ $(cat $TMP_DIR/backup-include.txt) "$backuparchive" >&2 -+ ;; -+ (*) -+ Log "Using unsupported backup program '$BACKUP_PROG'" -+ Log $BACKUP_PROG "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \ -+ $BACKUP_PROG_OPTIONS_CREATE_ARCHIVE $TMP_DIR/backup-exclude.txt \ -+ "${BACKUP_PROG_OPTIONS[@]}" $backuparchive \ -+ $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE > $backuparchive -+ $BACKUP_PROG "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \ -+ $BACKUP_PROG_OPTIONS_CREATE_ARCHIVE $TMP_DIR/backup-exclude.txt \ -+ "${BACKUP_PROG_OPTIONS[@]}" $backuparchive \ -+ $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE > $backuparchive -+ ;; - esac 2> "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log" - # important trick: the backup prog is the last in each case entry and the case .. esac is the last command - # in the (..) subshell. As a result the return code of the subshell is the return code of the backup prog! -@@ -121,44 +171,44 @@ sleep 1 # Give the backup software a good chance to start working - - # return disk usage in bytes - function get_disk_used() { -- let "$(stat -f -c 'used=(%b-%f)*%S' $1)" -- echo $used -+ let "$(stat -f -c 'used=(%b-%f)*%S' $1)" -+ echo $used - } - - # While the backup runs in a sub-process, display some progress information to the user. - # ProgressInfo texts have a space at the end to get the 'OK' from ProgressStop shown separated. - test "$PROGRESS_WAIT_SECONDS" || PROGRESS_WAIT_SECONDS=1 - case "$( basename $BACKUP_PROG )" in -- (tar) -- while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do -- #blocks="$(stat -c %b ${backuparchive})" -- #size="$((blocks*512))" -- size="$(stat -c %s ${backuparchive}* | awk '{s+=$1} END {print s}')" -- ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] " -- done -- ;; -- (rsync) -- # since we do not want to do a $(du -s) run every second we count disk usage instead -- # this obviously leads to wrong results in case something else is writing to the same -- # disk at the same time as is very likely with a networked file system. For local disks -- # this should be good enough and in any case this is only some eye candy. -- # TODO: Find a fast way to count the actual transfer data, preferrable getting the info from rsync. 
-- let old_disk_used="$(get_disk_used "$backuparchive")" -- while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do -- let disk_used="$(get_disk_used "$backuparchive")" size=disk_used-old_disk_used -- ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] " -- done -- ;; -- (*) -- while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do -- size="$(stat -c "%s" "$backuparchive")" || { -- kill -9 $BackupPID -- ProgressError -- Error "$(basename $BACKUP_PROG) failed to create the archive file" -- } -- ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] " -- done -- ;; -+ (tar) -+ while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do -+ #blocks="$(stat -c %b ${backuparchive})" -+ #size="$((blocks*512))" -+ size="$(stat -c %s ${backuparchive}* | awk '{s+=$1} END {print s}')" -+ ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] " -+ done -+ ;; -+ (rsync) -+ # since we do not want to do a $(du -s) run every second we count disk usage instead -+ # this obviously leads to wrong results in case something else is writing to the same -+ # disk at the same time as is very likely with a networked file system. For local disks -+ # this should be good enough and in any case this is only some eye candy. -+ # TODO: Find a fast way to count the actual transfer data, preferrable getting the info from rsync. -+ let old_disk_used="$(get_disk_used "$backuparchive")" -+ while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do -+ let disk_used="$(get_disk_used "$backuparchive")" size=disk_used-old_disk_used -+ ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] " -+ done -+ ;; -+ (*) -+ while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do -+ size="$(stat -c "%s" "$backuparchive")" || { -+ kill -9 $BackupPID -+ ProgressError -+ Error "$(basename $BACKUP_PROG) failed to create the archive file" -+ } -+ ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] " -+ done -+ ;; - esac - ProgressStop - transfertime="$((SECONDS-starttime))" -@@ -177,10 +227,12 @@ sleep 1 - # everyone should see this warning, even if not verbose - case "$(basename $BACKUP_PROG)" in - (tar) -- if (( $backup_prog_rc == 1 )); then -- LogPrint "WARNING: $(basename $BACKUP_PROG) ended with return code $backup_prog_rc and below output: -+ if (( $backup_prog_rc != 0 )); then -+ prog="$(cat $FAILING_BACKUP_PROG_FILE)" -+ if (( $backup_prog_rc == 1 )); then -+ LogUserOutput "WARNING: $prog ended with return code 1 and below output: - ---snip--- --$(grep '^tar: ' $RUNTIME_LOGFILE | sed -e 's/^/ /' | tail -n3) -+$(grep '^tar: ' "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log" | sed -e 's/^/ /' | tail -n3) - ---------- - This means that files have been modified during the archiving - process. As a result the backup may not be completely consistent -@@ -188,16 +240,19 @@ or may not be a perfect copy of the system. Relax-and-Recover - will continue, however it is highly advisable to verify the - backup in order to be sure to safely recover this system. 
- " -- elif (( $backup_prog_rc > 1 )); then -- Error "$(basename $BACKUP_PROG) failed with return code $backup_prog_rc and below output: -+ else -+ rc=$(cat $FAILING_BACKUP_PROG_RC_FILE) -+ Error "$prog failed with return code $rc and below output: - ---snip--- --$(grep '^tar: ' $RUNTIME_LOGFILE | sed -e 's/^/ /' | tail -n3) -+$(grep "^$prog: " "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log" | sed -e 's/^/ /' | tail -n3) - ---------- - This means that the archiving process ended prematurely, or did - not even start. As a result it is unlikely you can recover this - system properly. Relax-and-Recover is therefore aborting execution. - " -- fi;; -+ fi -+ fi -+ ;; - (*) - if (( $backup_prog_rc > 0 )) ; then - Error "$(basename $BACKUP_PROG) failed with return code $backup_prog_rc -@@ -212,10 +267,12 @@ esac - - tar_message="$(tac $RUNTIME_LOGFILE | grep -m1 '^Total bytes written: ')" - if [ $backup_prog_rc -eq 0 -a "$tar_message" ] ; then -- LogPrint "$tar_message in $transfertime seconds." -+ LogPrint "$tar_message in $transfertime seconds." - elif [ "$size" ]; then -- LogPrint "Archived $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]" -+ LogPrint "Archived $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]" - fi - - ### Copy progress log to backup media - cp $v "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log" "${opath}/${BACKUP_PROG_ARCHIVE}.log" >&2 -+ -+# vim: set et ts=4 sw=4: diff --git a/SOURCES/rear-bz1639705.patch b/SOURCES/rear-bz1639705.patch deleted file mode 100644 index 88b6358..0000000 --- a/SOURCES/rear-bz1639705.patch +++ /dev/null @@ -1,1487 +0,0 @@ -diff --git a/tests/setup1/README b/tests/setup1/README -index 75bc000b..1cc4faa8 100644 ---- a/tests/setup1/README -+++ b/tests/setup1/README -@@ -3,7 +3,7 @@ - +----------------------------------+ - - --You need a VM with 10 network interfaces (eth0 being the main interface). -+You need a VM with 13 network interfaces (eth0 being the main interface). - All interfaces except eth0 can be non-reachable. - - kvm.xml is an example template of such VM. -@@ -12,10 +12,11 @@ kvm.xml is an example template of such VM. - eth0 IP 192.168.122.x/24 default 192.168.122.1 (x=177) - bond12 eth1 + eth2 mode=4 miimon=1000 IP 1.1.1.1 - bond34 eth3 + eth4 mode=1 miimon=100 IP 2.2.2.2 route 102.0.0.0/8 --team56 eth5 + eth6 IP 3.3.3.3 route 103.0.0.0/8 -+team56 eth5 + eth6 lacp IP 3.3.3.3 route 103.0.0.0/8 - bridge78 eth7 + eth8 IP 4.4.4.4 route 104.0.0.0/8 - vlan1eth9 eth9.1 IP 5.5.5.5 route 105.0.0.0/8 - eth10 IP 6.6.6.6 route 106.0.0.0/8 -+team1112 eth11 + eth12 activebackup IP 7.7.7.7 route 107.0.0.0/8 - - routes: - default via 192.168.122.1 dev eth0 -@@ -24,23 +25,24 @@ default via 192.168.122.1 dev eth0 - 104.0.0.0/8 via 4.4.4.254 dev bridge78 - 105.0.0.0/8 via 5.5.5.254 dev eth9.1 - 106.0.0.0/8 via 6.6.6.254 dev eth10 -+107.0.0.0/8 via 7.7.7.254 dev team1112 - - - Running the unit tests: - ---------------------- - --After copying the files and making sure the script has been adapted, -+After copying the network configuration files and rebooting the system, - - from the VM, run the following command: - --# for i in $(seq 1 8); do ./tcase$i.sh; done -+# for i in $(seq 1 8); do ./tcase${i}.sh; done - - This will record the generated network+route files. 
- - Then to verify result, run the following command (this will take down the - network, except eth0): - --# for i in $(seq 1 8); do ./verify.sh tcase$i_results; done -+# for i in $(seq 1 8); do ./verify.sh tcase${i}_results; done - - - ------------------------------------------------------------------------------- -@@ -51,92 +53,98 @@ Test Case #1: standard - - Expected results: - --team56 -> eth5 - default via 192.168.122.1 dev eth0 - 102.0.0.0/8 via 2.2.2.254 dev bond34 --103.0.0.0/8 via 3.3.3.254 dev eth5 -+103.0.0.0/8 via 3.3.3.254 dev team56 - 104.0.0.0/8 via 4.4.4.254 dev bridge78 - 105.0.0.0/8 via 5.5.5.254 dev eth9.1 - 106.0.0.0/8 via 6.6.6.254 dev eth10 -+107.0.0.0/8 via 7.7.7.254 dev team1112 - - - Test Case #2: simplification - ------------ - --SIMPLIFY_BONDING=y SIMPLIFY_BRIDGE=y -+SIMPLIFY_BONDING=y SIMPLIFY_BRIDGE=y SIMPLIFY_TEAMING=y - - Expected results: - --bond12 -> eth1 -+bond12 -> bond12 (not simplified because mode=4) - bond34 -> eth3 --team56 -> eth5 -+team56 -> team56 (not simplified because runner=lacp) - bridge78 -> eth7 -+team1112 -> eth11 - default via 192.168.122.1 dev eth0 - 102.0.0.0/8 via 2.2.2.254 dev eth3 --103.0.0.0/8 via 3.3.3.254 dev eth5 -+103.0.0.0/8 via 3.3.3.254 dev team56 - 104.0.0.0/8 via 4.4.4.254 dev eth7 - 105.0.0.0/8 via 5.5.5.254 dev eth9.1 - 106.0.0.0/8 via 6.6.6.254 dev eth10 -+107.0.0.0/8 via 7.7.7.254 dev eth11 - - --Test Case #3: standard with eth2, eth4, eth6, eth8, eth10 DOWN -+Test Case #3: standard with eth2, eth4, eth6, eth8, eth10, eth12 DOWN - ------------ - --for eth in eth2 eth4 eth6 eth8 eth10; do ifdown $eth; done -+for eth in eth2 eth4 eth6 eth8 eth10 eth12; do ifdown $eth; done - - Expected results: - - eth2, eth4, eth6, eth8, eth10 not in file - default via 192.168.122.1 dev eth0 - 102.0.0.0/8 via 2.2.2.254 dev bond34 --103.0.0.0/8 via 3.3.3.254 dev eth5 -+103.0.0.0/8 via 3.3.3.254 dev team56 - 104.0.0.0/8 via 4.4.4.254 dev bridge78 - 105.0.0.0/8 via 5.5.5.254 dev eth9.1 -+107.0.0.0/8 via 7.7.7.254 dev team1112 - - --Test Case #4: simplification with eth2, eth4, eth6, eth8, eth10 DOWN -+Test Case #4: simplification with eth2, eth4, eth6, eth8, eth10, eth12 DOWN - ------------ - --for eth in eth2 eth4 eth6 eth8 eth10; do ifdown $eth; done -+for eth in eth2 eth4 eth6 eth8 eth10 eth12; do ifdown $eth; done - - Expected results: - --eth2, eth4, eth6, eth8, eth10 not in file -+eth4, eth6, eth8, eth10, eth12 not in file - default via 192.168.122.1 dev eth0 - 102.0.0.0/8 via 2.2.2.254 dev eth3 --103.0.0.0/8 via 3.3.3.254 dev eth5 -+103.0.0.0/8 via 3.3.3.254 dev team56 - 104.0.0.0/8 via 4.4.4.254 dev eth7 - 105.0.0.0/8 via 5.5.5.254 dev eth9.1 -+107.0.0.0/8 via 7.7.7.254 dev eth11 - - --Test Case #5: standard with eth1, eth3, eth5, eth7, eth9 DOWN -+Test Case #5: standard with eth1, eth3, eth5, eth7, eth9, eth11 DOWN - ------------ - --for eth in eth1 eth3 eth5 eth7 eth9; do ifdown $eth; done -+for eth in eth1 eth3 eth5 eth7 eth9 eth11; do ifdown $eth; done - - Expected results: - - eth2, eth4, eth6, eth8, eth10 not in file - default via 192.168.122.1 dev eth0 - 102.0.0.0/8 via 2.2.2.254 dev bond34 --103.0.0.0/8 via 3.3.3.254 dev eth6 -+103.0.0.0/8 via 3.3.3.254 dev team56 - 104.0.0.0/8 via 4.4.4.254 dev bridge78 - 106.0.0.0/8 via 6.6.6.254 dev eth10 -+107.0.0.0/8 via 7.7.7.254 dev team1112 - - --Test Case #6: simplification with eth1, eth3, eth5, eth7, eth9 DOWN -+Test Case #6: simplification with eth1, eth3, eth5, eth7, eth9, eth11 DOWN - ------------ - --for eth in eth1 eth3 eth5 eth7 eth9; do ifdown $eth; done -+for eth in eth1 
eth3 eth5 eth7 eth9 eth11; do ifdown $eth; done - - Expected results: - --eth2, eth4, eth6, eth8, eth10 not in file -+eth4, eth6, eth8, eth10 not in file - default via 192.168.122.1 dev eth0 - 102.0.0.0/8 via 2.2.2.254 dev eth4 --103.0.0.0/8 via 3.3.3.254 dev eth6 -+103.0.0.0/8 via 3.3.3.254 dev team56 - 104.0.0.0/8 via 4.4.4.254 dev eth8 - 106.0.0.0/8 via 6.6.6.254 dev eth10 -+107.0.0.0/8 via 7.7.7.254 dev eth12 - - - Test Case #7: IP address mapping -@@ -149,6 +157,7 @@ bond34 2.2.2.100/16 - bridge78 4.4.4.100/24 - #eth9.1 5.5.5.100/24 - eth10 6.6.6.100/16 -+team1112 7.7.7.100/16 - - Expected results: - -@@ -156,14 +165,15 @@ bond12 -> 1.1.1.100/16 - bond34 -> 2.2.2.100/16 - eth5 -> 3.3.3.3/16 (no mapping) - bridge78 -> 4.4.4.100/24 --eth9.1 => 5.5.5.5/24 (no mapping) --eth10 => 6.6.6.100/16 -+eth9.1 -> 5.5.5.5/24 (no mapping) -+eth10 -> 6.6.6.100/16 -+team1112 -> 7.7.7.100/16 - - - Test Case #8: IP address mapping & simplification - ------------ - --SIMPLIFY_BONDING=y SIMPLIFY_BRIDGE=y -+SIMPLIFY_BONDING=y SIMPLIFY_BRIDGE=y SIMPLIFY_TEAMING=y - - # cat mappings/ip_addresses - bond12 1.1.1.100/16 -@@ -172,12 +182,14 @@ bond34 2.2.2.100/16 - bridge78 4.4.4.100/24 - #eth9.1 5.5.5.100/24 - eth10 6.6.6.100/16 -+team1112 7.7.7.100/16 - - Expected results: - --eth1 => 1.1.1.100/16 --eth3 => 2.2.2.100/16 --eth5 => 3.3.3.3/16 (no mapping) --eth7 => 4.4.4.100/24 --eth9.1 => 5.5.5.5/24 (no mapping) --eth10 => 6.6.6.100/16 -+bond12 -> 1.1.1.100/16 -+eth3 -> 2.2.2.100/16 -+eth5 -> 3.3.3.3/16 (no mapping) -+eth7 -> 4.4.4.100/24 -+eth9.1 -> 5.5.5.5/24 (no mapping) -+eth10 -> 6.6.6.100/16 -+eth11 -> 7.7.7.100/16 -diff --git a/tests/setup1/etc/sysconfig/network-scripts/ifcfg-eth11 b/tests/setup1/etc/sysconfig/network-scripts/ifcfg-eth11 -new file mode 100644 -index 00000000..f8709b3e ---- /dev/null -+++ b/tests/setup1/etc/sysconfig/network-scripts/ifcfg-eth11 -@@ -0,0 +1,6 @@ -+NAME="eth11" -+DEVICE="eth11" -+ONBOOT="yes" -+TEAM_MASTER="team1112" -+DEVICETYPE="TeamPort" -+TYPE="Ethernet" -diff --git a/tests/setup1/etc/sysconfig/network-scripts/ifcfg-eth12 b/tests/setup1/etc/sysconfig/network-scripts/ifcfg-eth12 -new file mode 100644 -index 00000000..a5d62722 ---- /dev/null -+++ b/tests/setup1/etc/sysconfig/network-scripts/ifcfg-eth12 -@@ -0,0 +1,6 @@ -+NAME="eth12" -+DEVICE="eth12" -+ONBOOT="yes" -+TEAM_MASTER="team1112" -+DEVICETYPE="TeamPort" -+TYPE="Ethernet" -diff --git a/tests/setup1/etc/sysconfig/network-scripts/ifcfg-team1112 b/tests/setup1/etc/sysconfig/network-scripts/ifcfg-team1112 -new file mode 100644 -index 00000000..fd48fb47 ---- /dev/null -+++ b/tests/setup1/etc/sysconfig/network-scripts/ifcfg-team1112 -@@ -0,0 +1,9 @@ -+DEVICE="team1112" -+TEAM_CONFIG='{"device":"team1112","runner":{"name":"activebackup"},"link_watch":{"name":"ethtool"},"ports":{"eth11":{"prio":-10,"sticky":true},"eth12":{"prio":100}}}' -+PROXY_METHOD="none" -+BROWSER_ONLY="no" -+NAME="team1112" -+ONBOOT="yes" -+DEVICETYPE="Team" -+IPADDR=7.7.7.7 -+PREFIX=16 -diff --git a/tests/setup1/etc/sysconfig/network-scripts/route-team1112 b/tests/setup1/etc/sysconfig/network-scripts/route-team1112 -new file mode 100644 -index 00000000..d3e82f33 ---- /dev/null -+++ b/tests/setup1/etc/sysconfig/network-scripts/route-team1112 -@@ -0,0 +1 @@ -+107.0.0.0/8 via 7.7.7.254 -diff --git a/tests/setup1/kvm.xml b/tests/setup1/kvm.xml -index 3e7e84aa..2e7d10cf 100644 ---- a/tests/setup1/kvm.xml -+++ b/tests/setup1/kvm.xml -@@ -106,6 +106,14 @@ - -
- -+ -+ -+
-+ -+ -+ -+
-+ - - - -diff --git a/tests/setup1/mappings/ip_addresses b/tests/setup1/mappings/ip_addresses -index 145300b9..102fdfcf 100644 ---- a/tests/setup1/mappings/ip_addresses -+++ b/tests/setup1/mappings/ip_addresses -@@ -4,3 +4,4 @@ bond34 2.2.2.100/16 - bridge78 4.4.4.100/24 - #eth9.1 5.5.5.100/24 - eth10 6.6.6.100/16 -+team1112 7.7.7.100/16 -diff --git a/tests/setup1/run.sh b/tests/setup1/run.sh -index 8dbc436b..3de8a5ba 100644 ---- a/tests/setup1/run.sh -+++ b/tests/setup1/run.sh -@@ -4,9 +4,9 @@ echo - echo "$0" - echo - --REAR_DIR="/path/to/rear/sources" -+REAR_DIR="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/../..")" - --RESULT_DIR="/root/$(basename $0 .sh)_results" -+RESULT_DIR="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")/$(basename $0 .sh)_results" - mkdir -p $RESULT_DIR - - function DebugPrint () { -@@ -41,9 +41,10 @@ function has_binary () { - which $1 >/dev/null 2>&1 - } - --TMP_DIR=/root/tmp -+TMP_DIR="/tmp/$(basename "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")")" - --rm -fr $TMP_DIR -+rm -fr $TMP_DIR >/dev/null 2>&1 -+mkdir -p $TMP_DIR - - # Add to sed -e below to test "ip_link_supports_bridge='false'" (RHEL6) - # -e "s#\$ip_link_supports_bridge#'false'#" \ -@@ -53,13 +54,13 @@ rm -fr $TMP_DIR - - # Add to sed -e below to have code using 'brctl' instead of 'ip link' (RHEL6) - # -e "s#\$net_devices_have_lower_links#'false'#" \ --sed -e "s#^network_devices_setup_script=.*#network_devices_setup_script=/tmp/60-network-devices.sh#" \ -- $REAR_DIR/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh > /tmp/310_network_devices.sh --sed "s#^netscript=.*#netscript=/tmp/62-routing.sh#" $REAR_DIR/usr/share/rear/rescue/GNU/Linux/350_routing.sh > /tmp/350_routing.sh -+sed -e "s#^network_devices_setup_script=.*#network_devices_setup_script=$TMP_DIR/60-network-devices.sh#" \ -+ $REAR_DIR/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh > $TMP_DIR/310_network_devices.sh -+sed "s#^network_routing_setup_script=.*#network_routing_setup_script=$TMP_DIR/62-routing.sh#" $REAR_DIR/usr/share/rear/rescue/GNU/Linux/350_routing.sh > $TMP_DIR/350_routing.sh - --. /tmp/310_network_devices.sh --. /tmp/350_routing.sh -+. $TMP_DIR/310_network_devices.sh -+. $TMP_DIR/350_routing.sh - --for f in /tmp/60-network-devices.sh /tmp/62-routing.sh; do -+for f in $TMP_DIR/60-network-devices.sh $TMP_DIR/62-routing.sh; do - grep -v "dev eth0" $f > $RESULT_DIR/$(basename $f) - done -diff --git a/tests/setup1/tcase1.sh b/tests/setup1/tcase1.sh -index 597f3f5c..7b5032c7 100755 ---- a/tests/setup1/tcase1.sh -+++ b/tests/setup1/tcase1.sh -@@ -1,6 +1,5 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh -diff --git a/tests/setup1/tcase1_results/ip_a.expected b/tests/setup1/tcase1_results/ip_a.expected -index e039f6f9..415d840b 100644 ---- a/tests/setup1/tcase1_results/ip_a.expected -+++ b/tests/setup1/tcase1_results/ip_a.expected -@@ -2,14 +2,15 @@ eth1: mtu 1500 qdisc pfifo_fast master b - eth2: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 - eth4: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 --eth5: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth5 --eth6: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth5: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 -+eth6: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 - eth7: mtu 1500 qdisc pfifo_fast master bridge78 state UP qlen 1000 - eth8: mtu 1500 qdisc pfifo_fast master bridge78 state UP qlen 1000 - eth9: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 6.6.6.6/16 scope global eth10 -+eth11: mtu 1500 qdisc pfifo_fast master team1112 state UP qlen 1000 -+eth12: mtu 1500 qdisc pfifo_fast master team1112 state UP qlen 1000 - bond12: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 1.1.1.1/16 scope global bond12 - bond34: mtu 1500 qdisc noqueue state UP qlen 1000 -@@ -18,3 +19,7 @@ bridge78: mtu 1500 qdisc noqueue state UP qlen - inet 4.4.4.4/24 scope global bridge78 - eth9.1@eth9: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 5.5.5.5/24 scope global eth9.1 -+team1112: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 7.7.7.7/16 scope global team1112 -+team56: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team56 -diff --git a/tests/setup1/tcase1_results/ip_r.expected b/tests/setup1/tcase1_results/ip_r.expected -index 0e5ee87c..f246714c 100644 ---- a/tests/setup1/tcase1_results/ip_r.expected -+++ b/tests/setup1/tcase1_results/ip_r.expected -@@ -3,9 +3,11 @@ - 104.0.0.0/8 via 4.4.4.254 - 105.0.0.0/8 via 5.5.5.254 - 106.0.0.0/8 via 6.6.6.254 -+107.0.0.0/8 via 7.7.7.254 - 1.1.0.0/16 proto kernel scope link src 1.1.1.1 - 2.2.0.0/16 proto kernel scope link src 2.2.2.2 - 3.3.0.0/16 proto kernel scope link src 3.3.3.3 - 4.4.4.0/24 proto kernel scope link src 4.4.4.4 - 5.5.5.0/24 proto kernel scope link src 5.5.5.5 - 6.6.0.0/16 proto kernel scope link src 6.6.6.6 -+7.7.0.0/16 proto kernel scope link src 7.7.7.7 -diff --git a/tests/setup1/tcase2.sh b/tests/setup1/tcase2.sh -index faa39396..65d1aa34 100755 ---- a/tests/setup1/tcase2.sh -+++ b/tests/setup1/tcase2.sh -@@ -1,9 +1,9 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - - export SIMPLIFY_BONDING=y - export SIMPLIFY_BRIDGE=y -+export SIMPLIFY_TEAMING=y - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh -diff --git a/tests/setup1/tcase2_results/ip_a.expected b/tests/setup1/tcase2_results/ip_a.expected -index 51280843..21c5e686 100644 ---- a/tests/setup1/tcase2_results/ip_a.expected -+++ b/tests/setup1/tcase2_results/ip_a.expected -@@ -1,17 +1,22 @@ --eth1: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 1.1.1.1/16 scope global eth1 --eth2: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 1.1.1.1/16 scope global bond12 -+eth1: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 -+eth2: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 2.2.2.2/16 scope global eth3 - eth4: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth5: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth5 --eth6: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth5: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 -+eth6: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 - eth7: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 4.4.4.4/24 scope global eth7 - eth8: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth9: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 6.6.6.6/16 scope global eth10 -+eth11: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -+ inet 7.7.7.7/16 scope global eth11 -+eth12: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth9.1@eth9: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 5.5.5.5/24 scope global eth9.1 -+team56: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team56 -diff --git a/tests/setup1/tcase2_results/ip_r.expected b/tests/setup1/tcase2_results/ip_r.expected -index 0e5ee87c..f246714c 100644 ---- a/tests/setup1/tcase2_results/ip_r.expected -+++ b/tests/setup1/tcase2_results/ip_r.expected -@@ -3,9 +3,11 @@ - 104.0.0.0/8 via 4.4.4.254 - 105.0.0.0/8 via 5.5.5.254 - 106.0.0.0/8 via 6.6.6.254 -+107.0.0.0/8 via 7.7.7.254 - 1.1.0.0/16 proto kernel scope link src 1.1.1.1 - 2.2.0.0/16 proto kernel scope link src 2.2.2.2 - 3.3.0.0/16 proto kernel scope link src 3.3.3.3 - 4.4.4.0/24 proto kernel scope link src 4.4.4.4 - 5.5.5.0/24 proto kernel scope link src 5.5.5.5 - 6.6.0.0/16 proto kernel scope link src 6.6.6.6 -+7.7.0.0/16 proto kernel scope link src 7.7.7.7 -diff --git a/tests/setup1/tcase3.sh b/tests/setup1/tcase3.sh -index 45ce04ce..7ab38f3b 100755 ---- a/tests/setup1/tcase3.sh -+++ b/tests/setup1/tcase3.sh -@@ -1,10 +1,9 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - --for eth in eth2 eth4 eth6 eth8 eth10; do ifdown $eth; done -+for eth in eth2 eth4 eth6 eth8 eth10 eth12; do ifdown $eth; done - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh - --for eth in eth2 eth4 eth6 eth8 eth10; do ifup $eth; done -+for eth in eth2 eth4 eth6 eth8 eth10 eth12; do ifup $eth; done -diff --git a/tests/setup1/tcase3_results/ip_a.expected b/tests/setup1/tcase3_results/ip_a.expected -index 9adf521d..6071fb27 100644 ---- a/tests/setup1/tcase3_results/ip_a.expected -+++ b/tests/setup1/tcase3_results/ip_a.expected -@@ -2,13 +2,14 @@ eth1: mtu 1500 qdisc pfifo_fast master b - eth2: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 - eth4: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth5: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth5 -+eth5: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 - eth6: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth7: mtu 1500 qdisc pfifo_fast master bridge78 state UP qlen 1000 - eth8: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth9: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth11: mtu 1500 qdisc pfifo_fast master team1112 state UP qlen 1000 -+eth12: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - bond12: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 1.1.1.1/16 scope global bond12 - bond34: mtu 1500 qdisc noqueue state UP qlen 1000 -@@ -17,3 +18,7 @@ bridge78: mtu 1500 qdisc noqueue state UP qlen - inet 4.4.4.4/24 scope global bridge78 - eth9.1@eth9: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 5.5.5.5/24 scope global eth9.1 -+team56: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team56 -+team1112: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 7.7.7.7/16 scope global team1112 -diff --git a/tests/setup1/tcase3_results/ip_r.expected b/tests/setup1/tcase3_results/ip_r.expected -index 62f16947..223a7db1 100644 ---- a/tests/setup1/tcase3_results/ip_r.expected -+++ b/tests/setup1/tcase3_results/ip_r.expected -@@ -2,8 +2,10 @@ - 103.0.0.0/8 via 3.3.3.254 - 104.0.0.0/8 via 4.4.4.254 - 105.0.0.0/8 via 5.5.5.254 -+107.0.0.0/8 via 7.7.7.254 - 1.1.0.0/16 proto kernel scope link src 1.1.1.1 - 2.2.0.0/16 proto kernel scope link src 2.2.2.2 - 3.3.0.0/16 proto kernel scope link src 3.3.3.3 - 4.4.4.0/24 proto kernel scope link src 4.4.4.4 - 5.5.5.0/24 proto kernel scope link src 5.5.5.5 -+7.7.0.0/16 proto kernel scope link src 7.7.7.7 -diff --git a/tests/setup1/tcase4.sh b/tests/setup1/tcase4.sh -index d28d2606..ac22ea73 100755 ---- a/tests/setup1/tcase4.sh -+++ b/tests/setup1/tcase4.sh -@@ -1,13 +1,13 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - - export SIMPLIFY_BONDING=y - export SIMPLIFY_BRIDGE=y -+export SIMPLIFY_TEAMING=y - --for eth in eth2 eth4 eth6 eth8 eth10; do ifdown $eth; done -+for eth in eth2 eth4 eth6 eth8 eth10 eth12; do ifdown $eth; done - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh - --for eth in eth2 eth4 eth6 eth8 eth10; do ifup $eth; done -+for eth in eth2 eth4 eth6 eth8 eth10 eth12; do ifup $eth; done -diff --git a/tests/setup1/tcase4_results/ip_a.expected b/tests/setup1/tcase4_results/ip_a.expected -index e12bd4d0..7112b5af 100644 ---- a/tests/setup1/tcase4_results/ip_a.expected -+++ b/tests/setup1/tcase4_results/ip_a.expected -@@ -1,16 +1,21 @@ --eth1: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 1.1.1.1/16 scope global eth1 -+bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 1.1.1.1/16 scope global bond12 -+eth1: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth2: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 2.2.2.2/16 scope global eth3 - eth4: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth5: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth5 -+eth5: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 - eth6: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth7: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 4.4.4.4/24 scope global eth7 - eth8: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth9: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth11: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -+ inet 7.7.7.7/16 scope global eth11 -+eth12: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth9.1@eth9: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 5.5.5.5/24 scope global eth9.1 -+team56: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team56 -diff --git a/tests/setup1/tcase4_results/ip_r.expected b/tests/setup1/tcase4_results/ip_r.expected -index 62f16947..223a7db1 100644 ---- a/tests/setup1/tcase4_results/ip_r.expected -+++ b/tests/setup1/tcase4_results/ip_r.expected -@@ -2,8 +2,10 @@ - 103.0.0.0/8 via 3.3.3.254 - 104.0.0.0/8 via 4.4.4.254 - 105.0.0.0/8 via 5.5.5.254 -+107.0.0.0/8 via 7.7.7.254 - 1.1.0.0/16 proto kernel scope link src 1.1.1.1 - 2.2.0.0/16 proto kernel scope link src 2.2.2.2 - 3.3.0.0/16 proto kernel scope link src 3.3.3.3 - 4.4.4.0/24 proto kernel scope link src 4.4.4.4 - 5.5.5.0/24 proto kernel scope link src 5.5.5.5 -+7.7.0.0/16 proto kernel scope link src 7.7.7.7 -diff --git a/tests/setup1/tcase5.sh b/tests/setup1/tcase5.sh -index adb09bda..dc786a8a 100755 ---- a/tests/setup1/tcase5.sh -+++ b/tests/setup1/tcase5.sh -@@ -1,10 +1,9 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - --for eth in eth1 eth3 eth5 eth7 eth9; do ifdown $eth; done -+for eth in eth1 eth3 eth5 eth7 eth9 eth11; do ifdown $eth; done - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh - --for eth in eth1 eth3 eth5 eth7 eth9; do ifup $eth; done -+for eth in eth1 eth3 eth5 eth7 eth9 eth11; do ifup $eth; done -diff --git a/tests/setup1/tcase5_results/ip_a.expected b/tests/setup1/tcase5_results/ip_a.expected -index bd1152c8..e55915fc 100644 ---- a/tests/setup1/tcase5_results/ip_a.expected -+++ b/tests/setup1/tcase5_results/ip_a.expected -@@ -3,16 +3,21 @@ eth2: mtu 1500 qdisc pfifo_fast master b - eth3: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth4: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 - eth5: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth6: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth6 -+eth6: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 - eth7: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth8: mtu 1500 qdisc pfifo_fast master bridge78 state UP qlen 1000 - eth9: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 6.6.6.6/16 scope global eth10 -+eth11: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth12: mtu 1500 qdisc pfifo_fast master team1112 state UP qlen 1000 - bond12: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 1.1.1.1/16 scope global bond12 - bond34: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 2.2.2.2/16 scope global bond34 - bridge78: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 4.4.4.4/24 scope global bridge78 -+team1112: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 7.7.7.7/16 scope global team1112 -+team56: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team56 -diff --git a/tests/setup1/tcase5_results/ip_r.expected b/tests/setup1/tcase5_results/ip_r.expected -index 87a8f9fe..50638c8a 100644 ---- a/tests/setup1/tcase5_results/ip_r.expected -+++ b/tests/setup1/tcase5_results/ip_r.expected -@@ -2,8 +2,10 @@ - 103.0.0.0/8 via 3.3.3.254 - 104.0.0.0/8 via 4.4.4.254 - 106.0.0.0/8 via 6.6.6.254 -+107.0.0.0/8 via 7.7.7.254 - 1.1.0.0/16 proto kernel scope link src 1.1.1.1 - 2.2.0.0/16 proto kernel scope link src 2.2.2.2 - 3.3.0.0/16 proto kernel scope link src 3.3.3.3 - 4.4.4.0/24 proto kernel scope link src 4.4.4.4 - 6.6.0.0/16 proto kernel scope link src 6.6.6.6 -+7.7.0.0/16 proto kernel scope link src 7.7.7.7 -diff --git a/tests/setup1/tcase6.sh b/tests/setup1/tcase6.sh -index 0f11c0ae..698b88c3 100755 ---- a/tests/setup1/tcase6.sh -+++ b/tests/setup1/tcase6.sh -@@ -1,13 +1,13 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - - export SIMPLIFY_BONDING=y - export SIMPLIFY_BRIDGE=y -+export SIMPLIFY_TEAMING=y - --for eth in eth1 eth3 eth5 eth7 eth9; do ifdown $eth; done -+for eth in eth1 eth3 eth5 eth7 eth9 eth11; do ifdown $eth; done - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh - --for eth in eth1 eth3 eth5 eth7 vlan1eth9; do ifup $eth; done -+for eth in eth1 eth3 eth5 eth7 vlan1eth9 eth11; do ifup $eth; done -diff --git a/tests/setup1/tcase6_results/ip_a.expected b/tests/setup1/tcase6_results/ip_a.expected -index c04e1e12..4f086280 100644 ---- a/tests/setup1/tcase6_results/ip_a.expected -+++ b/tests/setup1/tcase6_results/ip_a.expected -@@ -1,15 +1,20 @@ -+bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 1.1.1.1/16 scope global bond12 - eth1: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth2: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 1.1.1.1/16 scope global eth2 -+eth2: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth4: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 2.2.2.2/16 scope global eth4 - eth5: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth6: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth6 -+eth6: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 - eth7: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth8: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 4.4.4.4/24 scope global eth8 - eth9: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 6.6.6.6/16 scope global eth10 -+eth11: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth12: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -+ inet 7.7.7.7/16 scope global eth12 -+team56: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team56 -diff --git a/tests/setup1/tcase6_results/ip_r.expected b/tests/setup1/tcase6_results/ip_r.expected -index 87a8f9fe..50638c8a 100644 ---- a/tests/setup1/tcase6_results/ip_r.expected -+++ b/tests/setup1/tcase6_results/ip_r.expected -@@ -2,8 +2,10 @@ - 103.0.0.0/8 via 3.3.3.254 - 104.0.0.0/8 via 4.4.4.254 - 106.0.0.0/8 via 6.6.6.254 -+107.0.0.0/8 via 7.7.7.254 - 1.1.0.0/16 proto kernel scope link src 1.1.1.1 - 2.2.0.0/16 proto kernel scope link src 2.2.2.2 - 3.3.0.0/16 proto kernel scope link src 3.3.3.3 - 4.4.4.0/24 proto kernel scope link src 4.4.4.4 - 6.6.0.0/16 proto kernel scope link src 6.6.6.6 -+7.7.0.0/16 proto kernel scope link src 7.7.7.7 -diff --git a/tests/setup1/tcase7.sh b/tests/setup1/tcase7.sh -index 57b3cdff..7dc07f63 100755 ---- a/tests/setup1/tcase7.sh -+++ b/tests/setup1/tcase7.sh -@@ -1,5 +1,5 @@ - #!/bin/bash - --CONFIG_DIR=/root -+CONFIG_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh -diff --git a/tests/setup1/tcase7_results/ip_a.expected b/tests/setup1/tcase7_results/ip_a.expected -index c6ace88f..dba7dab6 100644 ---- a/tests/setup1/tcase7_results/ip_a.expected -+++ b/tests/setup1/tcase7_results/ip_a.expected -@@ -2,14 +2,15 @@ eth1: mtu 1500 qdisc pfifo_fast master b - eth2: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 - eth4: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 --eth5: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth5 --eth6: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth5: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 -+eth6: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 - eth7: mtu 1500 qdisc pfifo_fast master bridge78 state UP qlen 1000 - eth8: mtu 1500 qdisc pfifo_fast master bridge78 state UP qlen 1000 - eth9: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 6.6.6.100/16 scope global eth10 -+eth11: mtu 1500 qdisc pfifo_fast master team1112 state UP qlen 1000 -+eth12: mtu 1500 qdisc pfifo_fast master team1112 state UP qlen 1000 - bond12: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 1.1.1.100/16 scope global bond12 - bond34: mtu 1500 qdisc noqueue state UP qlen 1000 -@@ -18,3 +19,7 @@ bridge78: mtu 1500 qdisc noqueue state UP qlen - inet 4.4.4.100/24 scope global bridge78 - eth9.1@eth9: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 5.5.5.5/24 scope global eth9.1 -+team1112: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 7.7.7.100/16 scope global team1112 -+team56: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team56 -diff --git a/tests/setup1/tcase7_results/ip_r.expected b/tests/setup1/tcase7_results/ip_r.expected -index 2481d7f3..a89163a6 100644 ---- a/tests/setup1/tcase7_results/ip_r.expected -+++ b/tests/setup1/tcase7_results/ip_r.expected -@@ -3,9 +3,11 @@ - 104.0.0.0/8 via 4.4.4.254 - 105.0.0.0/8 via 5.5.5.254 - 106.0.0.0/8 via 6.6.6.254 -+107.0.0.0/8 via 7.7.7.254 - 1.1.0.0/16 proto kernel scope link src 1.1.1.100 - 2.2.0.0/16 proto kernel scope link src 2.2.2.100 - 3.3.0.0/16 proto kernel scope link src 3.3.3.3 - 4.4.4.0/24 proto kernel scope link src 4.4.4.100 - 5.5.5.0/24 proto kernel scope link src 5.5.5.5 - 6.6.0.0/16 proto kernel scope link src 6.6.6.100 -+7.7.0.0/16 proto kernel scope link src 7.7.7.100 -diff --git a/tests/setup1/tcase8.sh b/tests/setup1/tcase8.sh -index c40c5e15..6982d005 100755 ---- a/tests/setup1/tcase8.sh -+++ b/tests/setup1/tcase8.sh -@@ -1,8 +1,9 @@ - #!/bin/bash - --CONFIG_DIR=/root -+CONFIG_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" - - export SIMPLIFY_BONDING=y - export SIMPLIFY_BRIDGE=y -+export SIMPLIFY_TEAMING=y - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh -diff --git a/tests/setup1/tcase8_results/ip_a.expected b/tests/setup1/tcase8_results/ip_a.expected -index 6801434d..fb0e2d1e 100644 ---- a/tests/setup1/tcase8_results/ip_a.expected -+++ b/tests/setup1/tcase8_results/ip_a.expected -@@ -1,17 +1,22 @@ --eth1: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 1.1.1.100/16 scope global eth1 --eth2: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 1.1.1.100/16 scope global bond12 -+eth1: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 -+eth2: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 2.2.2.100/16 scope global eth3 - eth4: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth5: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth5 --eth6: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth5: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 -+eth6: mtu 1500 qdisc pfifo_fast master team56 state UP qlen 1000 - eth7: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 4.4.4.100/24 scope global eth7 - eth8: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth9: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - inet 6.6.6.100/16 scope global eth10 -+eth11: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -+ inet 7.7.7.100/16 scope global eth11 -+eth12: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth9.1@eth9: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 5.5.5.5/24 scope global eth9.1 -+team56: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team56 -diff --git a/tests/setup1/tcase8_results/ip_r.expected b/tests/setup1/tcase8_results/ip_r.expected -index 2481d7f3..a89163a6 100644 ---- a/tests/setup1/tcase8_results/ip_r.expected -+++ b/tests/setup1/tcase8_results/ip_r.expected -@@ -3,9 +3,11 @@ - 104.0.0.0/8 via 4.4.4.254 - 105.0.0.0/8 via 5.5.5.254 - 106.0.0.0/8 via 6.6.6.254 -+107.0.0.0/8 via 7.7.7.254 - 1.1.0.0/16 proto kernel scope link src 1.1.1.100 - 2.2.0.0/16 proto kernel scope link src 2.2.2.100 - 3.3.0.0/16 proto kernel scope link src 3.3.3.3 - 4.4.4.0/24 proto kernel scope link src 4.4.4.100 - 5.5.5.0/24 proto kernel scope link src 5.5.5.5 - 6.6.0.0/16 proto kernel scope link src 6.6.6.100 -+7.7.0.0/16 proto kernel scope link src 7.7.7.100 -diff --git a/tests/setup1/verify.sh b/tests/setup1/verify.sh -index a9fd9c6c..146d017b 100755 ---- a/tests/setup1/verify.sh -+++ b/tests/setup1/verify.sh -@@ -32,7 +32,7 @@ DEVICES="$( ls /sys/class/net/ | egrep -wv "(bonding_masters|eth0|lo)" )" - - for dev in $DEVICES; do - ip addr show dev $dev --done 2>/dev/null | egrep -w "(mtu|inet)" | sed "s/^[0-9]*: //" > $tmpfile_ipa -+done 2>/dev/null | egrep -w "(mtu|inet)" | sed -e "s/^[0-9]*: //" -e "s/ group \S* / /" > $tmpfile_ipa - - for dev in $DEVICES; do - ip r show dev $dev -diff --git a/tests/setup2/README b/tests/setup2/README -index 40731f83..34b123fb 100644 ---- a/tests/setup2/README -+++ b/tests/setup2/README -@@ -3,7 +3,7 @@ - +----------------------------------+ - - --You need a VM with 10 network interfaces (eth0 being the main interface). -+You need a VM with 11 network interfaces (eth0 being the main interface). - All interfaces except eth0 can be non-reachable. - - kvm.xml is an example template of such VM. 
-@@ -31,18 +31,18 @@ default via 192.168.122.1 dev eth0 - Running the unit tests: - ---------------------- - --After copying the files and making sure the script has been adapted, -+After copying the network configuration files and rebooting the system, - - from the VM, run the following command: - --# for i in $(seq 1 8); do ./tcase$i.sh; done -+# for i in $(seq 1 8); do ./tcase${i}.sh; done - - This will record the generated network+route files. - - Then to verify result, run the following command (this will take down the - network, except eth0): - --# for i in $(seq 1 8); do ./verify.sh tcase$i_results; done -+# for i in $(seq 1 8); do ./verify.sh tcase${i}_results; done - - - ------------------------------------------------------------------------------- -@@ -64,15 +64,15 @@ team89 -> eth8 - Test Case #2: simplification - ------------ - --SIMPLIFY_BONDING=y SIMPLIFY_BRIDGE=y -+SIMPLIFY_BONDING=y SIMPLIFY_BRIDGE=y SIMPLIFY_TEAMING=y - - Expected results: - --bond12 -> eth1 -+bond12 -> bond12 (not simplified, because mode=4) - bond34 -> eth3 - bridge2 -> eth3.2 - team567 -> eth5.5 --team89 -> eth8 -+team89 -> team89 (not simplified, because runner=lacp) - bridge3 -> eth10.3 - - -@@ -100,7 +100,6 @@ for eth in eth2 eth4 eth6 eth8 eth10; do ifdown $eth; done - Expected results: - - eth2, eth4, eth6, eth8, eth10 not in file --bond12 -> eth1 - bond34 -> eth3 - bridge2 -> eth3.2 - team567 -> eth5.5 -@@ -136,7 +135,6 @@ for eth in eth1 eth3 eth5 eth7 eth9; do ifdown $eth; done - Expected results: - - eth2, eth4, eth6, eth8, eth10 not in file --bond12 -> eth2 - bond34 -> eth4 - team567 -> eth6 - team89 -> eth8 -@@ -168,7 +166,7 @@ bridge3 -> 4.4.4.100/24 - Test Case #8: IP address mapping & simplification - ------------ - --SIMPLIFY_BONDING=y SIMPLIFY_BRIDGE=y -+SIMPLIFY_BONDING=y SIMPLIFY_BRIDGE=y SIMPLIFY_TEAMING=y - - # cat mappings/ip_addresses - bond12.1 1.1.1.100/16 -diff --git a/tests/setup2/tcase1.sh b/tests/setup2/tcase1.sh -index 597f3f5c..7b5032c7 100755 ---- a/tests/setup2/tcase1.sh -+++ b/tests/setup2/tcase1.sh -@@ -1,6 +1,5 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh -diff --git a/tests/setup2/tcase1_results/ip_a.expected b/tests/setup2/tcase1_results/ip_a.expected -index 8e36b66c..23a2d8df 100644 ---- a/tests/setup2/tcase1_results/ip_a.expected -+++ b/tests/setup2/tcase1_results/ip_a.expected -@@ -14,10 +14,12 @@ eth2: mtu 1500 qdisc pfifo_fast master b - eth3: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 - eth4: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 - eth5: mtu 1500 qdisc pfifo_fast state UP qlen 1000 --eth5.5@eth5: mtu 1500 qdisc noqueue state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth5.5 --eth6: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth7: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth8: mtu 1500 qdisc pfifo_fast state UP qlen 1000 --eth8.3@eth8: mtu 1500 qdisc noqueue master bridge3 state UP qlen 1000 --eth9: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth5.5@eth5: mtu 1500 qdisc noqueue master team567 state UP qlen 1000 -+eth6: mtu 1500 qdisc pfifo_fast master team567 state UP qlen 1000 -+eth7: mtu 1500 qdisc pfifo_fast master team567 state UP qlen 1000 -+eth8: mtu 1500 qdisc pfifo_fast master team89 state UP qlen 1000 -+eth9: mtu 1500 qdisc pfifo_fast master team89 state UP qlen 1000 -+team567: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team567 -+team89: mtu 1500 qdisc noqueue state UP qlen 1000 -+team89.3@team89: mtu 1500 qdisc noqueue master bridge3 state UP qlen 1000 -diff --git a/tests/setup2/tcase2.sh b/tests/setup2/tcase2.sh -index faa39396..65d1aa34 100755 ---- a/tests/setup2/tcase2.sh -+++ b/tests/setup2/tcase2.sh -@@ -1,9 +1,9 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - - export SIMPLIFY_BONDING=y - export SIMPLIFY_BRIDGE=y -+export SIMPLIFY_TEAMING=y - --. ./run.sh -+. "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh -diff --git a/tests/setup2/tcase2_results/ip_a.expected b/tests/setup2/tcase2_results/ip_a.expected -index b2043ea2..097c6ed8 100644 ---- a/tests/setup2/tcase2_results/ip_a.expected -+++ b/tests/setup2/tcase2_results/ip_a.expected -@@ -1,10 +1,11 @@ --eth1: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -+bond12.1@bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 1.1.1.1/16 scope global bond12.1 -+bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+eth1: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth10.3@eth10: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 4.4.4.4/24 scope global eth10.3 --eth1.1@eth1: mtu 1500 qdisc noqueue state UP qlen 1000 -- inet 1.1.1.1/16 scope global eth1.1 --eth2: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth2: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth3.2@eth3: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 2.2.2.2/24 scope global eth3.2 -diff --git a/tests/setup2/tcase3.sh b/tests/setup2/tcase3.sh -index d23a1198..3b792b42 100755 ---- a/tests/setup2/tcase3.sh -+++ b/tests/setup2/tcase3.sh -@@ -1,10 +1,9 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - - for eth in eth2 eth4 eth6 eth8 eth10; do ifdown $eth; done - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh - - for eth in eth2 eth4 eth6 eth8 vlan3eth10; do ifup $eth; done -diff --git a/tests/setup2/tcase3_results/ip_a.expected b/tests/setup2/tcase3_results/ip_a.expected -index 0b0c00ba..6cbcf572 100644 ---- a/tests/setup2/tcase3_results/ip_a.expected -+++ b/tests/setup2/tcase3_results/ip_a.expected -@@ -13,10 +13,12 @@ eth2: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 - eth4: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth5: mtu 1500 qdisc pfifo_fast state UP qlen 1000 --eth5.5@eth5: mtu 1500 qdisc noqueue state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth5.5 -+eth5.5@eth5: mtu 1500 qdisc noqueue master team567 state UP qlen 1000 - eth6: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth7: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth7: mtu 1500 qdisc pfifo_fast master team567 state UP qlen 1000 - eth8: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth9: mtu 1500 qdisc pfifo_fast state UP qlen 1000 --eth9.3@eth9: mtu 1500 qdisc noqueue master bridge3 state UP qlen 1000 -+eth9: mtu 1500 qdisc pfifo_fast master team89 state UP qlen 1000 -+team567: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team567 -+team89.3@team89: mtu 1500 qdisc noqueue master bridge3 state UP qlen 1000 -+team89: mtu 1500 qdisc noqueue state UP qlen 1000 -diff --git a/tests/setup2/tcase4.sh b/tests/setup2/tcase4.sh -index f66236ff..5d8cfdd5 100755 ---- a/tests/setup2/tcase4.sh -+++ b/tests/setup2/tcase4.sh -@@ -1,13 +1,13 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - - export SIMPLIFY_BONDING=y - export SIMPLIFY_BRIDGE=y -+export SIMPLIFY_TEAMING=y - - for eth in eth2 eth4 eth6 eth8 eth10; do ifdown $eth; done - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh - - for eth in eth2 eth4 eth6 eth8 vlan3eth10; do ifup $eth; done -diff --git a/tests/setup2/tcase4_results/ip_a.expected b/tests/setup2/tcase4_results/ip_a.expected -index 8171a018..6afa74e5 100644 ---- a/tests/setup2/tcase4_results/ip_a.expected -+++ b/tests/setup2/tcase4_results/ip_a.expected -@@ -1,7 +1,8 @@ --eth1: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -+bond12.1@bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 1.1.1.1/16 scope global bond12.1 -+bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+eth1: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth1.1@eth1: mtu 1500 qdisc noqueue state UP qlen 1000 -- inet 1.1.1.1/16 scope global eth1.1 - eth2: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth3.2@eth3: mtu 1500 qdisc noqueue state UP qlen 1000 -@@ -13,6 +14,7 @@ eth5.5@eth5: mtu 1500 qdisc noqueue state UP q - eth6: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth7: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth8: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth9: mtu 1500 qdisc pfifo_fast state UP qlen 1000 --eth9.3@eth9: mtu 1500 qdisc noqueue state UP qlen 1000 -- inet 4.4.4.4/24 scope global eth9.3 -+eth9: mtu 1500 qdisc pfifo_fast master team89 state UP qlen 1000 -+team89: mtu 1500 qdisc noqueue state UP qlen 1000 -+team89.3@team89: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 4.4.4.4/24 scope global team89.3 -diff --git a/tests/setup2/tcase5.sh b/tests/setup2/tcase5.sh -index 1085a80d..e332d9af 100755 ---- a/tests/setup2/tcase5.sh -+++ b/tests/setup2/tcase5.sh -@@ -1,10 +1,9 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - - for eth in eth1 eth3 eth5 eth7 eth9; do ifdown $eth; done - --. ./run.sh -+. "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh - - for eth in eth1 eth3 vlan5eth5 eth7 eth9; do ifup $eth; done -diff --git a/tests/setup2/tcase5_results/ip_a.expected b/tests/setup2/tcase5_results/ip_a.expected -index 4841575c..2d08e19c 100644 ---- a/tests/setup2/tcase5_results/ip_a.expected -+++ b/tests/setup2/tcase5_results/ip_a.expected -@@ -14,9 +14,11 @@ eth2: mtu 1500 qdisc pfifo_fast master b - eth3: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth4: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 - eth5: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth6: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -- inet 3.3.3.3/16 scope global eth6 -+eth6: mtu 1500 qdisc pfifo_fast master team567 state UP qlen 1000 - eth7: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth8: mtu 1500 qdisc pfifo_fast state UP qlen 1000 --eth8.3@eth8: mtu 1500 qdisc noqueue master bridge3 state UP qlen 1000 -+eth8: mtu 1500 qdisc pfifo_fast master team89 state UP qlen 1000 - eth9: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+team567: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.3/16 scope global team567 -+team89: mtu 1500 qdisc noqueue state UP qlen 1000 -+team89.3@team89: mtu 1500 qdisc noqueue master bridge3 state UP qlen 1000 -diff --git a/tests/setup2/tcase6.sh b/tests/setup2/tcase6.sh -index 998672bf..b9a5153f 100755 ---- a/tests/setup2/tcase6.sh -+++ b/tests/setup2/tcase6.sh -@@ -1,13 +1,13 @@ - #!/bin/bash - - unset CONFIG_DIR --#CONFIG_DIR=/root - - export SIMPLIFY_BONDING=y - export SIMPLIFY_BRIDGE=y -+export SIMPLIFY_TEAMING=y - - for eth in eth1 eth3 eth5 eth7 eth9; do ifdown $eth; done - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh - - for eth in eth1 eth3 vlan5eth5 eth7 eth9; do ifup $eth; done -diff --git a/tests/setup2/tcase6_results/ip_a.expected b/tests/setup2/tcase6_results/ip_a.expected -index 879aff9b..4239377a 100644 ---- a/tests/setup2/tcase6_results/ip_a.expected -+++ b/tests/setup2/tcase6_results/ip_a.expected -@@ -1,10 +1,11 @@ -+bond12.1@bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 1.1.1.1/16 scope global bond12.1 -+bond12: mtu 1500 qdisc noqueue state UP qlen 1000 - eth1: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth10.3@eth10: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 4.4.4.4/24 scope global eth10.3 --eth2: mtu 1500 qdisc pfifo_fast state UP qlen 1000 --eth2.1@eth2: mtu 1500 qdisc noqueue state UP qlen 1000 -- inet 1.1.1.1/16 scope global eth2.1 -+eth2: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 - eth4: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth4.2@eth4: mtu 1500 qdisc noqueue state UP qlen 1000 -diff --git a/tests/setup2/tcase7.sh b/tests/setup2/tcase7.sh -index 57b3cdff..7dc07f63 100755 ---- a/tests/setup2/tcase7.sh -+++ b/tests/setup2/tcase7.sh -@@ -1,5 +1,5 @@ - #!/bin/bash - --CONFIG_DIR=/root -+CONFIG_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" - --. ./run.sh -+. "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh -diff --git a/tests/setup2/tcase7_results/ip_a.expected b/tests/setup2/tcase7_results/ip_a.expected -index a1825c41..4136fb4b 100644 ---- a/tests/setup2/tcase7_results/ip_a.expected -+++ b/tests/setup2/tcase7_results/ip_a.expected -@@ -14,10 +14,12 @@ eth2: mtu 1500 qdisc pfifo_fast master b - eth3: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 - eth4: mtu 1500 qdisc pfifo_fast master bond34 state UP qlen 1000 - eth5: mtu 1500 qdisc pfifo_fast state UP qlen 1000 --eth5.5@eth5: mtu 1500 qdisc noqueue state UP qlen 1000 -- inet 3.3.3.100/16 scope global eth5.5 --eth6: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth7: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 --eth8: mtu 1500 qdisc pfifo_fast state UP qlen 1000 --eth8.3@eth8: mtu 1500 qdisc noqueue master bridge3 state UP qlen 1000 --eth9: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth5.5@eth5: mtu 1500 qdisc noqueue master team567 state UP qlen 1000 -+eth6: mtu 1500 qdisc pfifo_fast master team567 state UP qlen 1000 -+eth7: mtu 1500 qdisc pfifo_fast master team567 state UP qlen 1000 -+eth8: mtu 1500 qdisc pfifo_fast master team89 state UP qlen 1000 -+eth9: mtu 1500 qdisc pfifo_fast master team89 state UP qlen 1000 -+team567: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 3.3.3.100/16 scope global team567 -+team89: mtu 1500 qdisc noqueue state UP qlen 1000 -+team89.3@team89: mtu 1500 qdisc noqueue master bridge3 state UP qlen 1000 -diff --git a/tests/setup2/tcase8.sh b/tests/setup2/tcase8.sh -index c40c5e15..6982d005 100755 ---- a/tests/setup2/tcase8.sh -+++ b/tests/setup2/tcase8.sh -@@ -1,8 +1,9 @@ - #!/bin/bash - --CONFIG_DIR=/root -+CONFIG_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" - - export SIMPLIFY_BONDING=y - export SIMPLIFY_BRIDGE=y -+export SIMPLIFY_TEAMING=y - --. ./run.sh -+. 
"$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"/run.sh -diff --git a/tests/setup2/tcase8_results/ip_a.expected b/tests/setup2/tcase8_results/ip_a.expected -index 28140aa1..19e11934 100644 ---- a/tests/setup2/tcase8_results/ip_a.expected -+++ b/tests/setup2/tcase8_results/ip_a.expected -@@ -1,10 +1,11 @@ --eth1: mtu 1500 qdisc pfifo_fast state UP qlen 1000 -+bond12.1@bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+ inet 1.1.1.100/16 scope global bond12.1 -+bond12: mtu 1500 qdisc noqueue state UP qlen 1000 -+eth1: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth10: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth10.3@eth10: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 4.4.4.100/24 scope global eth10.3 --eth1.1@eth1: mtu 1500 qdisc noqueue state UP qlen 1000 -- inet 1.1.1.100/16 scope global eth1.1 --eth2: mtu 1500 qdisc pfifo_fast state DOWN qlen 1000 -+eth2: mtu 1500 qdisc pfifo_fast master bond12 state UP qlen 1000 - eth3: mtu 1500 qdisc pfifo_fast state UP qlen 1000 - eth3.2@eth3: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 2.2.2.2/24 scope global eth3.2 -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index 796f228a..ef118998 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -2237,15 +2237,16 @@ USE_CFG2HTML= - # If SKIP_CFG2HTML is enabled, skip this script (backward compatibility) - the var will become obsolete in rear-1.18 - # SKIP_CFG2HTML= - --# Simplify bonding setups by configuring always the first active device of a bond --SIMPLIFY_BONDING= -+# Simplify bonding setups by configuring always the first active device of a -+# bond, except when mode is 4 (IEEE 802.3ad policy) -+SIMPLIFY_BONDING=no - - # Simplify bridge setups by configuring always the first active device of a bridge --SIMPLIFY_BRIDGE= -+SIMPLIFY_BRIDGE=no - --# Simplify team setups by configuring always the first active device of a team --# Current implementation only supports this mode --#SIMPLIFY_TEAM= -+# Simplify team setups by configuring always the first active device of a team, -+# except when runner is 'lacp' (IEEE 802.3ad policy) -+SIMPLIFY_TEAMING=no - - # Serial Console support is enabled if serial devices are found on the system and - # then matching kernel command line parameters like 'console=ttyS0,9600 console=ttyS1,9600' -diff --git a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -index 8d24d2d5..4698618c 100644 ---- a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -+++ b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -@@ -146,7 +146,7 @@ ip link help 2>&1 | grep -qw bridge && ip_link_supports_bridge='true' - # - # - if it is a bond - # --# - if SIMPLIFY_BONDING is set -+# - if SIMPLIFY_BONDING is set and mode is not 4 (IEEE 802.3ad policy) - # - configure the first UP underlying interface using ALGO - # - keep record of interface mapping into new underlying interface - # - otherwise -@@ -157,10 +157,14 @@ ip link help 2>&1 | grep -qw bridge && ip_link_supports_bridge='true' - # - # - configure the vlan based on parent's interface - # --# - if it is a team, only simplification is currently implemented -+# - if it is a team - # --# - configure the first UP underlying interface using ALGO --# - keep record of interface mapping into new underlying interface -+# - if SIMPLIFY_TEAMING is set and runner is not 'lacp' (IEEE 802.3ad policy) -+# - configure the first UP underlying interface using ALGO -+# - keep record of 
interface mapping into new underlying interface -+# - otherwise -+# - configure the team -+# - configure all UP underlying interfaces using ALGO - # - # - in any case, a given interface is only configured once; when an interface - # has already been configured, configuration code should be ignored by -@@ -470,7 +474,7 @@ function handle_bridge () { - local tmpfile=$( mktemp ) - local itf - -- if test "$SIMPLIFY_BRIDGE" ; then -+ if is_true "$SIMPLIFY_BRIDGE" ; then - for itf in $( get_lower_interfaces $network_interface ) ; do - DebugPrint "$network_interface has lower interface $itf" - is_interface_up $itf || continue -@@ -552,6 +556,7 @@ function handle_bridge () { - } - - already_set_up_teams="" -+team_initialized= - - function handle_team () { - local network_interface=$1 -@@ -569,13 +574,58 @@ function handle_team () { - fi - already_set_up_teams+=" $network_interface" - -- # FIXME? Team code below simplifies the configuration, returning first port only -- # There should a SIMPLIFY_TEAM variable for that -- - local rc - local nitfs=0 - local tmpfile=$( mktemp ) - local itf -+ local teaming_runner="$( teamdctl "$network_interface" state item get setup.runner_name )" -+ -+ if is_true "$SIMPLIFY_TEAMING" && [ "$teaming_runner" != "lacp" ] ; then -+ -+ for itf in $( get_lower_interfaces $network_interface ) ; do -+ DebugPrint "$network_interface has lower interface $itf" -+ is_interface_up $itf || continue -+ is_linked_to_physical $itf || continue -+ handle_interface $itf >$tmpfile -+ rc=$? -+ [ $rc -eq $rc_error ] && continue -+ let nitfs++ -+ echo "# Original interface was $network_interface, now is $itf" -+ [ $rc -eq $rc_success ] && cat $tmpfile -+ # itf may have been mapped into some other interface -+ itf=$( get_mapped_network_interface $itf ) -+ # We found an interface, so stop here after mapping team to lower interface -+ map_network_interface $network_interface $itf -+ break -+ done -+ rm $tmpfile -+ -+ # If we didn't find any lower interface, we are in trouble ... -+ if [ $nitfs -eq 0 ] ; then -+ LogPrintError "Couldn't find any suitable lower interface for '$network_interface'." -+ return $rc_error -+ fi -+ -+ # setup_device_params has already been called by interface team was mapped onto -+ -+ return $rc_success -+ elif is_true "$SIMPLIFY_TEAMING" ; then -+ # Teaming runner 'lacp' (IEEE 802.3ad policy) cannot be simplified -+ # because there is some special setup on the switch itself, requiring -+ # to keep the system's network interface's configuration intact. -+ LogPrint "Note: not simplifying network configuration for '$network_interface' because teaming runner is 'lacp' (IEEE 802.3ad policy)." -+ fi -+ -+ # -+ # Non-simplified teaming mode -+ # -+ -+ if [ -z "$team_initialized" ] ; then -+ PROGS=( "${PROGS[@]}" 'teamd' 'teamdctl' ) -+ team_initialized="y" -+ fi -+ -+ local teamconfig="$( teamdctl -o "$network_interface" config dump actual )" - - for itf in $( get_lower_interfaces $network_interface ) ; do - DebugPrint "$network_interface has lower interface $itf" -@@ -585,13 +635,15 @@ function handle_team () { - rc=$? 
- [ $rc -eq $rc_error ] && continue - let nitfs++ -- echo "# Original interface was $network_interface, now is $itf" - [ $rc -eq $rc_success ] && cat $tmpfile - # itf may have been mapped into some other interface -- itf=$( get_mapped_network_interface $itf ) -- # We found an interface, so stop here after mapping team to lower interface -- map_network_interface $network_interface $itf -- break -+ local newitf=$( get_mapped_network_interface $itf ) -+ if [ "$itf" != "$newitf" ] ; then -+ # Fix the teaming configuration -+ teamconfig="$( echo "$teamconfig" | sed "s/\"$itf\"/\"$newitf\"/g" )" -+ fi -+ # Make sure lower device is down before configuring the team -+ echo "ip link set dev $itf down" - done - rm $tmpfile - -@@ -601,12 +653,15 @@ function handle_team () { - return $rc_error - fi - -- # setup_device_params has already been called by interface team was mapped onto -+ echo "teamd -d -c '$teamconfig'" -+ -+ setup_device_params $network_interface - - return $rc_success - } - - already_set_up_bonds="" -+bond_initialized= - - function handle_bond () { - local network_interface=$1 -@@ -618,22 +673,19 @@ function handle_bond () { - - DebugPrint "$network_interface is a bond" - -- if [ -z "$already_set_up_bonds" ] ; then -- if ! test "$SIMPLIFY_BONDING" ; then -- echo "modprobe bonding" -- MODULES=( "${MODULES[@]}" 'bonding' ) -- fi -- elif [[ " $already_set_up_bonds " == *\ $network_interface\ * ]] ; then -+ if [[ " $already_set_up_bonds " == *\ $network_interface\ * ]] ; then - DebugPrint "$network_interface already handled..." - return $rc_ignore - fi - already_set_up_bonds+=" $network_interface" - -+ local rc - local nitfs=0 - local tmpfile=$( mktemp ) - local itf -+ local bonding_mode=$( awk '{ print $2 }' $sysfspath/bonding/mode ) - -- if test "$SIMPLIFY_BONDING" ; then -+ if is_true "$SIMPLIFY_BONDING" && [ $bonding_mode -ne 4 ] ; then - for itf in $( get_lower_interfaces $network_interface ) ; do - DebugPrint "$network_interface has lower interface $itf" - is_interface_up $itf || continue -@@ -663,13 +715,23 @@ function handle_bond () { - # setup_device_params has already been called by interface bond was mapped onto - - return $rc_success -+ elif is_true "$SIMPLIFY_BONDING" ; then -+ # Bond mode '4' (IEEE 802.3ad policy) cannot be simplified because -+ # there is some special setup on the switch itself, requiring to keep -+ # the system's network interface's configuration intact. -+ LogPrint "Note: not simplifying network configuration for '$network_interface' because bonding mode is '4' (IEEE 802.3ad policy)." - fi - - # - # Non-simplified bonding mode - # - -- local bonding_mode=$( awk '{ print $2 }' $sysfspath/bonding/mode ) -+ if [ -z "$bond_initialized" ] ; then -+ echo "modprobe bonding" -+ MODULES=( "${MODULES[@]}" 'bonding' ) -+ bond_initialized="y" -+ fi -+ - local miimon=$( cat $sysfspath/bonding/miimon ) - local use_carrier=$( cat $sysfspath/bonding/use_carrier ) - -@@ -689,7 +751,8 @@ EOT - is_interface_up $itf || continue - is_linked_to_physical $itf || continue - handle_interface $itf >$tmpfile -- [ $? -eq $rc_error ] && continue -+ rc=$? 
-+ [ $rc -eq $rc_error ] && continue - let nitfs++ - [ $rc -eq $rc_success ] && cat $tmpfile - # itf may have been mapped into some other interface -@@ -884,3 +947,5 @@ unset -f handle_team - unset -f handle_bond - unset -f handle_vlan - unset -f handle_physdev -+ -+# vim: set et ts=4 sw=4: diff --git a/SOURCES/rear-bz1652828-bz1652853.patch b/SOURCES/rear-bz1652828-bz1652853.patch deleted file mode 100644 index 2432451..0000000 --- a/SOURCES/rear-bz1652828-bz1652853.patch +++ /dev/null @@ -1,156 +0,0 @@ -diff --git a/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh b/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh -index 9c4212ae..873e244e 100644 ---- a/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh -+++ b/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh -@@ -54,9 +54,13 @@ Log "copy_as_is_executables = ${copy_as_is_executables[@]}" - # add them to the LIBS list if they are not yet included in the copied files: - Log "Adding required libraries of executables in all the copied files to LIBS" - local required_library="" --for required_library in $( RequiredSharedOjects "${copy_as_is_executables[@]}" ) ; do -- # Skip when the required library was already actually copied by 'tar' above: -- grep -q "$required_library" $copy_as_is_filelist_file && continue -+for required_library in $( RequiredSharedObjects "${copy_as_is_executables[@]}" ) ; do -+ # Skip when the required library was already actually copied by 'tar' above. -+ # grep for a full line (copy_as_is_filelist_file contains 1 file name per line) -+ # to avoid that libraries get skipped when their library path is a substring -+ # of another already copied library, e.g. do not skip /path/to/lib when -+ # /other/path/to/lib was already copied, cf. https://github.com/rear/rear/pull/1976 -+ grep -q "^${required_library}\$" $copy_as_is_filelist_file && continue - # Skip when the required library is already in LIBS: - IsInArray "$required_library" "${LIBS[@]}" && continue - Log "Adding required library '$required_library' to LIBS" -diff --git a/usr/share/rear/build/GNU/Linux/390_copy_binaries_libraries.sh b/usr/share/rear/build/GNU/Linux/390_copy_binaries_libraries.sh -index f7809bc7..ed02dea9 100644 ---- a/usr/share/rear/build/GNU/Linux/390_copy_binaries_libraries.sh -+++ b/usr/share/rear/build/GNU/Linux/390_copy_binaries_libraries.sh -@@ -62,16 +62,16 @@ Log "Binaries being copied: ${all_binaries[@]}" - copy_binaries "$ROOTFS_DIR/bin" "${all_binaries[@]}" - - # Copy libraries: --# It is crucial to also have all LIBS itself in all_libs because RequiredSharedOjects() -+# It is crucial to also have all LIBS itself in all_libs because RequiredSharedObjects() - # outputs only those libraries that are required by a library but not the library itself - # so that without all LIBS itself in all_libs those libraries in LIBS are missing that - # are not needed by a binary in all_binaries (all_binaries were already copied above). --# RequiredSharedOjects outputs the required shared objects on STDOUT. -+# RequiredSharedObjects outputs the required shared objects on STDOUT. - # The output are absolute paths to the required shared objects. - # The output can also be symbolic links (also as absolute paths). - # In case of symbolic links only the link but not the link target is output. - # Therefore for symbolic links also the link target gets copied below. 
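An editorial aside on the 100_copy_as_is.sh change above (illustration only; the file paths are made up): a plain `grep -q "$required_library"` also matches when the library path is merely a substring of an already copied path, which is exactly what the anchored full-line pattern avoids:

    printf '%s\n' /other/path/to/lib > filelist           # one already-copied file per line
    grep -q '/path/to/lib' filelist   && echo "substring match: /path/to/lib would be skipped by mistake"
    grep -q '^/path/to/lib$' filelist || echo "full-line match: /path/to/lib still gets added to LIBS"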
--local all_libs=( "${LIBS[@]}" $( RequiredSharedOjects "${all_binaries[@]}" "${LIBS[@]}" ) ) -+local all_libs=( "${LIBS[@]}" $( RequiredSharedObjects "${all_binaries[@]}" "${LIBS[@]}" ) ) - - Log "Libraries being copied: ${all_libs[@]}" - local lib="" -diff --git a/usr/share/rear/build/OPALPBA/Linux-i386/391_list_executable_dependencies.sh b/usr/share/rear/build/OPALPBA/Linux-i386/391_list_executable_dependencies.sh -index 9803200d..8cb27d78 100644 ---- a/usr/share/rear/build/OPALPBA/Linux-i386/391_list_executable_dependencies.sh -+++ b/usr/share/rear/build/OPALPBA/Linux-i386/391_list_executable_dependencies.sh -@@ -8,7 +8,7 @@ if is_true $KEEP_BUILD_DIR; then - executable_dependencies_list="$TMP_DIR/executable-dependencies" - - for executable in "${executables[@]}"; do -- dependents=( $(RequiredSharedOjects "$ROOTFS_DIR/$executable") ) -+ dependents=( $(RequiredSharedObjects "$ROOTFS_DIR/$executable") ) - echo "$executable: ${dependents[*]}" - done > "$executable_dependencies_list" - -diff --git a/usr/share/rear/build/default/980_verify_rootfs.sh b/usr/share/rear/build/default/980_verify_rootfs.sh -index f8b3e8e9..d03e5f34 100644 ---- a/usr/share/rear/build/default/980_verify_rootfs.sh -+++ b/usr/share/rear/build/default/980_verify_rootfs.sh -@@ -51,6 +51,11 @@ if test "$BACKUP" = "SESAM" ; then - # related libraries - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SESAM_LD_LIBRARY_PATH - fi -+if test "$BACKUP" = "NBU" ; then -+ # Use a NBU-specific LD_LIBRARY_PATH to find NBU libraries -+ # see https://github.com/rear/rear/issues/1974 -+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$NBU_LD_LIBRARY_PATH -+fi - # Actually test all binaries for 'not found' libraries. - # Find all binaries and libraries also e.g. those that are copied via COPY_AS_IS into other paths: - for binary in $( find $ROOTFS_DIR -type f -executable -printf '/%P\n' ); do -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index 0f0d0675..796f228a 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -1467,7 +1467,8 @@ OBDR_BLOCKSIZE=2048 - ## - # - COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt ) --COPY_AS_IS_EXCLUDE_NBU=( "/usr/openv/netbackup/logs/*" "/usr/openv/netbackup/bin/bpjava*" "/usr/openv/netbackup/bin/xbp" ) -+COPY_AS_IS_EXCLUDE_NBU=( "/usr/openv/netbackup/logs/*" "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal ) -+NBU_LD_LIBRARY_PATH="/usr/openv/lib" - PROGS_NBU=( ) - - ## -diff --git a/usr/share/rear/lib/_input-output-functions.sh b/usr/share/rear/lib/_input-output-functions.sh -index d5eed43e..bdbf593d 100644 ---- a/usr/share/rear/lib/_input-output-functions.sh -+++ b/usr/share/rear/lib/_input-output-functions.sh -@@ -324,10 +324,11 @@ function LogToSyslog () { - # see https://github.com/rear/rear/issues/729 - function has_binary () { - for bin in $@ ; do -- # Suppress success output via stdout (but keep failure output via stderr): -- if type $bin 1>/dev/null ; then -- return 0 -- fi -+ # Suppress success output via stdout which is crucial when has_binary is called -+ # in other functions that provide their intended function results via stdout -+ # to not pollute intended function results with intermixed has_binary stdout -+ # (e.g. 
the RequiredSharedObjects function) but keep failure output via stderr: -+ type $bin 1>/dev/null && return 0 - done - return 1 - } -diff --git a/usr/share/rear/lib/linux-functions.sh b/usr/share/rear/lib/linux-functions.sh -index 6a767367..3fb44e6d 100644 ---- a/usr/share/rear/lib/linux-functions.sh -+++ b/usr/share/rear/lib/linux-functions.sh -@@ -100,13 +100,13 @@ function FindStorageDrivers () { - - # Determine all required shared objects (shared/dynamic libraries) - # for programs and/or shared objects (binaries) specified in $@. --# RequiredSharedOjects outputs the required shared objects on STDOUT. -+# RequiredSharedObjects outputs the required shared objects on STDOUT. - # The output are absolute paths to the required shared objects. - # The output can also be symbolic links (also as absolute paths). - # In case of symbolic links only the link but not the link target is output. --function RequiredSharedOjects () { -- has_binary ldd || Error "Cannot run RequiredSharedOjects() because there is no ldd binary" -- Log "RequiredSharedOjects: Determining required shared objects" -+function RequiredSharedObjects () { -+ has_binary ldd || Error "Cannot run RequiredSharedObjects() because there is no ldd binary" -+ Log "RequiredSharedObjects: Determining required shared objects" - # It uses 'ldd' to determine all required shared objects because 'ldd' outputs - # also transitively required shared objects i.e. libraries needed by libraries, - # e.g. for /usr/sbin/parted also the libraries needed by the libparted library: -@@ -164,10 +164,11 @@ function RequiredSharedOjects () { - # 2. Line: " lib (mem-addr)" -> virtual library - # 3. Line: " lib => not found" -> print error to stderr - # 4. Line: " lib => /path/to/lib (mem-addr)" -> print $3 '/path/to/lib' -- # 5. Line: " /path/to/lib (mem-addr)" -> print $1 '/path/to/lib' -+ # 5. Line: " /path/to/lib => /path/to/lib2 (mem-addr)" -> print $3 '/path/to/lib2' -+ # 6. Line: " /path/to/lib (mem-addr)" -> print $1 '/path/to/lib' - ldd "$@" | awk ' /^\t.+ => not found/ { print "Shared object " $1 " not found" > "/dev/stderr" } - /^\t.+ => \// { print $3 } -- /^\t\// { print $1 } ' | sort -u -+ /^\t\// && !/ => / { print $1 } ' | sort -u - } - - # Provide a shell, with custom exit-prompt and history -diff --git a/usr/share/rear/prep/NBU/default/450_check_nbu_client_configured.sh b/usr/share/rear/prep/NBU/default/450_check_nbu_client_configured.sh -index e01dcdbd..3cc29777 100644 ---- a/usr/share/rear/prep/NBU/default/450_check_nbu_client_configured.sh -+++ b/usr/share/rear/prep/NBU/default/450_check_nbu_client_configured.sh -@@ -5,5 +5,6 @@ - Log "Running: /usr/openv/netbackup/bin/bplist command" - LANG=C /usr/openv/netbackup/bin/bplist -l -s `date -d "-5 days" \ - "+%m/%d/%Y"` / >/dev/null --[ $? -gt 0 ] && LogPrint "WARNING: Netbackup bplist check failed with error code $?. -+rc=$? -+[ $rc -gt 0 ] && LogPrint "WARNING: Netbackup bplist check failed with error code ${rc}. - See $RUNTIME_LOGFILE for more details." 
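For illustration (not part of the patches; the sample ldd lines are invented): the awk program used by the RequiredSharedObjects helper in the linux-functions.sh hunk above classifies each ldd output line, printing only resolvable absolute paths on stdout and reporting unresolvable objects on stderr:

    printf '\tlinux-vdso.so.1 (0x00007ffc00000000)\n\tlibacl.so.1 => /lib64/libacl.so.1 (0x00007f0000000000)\n\tlibmissing.so.0 => not found\n\t/lib64/ld-linux-x86-64.so.2 (0x00007f0000200000)\n' \
    | awk ' /^\t.+ => not found/ { print "Shared object " $1 " not found" > "/dev/stderr" }
            /^\t.+ => \// { print $3 }
            /^\t\// && !/ => / { print $1 } ' | sort -u
    # stdout: /lib64/ld-linux-x86-64.so.2 and /lib64/libacl.so.1 (the virtual linux-vdso line is ignored)
    # stderr: Shared object libmissing.so.0 not found

The new `!/ => /` guard is what makes the added fifth case ("/path/to/lib => /path/to/lib2") come out of the right-hand side only, instead of being printed twice.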
diff --git a/SOURCES/rear-bz1653214.patch b/SOURCES/rear-bz1653214.patch deleted file mode 100644 index 8ed8ace..0000000 --- a/SOURCES/rear-bz1653214.patch +++ /dev/null @@ -1,17 +0,0 @@ -diff --git a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -index 4698618c..6b3194bd 100644 ---- a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -+++ b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -@@ -854,7 +854,11 @@ function handle_physdev () { - - DebugPrint "$network_interface is a physical device" - -- mac="$( cat $sysfspath/address )" || BugError "Could not read a MAC address for '$network_interface'." -+ if [ -e $sysfspath/bonding_slave/perm_hwaddr ] ; then -+ mac="$( cat $sysfspath/bonding_slave/perm_hwaddr )" -+ else -+ mac="$( cat $sysfspath/address )" || BugError "Could not read a MAC address for '$network_interface'." -+ fi - # Skip fake interfaces without MAC address - [ "$mac" != "00:00:00:00:00:00" ] || return $rc_error - diff --git a/SOURCES/rear-bz1655956.patch b/SOURCES/rear-bz1655956.patch deleted file mode 100644 index ea9c039..0000000 --- a/SOURCES/rear-bz1655956.patch +++ /dev/null @@ -1,35 +0,0 @@ -diff --git a/usr/sbin/rear b/usr/sbin/rear -index a51f820c..e21156bf 100755 ---- a/usr/sbin/rear -+++ b/usr/sbin/rear -@@ -527,6 +527,10 @@ Build date: $( date -R ) - if test "$WORKFLOW" != "help" ; then - # Create temporary work area and register removal exit task: - BUILD_DIR="$( mktemp -d -t rear.XXXXXXXXXXXXXXX || Error "Could not create build area '$BUILD_DIR'" )" -+ # Since 'mktemp' doesn't always return a path under /tmp, the build -+ # directory has always to be excluded for safety -+ BACKUP_PROG_EXCLUDE+=( "$BUILD_DIR" ) -+ - QuietAddExitTask cleanup_build_area_and_end_program - Log "Using build area '$BUILD_DIR'" - ROOTFS_DIR=$BUILD_DIR/rootfs -@@ -582,3 +586,5 @@ if test "$WORKFLOW" != "help" ; then - fi - - exit $EXIT_CODE -+ -+# vim: set et ts=4 sw=4: -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index ef118998..52b05eea 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -47,7 +47,9 @@ - # is set in the environment where /usr/sbin/rear is called. - # To have a specific working area directory prefix for Relax-and-Recover - # specify in /etc/rear/local.conf something like -+# - # export TMPDIR="/prefix/for/rear/working/directory" -+# - # where /prefix/for/rear/working/directory must already exist. - # This is useful for example when there is not sufficient free space - # in /tmp or $TMPDIR for the ISO image or even the backup archive. 
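A small editorial sketch of how the two rear-bz1655956 changes above fit together (illustration only; the prefix path is the same example used in the default.conf comment, and the generated directory suffix is invented):

    # in /etc/rear/local.conf (the directory must already exist):
    export TMPDIR="/prefix/for/rear/working/directory"
    # what the usr/sbin/rear hunk above then does for every workflow except 'help':
    BUILD_DIR="$( mktemp -d -t rear.XXXXXXXXXXXXXXX )"   # e.g. /prefix/for/rear/working/directory/rear.k3V9pT0aBcDeFgH
    BACKUP_PROG_EXCLUDE+=( "$BUILD_DIR" )                # so the work area never ends up inside its own backup

Because mktemp honors TMPDIR, the build area does not always live under /tmp, which is why the exclusion has to be added unconditionally right after the directory is created.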
diff --git a/SOURCES/rear-bz1659137.patch b/SOURCES/rear-bz1659137.patch deleted file mode 100644 index e76336b..0000000 --- a/SOURCES/rear-bz1659137.patch +++ /dev/null @@ -1,91 +0,0 @@ -diff --git a/usr/share/rear/skel/default/etc/scripts/system-setup.d/55-migrate-network-devices.sh b/usr/share/rear/skel/default/etc/scripts/system-setup.d/55-migrate-network-devices.sh -index e3ebacce..17cd1dd6 100644 ---- a/usr/share/rear/skel/default/etc/scripts/system-setup.d/55-migrate-network-devices.sh -+++ b/usr/share/rear/skel/default/etc/scripts/system-setup.d/55-migrate-network-devices.sh -@@ -26,18 +26,47 @@ ORIGINAL_MACS=() - # The ORIGINAL_DEVICES collects the original device names: - ORIGINAL_DEVICES=() - # The ORIG_MACS_FILE contains lines of the form: network_interface mac_address -+ -+# Temporary rear_mappings_mac used when interfaces have been renamed -+tmp_mac_mapping_file=$(mktemp) -+ - # TODO: What should happen if there is no ORIG_MACS_FILE or when it is empty? - while read orig_dev orig_mac junk ; do - ORIGINAL_DEVICES=( "${ORIGINAL_DEVICES[@]}" "$orig_dev") - ORIGINAL_MACS=( "${ORIGINAL_MACS[@]}" "$orig_mac" ) -- # Continue with the next original MAC address if it is found on the current system: -- ip link show | grep -q "$orig_mac" && continue -- MIGRATE_DEVNAMES=( "${MIGRATE_DEVNAMES[@]}" "$orig_dev" ) -- MIGRATE_MACS=( "${MIGRATE_MACS[@]}" "$orig_mac" ) -+ # Continue with the next original MAC address if it is found on the current -+ # system, otherwise we consider it needs migration: -+ new_dev=$( get_device_by_hwaddr "$orig_mac" ) -+ if [ -n "$new_dev" ] ; then -+ [ "$new_dev" = "$orig_dev" ] && continue -+ # The device was found but has been renamed (it was customized in -+ # source system). -+ # Create a temporary mac mapping, we don't want finalize() to update -+ # the ifcfg-* files! -+ echo "$orig_mac $orig_mac $orig_dev" >> $tmp_mac_mapping_file -+ else -+ MIGRATE_MACS+=( "$orig_mac" ) -+ fi - done < $ORIG_MACS_FILE - --# Skip this process if all MACs and network interfacs (devices) are accounted for: --test ${#MIGRATE_MACS[@]} -eq 0 && test ${#MIGRATE_DEVNAMES[@]} -eq 0 && return 0 -+ -+if [ ${#MIGRATE_MACS[@]} -ne 0 ] ; then -+ # If some MACs were not found (MIGRATE_MACS not empty) then, we need a migration -+ : -+elif [ -s $tmp_mac_mapping_file ] ; then -+ # Else, if some devices were renamed, we also need a migration, but it will -+ # be automatic thanks to the $tmp_mac_mapping_file mapping file -+ -+ # We do not need the $MAC_MAPPING_FILE file from the user, just overwrite it -+ # Later, we will remove that file to not have finalize() modify the ifcfg-* -+ # files. -+ mkdir -p $(dirname $MAC_MAPPING_FILE) -+ cp $tmp_mac_mapping_file $MAC_MAPPING_FILE -+else -+ # Skip this process if all MACs and network interfaces (devices) are accounted for -+ unset tmp_mac_mapping_file -+ return 0 -+fi - - # Find the MAC addresses that are now available. - # This is an array with values of the form "$dev $mac $driver" -@@ -74,7 +103,7 @@ done - # so that it is shown to the user what MAC address mappings will be done: - if read_and_strip_file $MAC_MAPPING_FILE ; then - while read orig_dev orig_mac junk ; do -- read_and_strip_file $MAC_MAPPING_FILE | grep -q "$orig_mac" && MANUAL_MAC_MAPPING=true -+ read_and_strip_file $MAC_MAPPING_FILE | grep -qw "^$orig_mac" && MANUAL_MAC_MAPPING=true - done < $ORIG_MACS_FILE - fi - -@@ -237,7 +266,7 @@ if is_true $reload_udev ; then - echo -n "Reloading udev ... 
" - # Force udev to reload rules (as they were just changed) - # Failback to "udevadm control --reload" in case of problem (as specify in udevadm manpage in SLES12) -- # If nothing work, then wait 1 seconf delay. It should let the time for udev to detect changes in the rules files. -+ # If nothing work, then wait 1 second delay. It should let the time for udev to detect changes in the rules files. - udevadm control --reload-rules || udevadm control --reload || sleep 1 - my_udevtrigger - sleep 1 -@@ -252,5 +281,13 @@ if is_true $reload_udev ; then - fi - - # A later script in finalize/* will also go over the MAC mappings file and --# apply them to the files in the recovered system. -+# apply them to the files in the recovered system, unless we did the mapping -+# automatically, which means some device has been renamed and will probably -+# gets its name back upon reboot. -+if [ -s $tmp_mac_mapping_file ] ; then -+ rm $MAC_MAPPING_FILE $tmp_mac_mapping_file -+fi -+ -+unset tmp_mac_mapping_file - -+# vim: set et ts=4 sw=4: diff --git a/SOURCES/rear-bz1663515.patch b/SOURCES/rear-bz1663515.patch deleted file mode 100644 index 6c63d92..0000000 --- a/SOURCES/rear-bz1663515.patch +++ /dev/null @@ -1,17 +0,0 @@ -diff --git a/usr/share/rear/lib/uefi-functions.sh b/usr/share/rear/lib/uefi-functions.sh -index e40f2ab0..95e6292d 100644 ---- a/usr/share/rear/lib/uefi-functions.sh -+++ b/usr/share/rear/lib/uefi-functions.sh -@@ -46,6 +46,11 @@ function build_bootx86_efi { - Log "Did not find grub-mkimage (cannot build bootx86.efi)" - return - fi -- $gmkimage $v -O x86_64-efi -c $TMP_DIR/mnt/EFI/BOOT/embedded_grub.cfg -o $TMP_DIR/mnt/EFI/BOOT/BOOTX64.efi -p "/EFI/BOOT" part_gpt part_msdos fat ext2 normal chain boot configfile linux linuxefi multiboot jfs iso9660 usb usbms usb_keyboard video udf ntfs all_video gzio efi_gop reboot search test echo btrfs -+ # as not all Linux distro's have the same grub modules present we verify what we have (see also https://github.com/rear/rear/pull/2001) -+ grub_modules="" -+ for grub_module in part_gpt part_msdos fat ext2 normal chain boot configfile linux linuxefi multiboot jfs iso9660 usb usbms usb_keyboard video udf ntfs all_video gzio efi_gop reboot search test echo btrfs ; do -+ test "$( find /boot -type f -name "$grub_module.mod" 2>/dev/null )" && grub_modules="$grub_modules $grub_module" -+ done -+ $gmkimage $v -O x86_64-efi -c $TMP_DIR/mnt/EFI/BOOT/embedded_grub.cfg -o $TMP_DIR/mnt/EFI/BOOT/BOOTX64.efi -p "/EFI/BOOT" $grub_modules - StopIfError "Error occurred during $gmkimage of BOOTX64.efi" - } diff --git a/SOURCES/rear-bz1672938.patch b/SOURCES/rear-bz1672938.patch deleted file mode 100644 index dea54f2..0000000 --- a/SOURCES/rear-bz1672938.patch +++ /dev/null @@ -1,94 +0,0 @@ -diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh -index aa6fc938..4bc59131 100644 ---- a/usr/share/rear/lib/layout-functions.sh -+++ b/usr/share/rear/lib/layout-functions.sh -@@ -478,6 +478,9 @@ get_sysfs_name() { - ### /dev/dm-3 -> /dev/mapper/system-tmp - ### /dev/dm-4 -> /dev/mapper/oralun - ### /dev/dm-5 -> /dev/mapper/oralunp1 -+### /dev/sda -> /dev/sda -+### -+### Returns 0 on success, 1 if device is not existing - get_device_name() { - ### strip common prefixes - local name=${1#/dev/} -@@ -485,47 +488,39 @@ get_device_name() { - - contains_visible_char "$name" || BugError "Empty string passed to get_device_name" - -- ### Translate dm-8 -> mapper/test -- local device dev_number mapper_number -- if [[ -d /sys/block/$name ]] ; then -+ if 
[[ "$name" =~ ^mapper/ ]]; then -+ echo "/dev/$name" -+ return 0 -+ fi -+ -+ if [[ -L "/dev/$name" ]] ; then -+ # Map vg/lv into dm-X, which will then be resolved later -+ name="$( basename $(readlink -f /dev/$name) )" -+ fi -+ -+ if [[ "$name" =~ ^dm- ]] ; then -+ local device - if [[ -r /sys/block/$name/dm/name ]] ; then - ### recent kernels have a dm subfolder -- echo "/dev/mapper/$( < /sys/block/$name/dm/name)"; -- return 0 -+ device="$( < /sys/block/$name/dm/name )" - else -- ### loop over all block devices -- dev_number=$( < /sys/block/$name/dev) -- for device in /dev/mapper/* ; do -- mapper_number=$(dmsetup info -c --noheadings -o major,minor ${device#/dev/mapper/} 2>/dev/null ) -- if [ "$dev_number" = "$mapper_number" ] ; then -- echo "$device" -- return 0 -- fi -- done -+ local dev_number=$( < /sys/block/$name/dev) -+ if [[ ! -r "$TMP_DIR/dmsetup_info.txt" ]] ; then -+ dmsetup info --noheadings -c -o name,major,minor > "$TMP_DIR/dmsetup_info.txt" -+ fi -+ device="$( awk -F ':' "/$dev_number\$/ { print \$1 }" < "$TMP_DIR/dmsetup_info.txt" )" -+ [[ -n "$device" ]] || BugError "No device returned for major/minor $dev_number" - fi -- fi -- -- ### Translate device name to mapper name. ex: vg/lv -> mapper/vg-lv -- if [[ "$name" =~ ^mapper/ ]]; then -- echo "/dev/$name" -+ echo "/dev/mapper/$device" - return 0 - fi -- if my_dm=`readlink /dev/$name`; then -- for mapper_dev in /dev/mapper/*; do -- if mapper_dm=`readlink $mapper_dev`; then -- if [ "$my_dm" = "$mapper_dm" ]; then -- echo $mapper_dev -- return 0 -- fi -- fi -- done -- fi - - ### handle cciss sysfs naming - name=${name//!//} - - ### just return the possibly nonexisting name - echo "/dev/$name" -+ [[ -r "/dev/$name" ]] && return 0 - return 1 - } - -diff --git a/usr/share/rear/rescue/GNU/Linux/270_fc_transport_info.sh b/usr/share/rear/rescue/GNU/Linux/270_fc_transport_info.sh -index 0de4f60b..64276dfe 100644 ---- a/usr/share/rear/rescue/GNU/Linux/270_fc_transport_info.sh -+++ b/usr/share/rear/rescue/GNU/Linux/270_fc_transport_info.sh -@@ -1,3 +1,6 @@ -+# don't collect this anymore, this can be very slow -+return 0 -+ - # collect output from production SAN disks - - find /sys/class/fc_transport -follow -maxdepth 6 \( -name model -o -name vendor -o -name rev -name state -o -name model_name -o -name size -o -name node_name \) 2>/dev/null| egrep -v 'driver|rport|power|drivers|devices' | xargs grep '.' > $VAR_DIR/recovery/fc_transport.info >&2 diff --git a/SOURCES/rear-bz1685166.patch b/SOURCES/rear-bz1685166.patch deleted file mode 100644 index 365310e..0000000 --- a/SOURCES/rear-bz1685166.patch +++ /dev/null @@ -1,40 +0,0 @@ -diff --git a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -index 6b3194bd..57e8777a 100644 ---- a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -+++ b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -@@ -562,7 +562,12 @@ function handle_team () { - local network_interface=$1 - local sysfspath=/sys/class/net/$network_interface - -- if [ "$( ethtool -i $network_interface | awk '$1 == "driver:" { print $2 }' )" != "team" ] ; then -+ if has_binary ethtool ; then -+ if [ "$( ethtool -i $network_interface | awk '$1 == "driver:" { print $2 }' )" != "team" ] ; then -+ return $rc_error -+ fi -+ else -+ LogPrintError "Couldn't determine if network interface '$network_interface' is a Team, skipping." 
- return $rc_error - fi - -@@ -854,10 +859,17 @@ function handle_physdev () { - - DebugPrint "$network_interface is a physical device" - -- if [ -e $sysfspath/bonding_slave/perm_hwaddr ] ; then -- mac="$( cat $sysfspath/bonding_slave/perm_hwaddr )" -- else -- mac="$( cat $sysfspath/address )" || BugError "Could not read a MAC address for '$network_interface'." -+ local mac="" -+ -+ if has_binary ethtool ; then -+ mac="$( ethtool -P $network_interface 2>/dev/null | awk '{ print $NF }' )" -+ fi -+ if [ -z "$mac" ] ; then -+ if [ -e $sysfspath/bonding_slave/perm_hwaddr ] ; then -+ mac="$( cat $sysfspath/bonding_slave/perm_hwaddr )" -+ else -+ mac="$( cat $sysfspath/address )" || BugError "Could not read a MAC address for '$network_interface'." -+ fi - fi - # Skip fake interfaces without MAC address - [ "$mac" != "00:00:00:00:00:00" ] || return $rc_error diff --git a/SOURCES/rear-bz1692575.patch b/SOURCES/rear-bz1692575.patch deleted file mode 100644 index a5abadd..0000000 --- a/SOURCES/rear-bz1692575.patch +++ /dev/null @@ -1,33 +0,0 @@ -diff -up rear-2.4/usr/share/rear/conf/GNU/Linux.conf.empty rear-2.4/usr/share/rear/conf/GNU/Linux.conf ---- rear-2.4/usr/share/rear/conf/GNU/Linux.conf.empty 2019-09-10 09:45:50.381285069 +0200 -+++ rear-2.4/usr/share/rear/conf/GNU/Linux.conf 2019-09-10 09:45:50.421284309 +0200 -@@ -276,6 +276,6 @@ COPY_AS_IS_EXCLUDE=( "${COPY_AS_IS_EXCLU - # some stuff for the Linux command line - KERNEL_CMDLINE="$KERNEL_CMDLINE selinux=0" - # common users and groups --CLONE_USERS=( "${CLONE_USERS[@]:-}" daemon rpc usbmuxd usbmux vcsa nobody dbus ) --CLONE_GROUPS=( "${CLONE_GROUPS[@]:-}" tty usbmuxd usbmux fuse kvm oinstall dbus ) -+CLONE_USERS+=( daemon rpc usbmuxd usbmux vcsa nobody dbus ) -+CLONE_GROUPS+=( tty usbmuxd usbmux fuse kvm oinstall dbus ) - -diff -up rear-2.4/usr/share/rear/rescue/default/900_clone_users_and_groups.sh.empty rear-2.4/usr/share/rear/rescue/default/900_clone_users_and_groups.sh ---- rear-2.4/usr/share/rear/rescue/default/900_clone_users_and_groups.sh.empty 2018-06-21 10:40:53.000000000 +0200 -+++ rear-2.4/usr/share/rear/rescue/default/900_clone_users_and_groups.sh 2019-09-10 09:45:50.421284309 +0200 -@@ -40,6 +40,8 @@ local group="" - # because it should succeed when there is any non-empty array member, not necessarily the first one: - test "${CLONE_USERS[*]}" && Log "Cloning users: ${CLONE_USERS[@]}" - for user in "${CLONE_USERS[@]}" ; do -+ # Skip empty user values, cf. https://github.com/rear/rear/issues/2220 -+ test $user || continue - # Skip if the user exists already in the ReaR recovery system: - grep -q "^$user:" $ROOTFS_DIR/etc/passwd && continue - # Skip if the user does not exist in the current system: -@@ -78,6 +80,8 @@ done - # because it should succeed when there is any non-empty array member, not necessarily the first one: - test "${CLONE_GROUPS[*]}" && Log "Cloning groups: ${CLONE_GROUPS[@]}" - for group in "${CLONE_GROUPS[@]}" ; do -+ # Skip empty group values, cf. 
https://github.com/rear/rear/issues/2220 -+ test $group || continue - # Skip if the group exists already in the ReaR recovery system: - grep -q "^$group:" $ROOTFS_DIR/etc/group && continue - # Skip if the group does not exist in the current system: diff --git a/SOURCES/rear-bz1693608.patch b/SOURCES/rear-bz1693608.patch deleted file mode 100644 index 9d8aae2..0000000 --- a/SOURCES/rear-bz1693608.patch +++ /dev/null @@ -1,113 +0,0 @@ -diff -up rear-2.4/usr/share/rear/build/default/980_verify_rootfs.sh.build rear-2.4/usr/share/rear/build/default/980_verify_rootfs.sh ---- rear-2.4/usr/share/rear/build/default/980_verify_rootfs.sh.build 2019-09-10 09:31:05.971102125 +0200 -+++ rear-2.4/usr/share/rear/build/default/980_verify_rootfs.sh 2019-09-10 09:31:06.002101535 +0200 -@@ -3,6 +3,33 @@ - # i.e. test that the ReaR recovery system will be usable - # to avoid issues like https://github.com/rear/rear/issues/1494 - -+if test "$KEEP_BUILD_DIR" = "errors"; then -+ local keep_build_dir_on_errors=1 -+else -+ # KEEP_BUILD_DIR does not say to keep it on errors -+ # - effective value depends on whether we are running interactively -+ if tty -s ; then -+ local keep_build_dir_on_errors=1 -+ else -+ local keep_build_dir_on_errors=0 -+ fi -+fi -+ -+function keep_build_dir() { -+ if ! is_true "$KEEP_BUILD_DIR" && ! is_false "$KEEP_BUILD_DIR"; then -+ # is either empty or equal to "errors" ... or some garbage value -+ local orig_keep_build_dir="$KEEP_BUILD_DIR" -+ KEEP_BUILD_DIR="${keep_build_dir_on_errors}" -+ fi -+ if is_true "$KEEP_BUILD_DIR" ; then -+ LogPrintError "Build area kept for investigation in $BUILD_DIR, remove it when not needed" -+ elif ! is_false "$orig_keep_build_dir" ; then -+ # if users disabled preserving the build dir explicitly, let's not bother them with messages -+ LogPrintError "Build area $BUILD_DIR will be removed" -+ LogPrintError "To preserve it for investigation set KEEP_BUILD_DIR=errors or run ReaR with -d" -+ fi -+} -+ - # In case the filesystem that contains the ROOTFS_DIR is mounted 'noexec' we cannot do the 'chroot' tests. - # The filesystem_name function in linux-functions.sh returns the mountpoint (not a filesystem name like 'ext4'): - local rootfs_dir_fs_mountpoint=$( filesystem_name $ROOTFS_DIR ) -@@ -16,7 +43,7 @@ Log "Testing that $ROOTFS_DIR contains a - - # The bash test ensures that we have a working bash in the ReaR recovery system: - if ! chroot $ROOTFS_DIR /bin/bash -c true ; then -- KEEP_BUILD_DIR=1 -+ keep_build_dir - BugError "ReaR recovery system in '$ROOTFS_DIR' is broken: 'bash -c true' failed" - fi - -@@ -25,7 +52,7 @@ fi - # First test is 'ldd /bin/bash' to ensure 'ldd' works: - Log "Testing 'ldd /bin/bash' to ensure 'ldd' works for the subsequent 'ldd' tests" - if ! chroot $ROOTFS_DIR /bin/ldd /bin/bash 1>&2 ; then -- KEEP_BUILD_DIR=1 -+ keep_build_dir - BugError "ReaR recovery system in '$ROOTFS_DIR' is broken: 'ldd /bin/bash' failed" - fi - # Now test each binary (except links) with ldd and look for 'not found' libraries. 
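# A minimal standalone sketch of the "test each binary with ldd" step referred to in the
# comment above, assuming ROOTFS_DIR points at the ReaR recovery system and that checking
# every executable file is acceptable; the real 980_verify_rootfs.sh also scans libraries,
# adjusts LD_LIBRARY_PATH and decides per binary whether a missing library is fatal.
broken_binaries=""
for binary in $( find "$ROOTFS_DIR" -type f -executable 2>/dev/null ) ; do
    # Run ldd inside the recovery system so its own linker and library search paths are used:
    if chroot "$ROOTFS_DIR" /bin/ldd "${binary#$ROOTFS_DIR}" 2>/dev/null | grep -q 'not found' ; then
        broken_binaries="$broken_binaries ${binary#$ROOTFS_DIR}"
    fi
done
test "$broken_binaries" && echo "Binaries with missing libraries:$broken_binaries"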
-@@ -83,7 +110,7 @@ test $old_LD_LIBRARY_PATH && export LD_L - # Report binaries with 'not found' shared object dependencies: - if contains_visible_char "$broken_binaries" ; then - LogPrintError "There are binaries or libraries in the ReaR recovery system that need additional libraries" -- KEEP_BUILD_DIR=1 -+ keep_build_dir - local fatal_missing_library="" - local ldd_output="" - for binary in $broken_binaries ; do -@@ -113,9 +140,10 @@ if contains_visible_char "$broken_binari - # Show only the missing libraries to the user to not flood his screen with tons of other ldd output lines: - PrintError "$( grep 'not found' <<<"$ldd_output" )" - done -+ LogPrintError "ReaR recovery system in '$ROOTFS_DIR' needs additional libraries, check $RUNTIME_LOGFILE for details" -+ is_true "$fatal_missing_library" && keep_build_dir - # Usually it should be no BugError when there are libraries missing for particular binaries because probably - # the reason is that the user added only the plain binaries with COPY_AS_IS (instead of using REQUIRED_PROGS): - is_true "$fatal_missing_library" && Error "ReaR recovery system in '$ROOTFS_DIR' not usable" -- LogPrintError "ReaR recovery system in '$ROOTFS_DIR' needs additional libraries, check $RUNTIME_LOGFILE for details" - fi - -diff -up rear-2.4/usr/share/rear/conf/default.conf.build rear-2.4/usr/share/rear/conf/default.conf ---- rear-2.4/usr/share/rear/conf/default.conf.build 2019-09-10 09:31:05.996101649 +0200 -+++ rear-2.4/usr/share/rear/conf/default.conf 2019-09-10 09:31:06.002101535 +0200 -@@ -125,9 +125,19 @@ OS_VERSION=none - # supported values that could make it work even for your system. - # See the SetOSVendorAndVersion function in the config-functions.sh script. - --# Keep the build area after we are done (BOOL). -+# Keep the build area after we are done (ternary). - # Useful to inspect the ReaR recovery system content in $TMPDIR/rear.XXXXXXXXXXXXXXX/rootfs/ --# directly without the need to extract it from the initramfs/initrd in the ISO image: -+# directly without the need to extract it from the initramfs/initrd in the ISO image. -+# Set to "y", "Yes", etc. to always keep the build area, to "n", "No", etc. to never keep it. -+# KEEP_BUILD_DIR is automatically set to true in debug mode (-d) and in debugscript mode (-D). -+# In addition to true (any value that is recognized as 'yes' by the is_true function) -+# and false (any value that is recognized as 'no' by the is_false function) it can be set -+# to several special values: -+# - "errors" to obtain the old behaviour where KEEP_BUILD_DIR was always set -+# to true to keep the build area when errors in the ReaR recovery system were detected. -+# - empty (KEEP_BUILD_DIR="") which means that the build area will be kept on errors -+# if running interactively (in a terminal) and not otherwise (to avoid cluttering -+# /tmp by cron or other automated jobs in case of errors). - KEEP_BUILD_DIR="" - - # No default workflows. 
This variable is filled in where the workflows are defined -diff -up rear-2.4/usr/share/rear/lib/framework-functions.sh.build rear-2.4/usr/share/rear/lib/framework-functions.sh ---- rear-2.4/usr/share/rear/lib/framework-functions.sh.build 2018-06-21 10:40:53.000000000 +0200 -+++ rear-2.4/usr/share/rear/lib/framework-functions.sh 2019-09-10 09:31:06.002101535 +0200 -@@ -106,7 +106,7 @@ function SourceStage () { - function cleanup_build_area_and_end_program () { - # Cleanup build area - Log "Finished in $((SECONDS-STARTTIME)) seconds" -- if test "$KEEP_BUILD_DIR" ; then -+ if is_true "$KEEP_BUILD_DIR" ; then - LogPrint "You should also rm -Rf $BUILD_DIR" - else - Log "Removing build area $BUILD_DIR" diff --git a/SOURCES/rear-bz1700807.patch b/SOURCES/rear-bz1700807.patch deleted file mode 100644 index 50c43ae..0000000 --- a/SOURCES/rear-bz1700807.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff -up rear-2.4/usr/share/rear/conf/default.conf.nbu rear-2.4/usr/share/rear/conf/default.conf ---- rear-2.4/usr/share/rear/conf/default.conf.nbu 2019-09-10 09:37:56.619290929 +0200 -+++ rear-2.4/usr/share/rear/conf/default.conf 2019-09-10 09:37:56.621290891 +0200 -@@ -1480,7 +1480,7 @@ OBDR_BLOCKSIZE=2048 - # - COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt ) - COPY_AS_IS_EXCLUDE_NBU=( "/usr/openv/netbackup/logs/*" "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal ) --NBU_LD_LIBRARY_PATH="/usr/openv/lib" -+NBU_LD_LIBRARY_PATH="/usr/openv/lib:/usr/openv/netbackup/sec/at/lib/" - PROGS_NBU=( ) - - ## diff --git a/SOURCES/rear-bz1711123.patch b/SOURCES/rear-bz1711123.patch deleted file mode 100644 index 70f3a8b..0000000 --- a/SOURCES/rear-bz1711123.patch +++ /dev/null @@ -1,87 +0,0 @@ -diff --git a/usr/share/rear/layout/save/GNU/Linux/230_filesystem_layout.sh b/usr/share/rear/layout/save/GNU/Linux/230_filesystem_layout.sh -index 2fc8ccd3..60306718 100644 ---- a/usr/share/rear/layout/save/GNU/Linux/230_filesystem_layout.sh -+++ b/usr/share/rear/layout/save/GNU/Linux/230_filesystem_layout.sh -@@ -50,13 +50,28 @@ fi - # so that in particular what is mounted at '/' is output before other stuff. - read_filesystems_command="$read_filesystems_command | sort -t ' ' -k 1,1 -u" - --# docker daemon mounts file systems for its docker containers --# see also https://docs.docker.com/storage/storagedriver/device-mapper-driver/#configure-direct-lvm-mode-for-production --# As it is for container usage only we do not to backup these up or recreate as this disk device is completely under --# control by docker itself (even the recreation of it incl, the creation of the volume group). Usually this is --# done via a kind of cookbook (Chef, puppet or ansible) -+# The Docker daemon mounts file systems for its Docker containers, see also -+# https://docs.docker.com/storage/storagedriver/device-mapper-driver/#configure-direct-lvm-mode-for-production -+# As it is for container usage only we do not to backup these up or recreate as this disk device is completely -+# under control by Docker itself (even the recreation of it incl, the creation of the volume group). -+# Usually this is done via a kind of cookbook (Chef, puppet or ansible). 
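# A rough sketch of the kind of check the "Docker specific exclude part" mentioned in the
# hunk below could rely on, assuming docker_root_dir has already been detected; the function
# name is illustrative only, the real script filters the mounted filesystems before writing
# the remaining ones into $DISKLAYOUT_FILE.
is_below_docker_root_dir() {
    local mountpoint=$1
    # Nothing can be excluded when the Docker Root Dir could not be determined:
    test "$docker_root_dir" || return 1
    # Exclude the Docker Root Dir itself and every mountpoint underneath it:
    case "$mountpoint" in
        ("$docker_root_dir"|"$docker_root_dir"/*) return 0 ;;
        (*) return 1 ;;
    esac
}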
- docker_is_running="" --service docker status >/dev/null 2>&1 && docker_is_running="yes" -+docker_root_dir="" -+if service docker status &>/dev/null ; then -+ docker_is_running="yes" -+ # When the Docker daemon/service is running, try to get its 'Docker Root Dir': -+ # Kill 'docker info' with SIGTERM after 5 seconds and with SIGKILL after additional 2 seconds -+ # because there are too many crippled Docker installations, cf. https://github.com/rear/rear/pull/2021 -+ docker_root_dir=$( timeout -k 2s 5s docker info | grep 'Docker Root Dir' | awk '{print $4}' ) -+ # Things may go wrong in the 'Docker specific exclude part' below -+ # when Docker is used but its 'Docker Root Dir' cannot be determined -+ # cf. https://github.com/rear/rear/issues/1989 -+ if test "$docker_root_dir" ; then -+ LogPrint "Docker is running, skipping filesystems mounted below Docker Root Dir $docker_root_dir" -+ else -+ LogPrintError "Cannot determine Docker Root Dir - things may go wrong - check $DISKLAYOUT_FILE" -+ fi -+fi - - # Begin writing output to DISKLAYOUT_FILE: - ( -@@ -64,9 +79,10 @@ service docker status >/dev/null 2>&1 && docker_is_running="yes" - echo "# Format: fs [uuid=] [label=