From b14857c71cebd241a8c78ea37f8d54ab22473cc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Zaoral?= Date: Mon, 5 Feb 2024 13:54:07 +0100 Subject: [PATCH] rebase to version 2.7 - drop obsolete patches - rebase remaining patches Resolves: rhbz#2215778 --- rear-bz1747468.patch | 93 +- rear-bz1832394.patch | 351 --- rear-bz1930662.patch | 693 ------ rear-bz1945869.patch | 274 --- rear-bz1958247.patch | 2040 ----------------- rear-bz1983013.patch | 68 - rear-bz1993296.patch | 34 - rear-bz2035939.patch | 56 - rear-bz2048454.patch | 78 - rear-bz2049091.patch | 25 - rear-bz2083272.patch | 171 -- rear-bz2091163.patch | 32 +- rear-bz2096900.patch | 58 - rear-bz2096916.patch | 130 -- rear-bz2097437.patch | 37 - rear-bz2104005.patch | 65 +- rear-bz2111049.patch | 37 - rear-bz2111059.patch | 105 - rear-bz2119501.patch | 4 +- rear-bz2130945.patch | 25 +- rear-bz2131946.patch | 129 -- rear-device-shrinking-bz2223895.patch | 7 +- rear-luks-key-bz2228779.patch | 25 - rear-pr2675.patch | 60 - rear-remove-lvmdevices-bz2145014.patch | 36 +- ...-save-lvm-poolmetadatasize-RHEL-6984.patch | 52 +- rear-sfdc02772301.patch | 38 - ...useless-xfs-mount-options-RHEL-10478.patch | 35 +- rear-tmpdir.patch | 37 - rear-uefi-usb-secureboot-bz2196445.patch | 62 +- rear-usb-uefi-part-size-bz2228402.patch | 41 - rear-vg-command-not-found-bz2121476.patch | 21 - rear.spec | 105 +- rsync-output.patch | 864 ------- s390-no-clobber-disks.patch | 150 +- sources | 2 +- 36 files changed, 430 insertions(+), 5610 deletions(-) delete mode 100644 rear-bz1832394.patch delete mode 100644 rear-bz1930662.patch delete mode 100644 rear-bz1945869.patch delete mode 100644 rear-bz1958247.patch delete mode 100644 rear-bz1983013.patch delete mode 100644 rear-bz1993296.patch delete mode 100644 rear-bz2035939.patch delete mode 100644 rear-bz2048454.patch delete mode 100644 rear-bz2049091.patch delete mode 100644 rear-bz2083272.patch delete mode 100644 rear-bz2096900.patch delete mode 100644 rear-bz2096916.patch delete mode 100644 rear-bz2097437.patch delete mode 100644 rear-bz2111049.patch delete mode 100644 rear-bz2111059.patch delete mode 100644 rear-bz2131946.patch delete mode 100644 rear-luks-key-bz2228779.patch delete mode 100644 rear-pr2675.patch delete mode 100644 rear-sfdc02772301.patch delete mode 100644 rear-tmpdir.patch delete mode 100644 rear-usb-uefi-part-size-bz2228402.patch delete mode 100644 rear-vg-command-not-found-bz2121476.patch delete mode 100644 rsync-output.patch diff --git a/rear-bz1747468.patch b/rear-bz1747468.patch index a2f7bb3..a417f96 100644 --- a/rear-bz1747468.patch +++ b/rear-bz1747468.patch @@ -1,20 +1,38 @@ +From 5d5d1db3ca621eb80b9481924d1fc470571cfc09 Mon Sep 17 00:00:00 2001 +From: Pavel Cahyna +Date: Mon, 30 Aug 2021 12:00:43 +0200 +Subject: [PATCH] Avoid vgcfgrestore on thin volumes/pools + +and any other unsupported volume types. + +vgcfgrestore is not supposed to be able to restore any logical volumes +that use kernel metadata. All volume types except linear and striped use +kernel metadata. Main purpose of vgcfgrestore (with mandatory --force +option) is to let users fix existing thin-pool, not to recreate the pool +on empty disks. Do not even try vgcfgrestore on VGs that need any kernel +metadata, because it might lead to an inconsistent state (if there are +data that the kernel might interpret as LV metadata present on the disks). 
+ +For VGs that have any volume with kernel metadata and are thus +unsupported by vgcfgrestore, switch automatically to LV creation using +lvcreate, similarly to MIGRATION_MODE. + +Avoid vgcfgrestore --force entirely, since it should not be needed now. + +This mostly reverts changes in commits +311bfb3da1d5e47a2ff144123a2457e634f67893 and +1b779abfbf56693877fe666f56253ec623599674. The former code is preserved +and gets enabled if FORCE_VGCFGRESTORE=y. This option is on purpose +undocumented though and may be removed in the future. +--- + .../prepare/GNU/Linux/110_include_lvm_code.sh | 8 +++++- + usr/share/rear/lib/layout-functions.sh | 26 +++++++++++++++++++ + 2 files changed, 33 insertions(+), 1 deletion(-) + diff --git a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh -index 7cfdfcf2..1be17ba8 100644 +index 5babce228..54a55e688 100644 --- a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh +++ b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh -@@ -68,9 +68,9 @@ create_lvmgrp() { - local vg=${vgrp#/dev/} - - cat >> "$LAYOUT_CODE" <> "$LAYOUT_CODE" <&2 ; then - - LogPrint "Sleeping 3 seconds to let udev or systemd-udevd create their devices..." - sleep 3 >&2 -- create_volume_group=0 -- create_logical_volumes=0 -+ create_volume_group=( \$( RmInArray "$vg" "\${create_volume_group[@]}" ) ) -+ create_logical_volumes=( \$( RmInArray "$vg" "\${create_logical_volumes[@]}" ) ) +@@ -100,6 +100,9 @@ if lvm vgcfgrestore -f "$VAR_DIR/layout/lvm/${vg}.cfg" $vg >&2 ; then + create_volume_group=( \$( RmInArray "$vg" "\${create_volume_group[@]}" ) ) + create_logical_volumes=( \$( RmInArray "$vg" "\${create_logical_volumes[@]}" ) ) +EOF + if is_true "${FORCE_VGCFGRESTORE-no}"; then @@ -39,14 +52,9 @@ index 7cfdfcf2..1be17ba8 100644 # # It failed ... restore layout using 'vgcfgrestore --force', but then remove Thin volumes, they are broken # -@@ -121,9 +124,12 @@ elif lvm vgcfgrestore --force -f "$VAR_DIR/layout/lvm/${vg}.cfg" $vg >&2 ; then - sleep 3 >&2 - - # All logical volumes have been created, except Thin volumes and pools -- create_volume_group=0 -- create_thin_volumes_only=1 -+ create_volume_group=( \$( RmInArray "$vg" "\${create_volume_group[@]}" ) ) -+ create_thin_volumes_only+=( "$vg" ) +@@ -124,6 +127,9 @@ elif lvm vgcfgrestore --force -f "$VAR_DIR/layout/lvm/${vg}.cfg" $vg >&2 ; then + create_volume_group=( \$( RmInArray "$vg" "\${create_volume_group[@]}" ) ) + create_thin_volumes_only+=( "$vg" ) +EOF + fi @@ -54,32 +62,11 @@ index 7cfdfcf2..1be17ba8 100644 # # It failed also ... restore using 'vgcreate/lvcreate' commands # -@@ -138,7 +144,7 @@ EOF - local -a devices=($(awk "\$1 == \"lvmdev\" && \$2 == \"$vgrp\" { print \$3 }" "$LAYOUT_FILE")) - - cat >> "$LAYOUT_CODE" < [key:value ...] - - === LUKS Devices === - ---------------------------------- --crypt /dev/mapper/ [cipher=] [key_size=] [hash=] [uuid=] [keyfile=] [password=] -+crypt /dev/mapper/ [type=] [cipher=] [key_size=] [hash=] [uuid=] [keyfile=] [password=] - ---------------------------------- - - === DRBD === -diff --git a/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh -index 05279bc8..0c662f67 100644 ---- a/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh -+++ b/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh -@@ -1,35 +1,75 @@ -+ - # Code to recreate and/or open LUKS volumes. 
- - create_crypt() { -+ # See the create_device() function in lib/layout-functions.sh what "device type" means: -+ local device_type="$1" -+ if ! grep -q "^crypt $device_type " "$LAYOUT_FILE" ; then -+ LogPrintError "Skip recreating LUKS volume $device_type (no 'crypt $device_type' entry in $LAYOUT_FILE)" -+ # FIXME: The return code is ignored in the create_device() function in lib/layout-functions.sh: -+ return 1 -+ fi -+ - local crypt target_device source_device options -- read crypt target_device source_device options < <(grep "^crypt $1 " "$LAYOUT_FILE") -+ local mapping_name option key value -+ local cryptsetup_options="" keyfile="" password="" - -- local target_name=${target_device#/dev/mapper/} -+ read crypt target_device source_device options < <( grep "^crypt $device_type " "$LAYOUT_FILE" ) -+ -+ # Careful! One cannot 'test -b $source_device' here at the time when this code is run -+ # because the source device is usually a disk partition block device like /dev/sda2 -+ # but disk partition block devices usually do not yet exist (in particular not on a new clean disk) -+ # because partitions are actually created later when the diskrestore.sh script is run -+ # but not here when this code is run which only generates the diskrestore.sh script: -+ if ! test $source_device ; then -+ LogPrintError "Skip recreating LUKS volume $device_type: No source device (see the 'crypt $device_type' entry in $LAYOUT_FILE)" -+ # FIXME: The return code is ignored in the create_device() function in lib/layout-functions.sh: -+ return 1 -+ fi -+ -+ mapping_name=${target_device#/dev/mapper/} -+ if ! test $mapping_name ; then -+ LogPrintError "Skip recreating LUKS volume $device_type on $source_device: No /dev/mapper/... mapping name (see the 'crypt $device_type' entry in $LAYOUT_FILE)" -+ # FIXME: The return code is ignored in the create_device() function in lib/layout-functions.sh: -+ return 1 -+ fi - -- local cryptsetup_options="" keyfile="" password="" -- local option key value - for option in $options ; do -- key=${option%=*} -+ # $option is of the form keyword=value and -+ # we assume keyword has no '=' character but value could be anything that may have a '=' character -+ # so we split keyword=value at the leftmost '=' character so that -+ # e.g. keyword=foo=bar gets split into key="keyword" and value="foo=bar": -+ key=${option%%=*} - value=${option#*=} -- -+ # The "cryptseup luksFormat" command does not require any of the type, cipher, key-size, hash, uuid option values -+ # because if omitted a cryptseup default value is used so we treat those values as optional. -+ # Using plain test to ensure the value is a single non empty and non blank word -+ # without quoting because test " " would return zero exit code -+ # cf. 
"Beware of the emptiness" in https://github.com/rear/rear/wiki/Coding-Style - case "$key" in -- cipher) -- cryptsetup_options+=" --cipher $value" -+ (type) -+ test $value && cryptsetup_options+=" --type $value" -+ ;; -+ (cipher) -+ test $value && cryptsetup_options+=" --cipher $value" -+ ;; -+ (key_size) -+ test $value && cryptsetup_options+=" --key-size $value" - ;; -- key_size) -- cryptsetup_options+=" --key-size $value" -+ (hash) -+ test $value && cryptsetup_options+=" --hash $value" - ;; -- hash) -- cryptsetup_options+=" --hash $value" -+ (uuid) -+ test $value && cryptsetup_options+=" --uuid $value" - ;; -- uuid) -- cryptsetup_options+=" --uuid $value" -+ (keyfile) -+ test $value && keyfile=$value - ;; -- keyfile) -- keyfile=$value -+ (password) -+ test $value && password=$value - ;; -- password) -- password=$value -+ (*) -+ LogPrintError "Skipping unsupported LUKS cryptsetup option '$key' in 'crypt $target_device $source_device' entry in $LAYOUT_FILE" - ;; - esac - done -@@ -37,26 +77,25 @@ create_crypt() { - cryptsetup_options+=" $LUKS_CRYPTSETUP_OPTIONS" - - ( -- echo "Log \"Creating LUKS device $target_name on $source_device\"" -+ echo "LogPrint \"Creating LUKS volume $mapping_name on $source_device\"" - if [ -n "$keyfile" ] ; then - # Assign a temporary keyfile at this stage so that original keyfiles do not leak onto the rescue medium. - # The original keyfile will be restored from the backup and then re-assigned to the LUKS device in the - # 'finalize' stage. - # The scheme for generating a temporary keyfile path must be the same here and in the 'finalize' stage. -- keyfile="${TMPDIR:-/tmp}/LUKS-keyfile-$target_name" -+ keyfile="$TMP_DIR/LUKS-keyfile-$mapping_name" - dd bs=512 count=4 if=/dev/urandom of="$keyfile" - chmod u=rw,go=- "$keyfile" -- - echo "cryptsetup luksFormat --batch-mode $cryptsetup_options $source_device $keyfile" -- echo "cryptsetup luksOpen --key-file $keyfile $source_device $target_name" -+ echo "cryptsetup luksOpen --key-file $keyfile $source_device $mapping_name" - elif [ -n "$password" ] ; then - echo "echo \"$password\" | cryptsetup luksFormat --batch-mode $cryptsetup_options $source_device" -- echo "echo \"$password\" | cryptsetup luksOpen $source_device $target_name" -+ echo "echo \"$password\" | cryptsetup luksOpen $source_device $mapping_name" - else -- echo "LogPrint \"Please enter the password for LUKS device $target_name ($source_device):\"" -+ echo "LogUserOutput \"Set the password for LUKS volume $mapping_name (for 'cryptsetup luksFormat' on $source_device):\"" - echo "cryptsetup luksFormat --batch-mode $cryptsetup_options $source_device" -- echo "LogPrint \"Please re-enter the password for LUKS device $target_name ($source_device):\"" -- echo "cryptsetup luksOpen $source_device $target_name" -+ echo "LogUserOutput \"Enter the password for LUKS volume $mapping_name (for 'cryptsetup luksOpen' on $source_device):\"" -+ echo "cryptsetup luksOpen $source_device $mapping_name" - fi - echo "" - ) >> "$LAYOUT_CODE" -@@ -64,38 +103,61 @@ create_crypt() { - - # Function open_crypt() is meant to be used by the 'mountonly' workflow - open_crypt() { -+ # See the do_mount_device() function in lib/layout-functions.sh what "device type" means: -+ local device_type="$1" -+ if ! 
grep -q "^crypt $device_type " "$LAYOUT_FILE" ; then -+ LogPrintError "Skip opening LUKS volume $device_type (no 'crypt $device_type' entry in $LAYOUT_FILE)" -+ # FIXME: The return code is ignored in the do_mount_device() function in lib/layout-functions.sh: -+ return 1 -+ fi -+ - local crypt target_device source_device options -- read crypt target_device source_device options < <(grep "^crypt $1 " "$LAYOUT_FILE") -+ local mapping_name option key value -+ local cryptsetup_options="" keyfile="" password="" - -- local target_name=${target_device#/dev/mapper/} -+ read crypt target_device source_device options < <( grep "^crypt $device_type " "$LAYOUT_FILE" ) -+ -+ if ! test -b "$source_device" ; then -+ LogPrintError "Skip opening LUKS volume $device_type on device '$source_device' that is no block device (see the 'crypt $device_type' entry in $LAYOUT_FILE)" -+ # FIXME: The return code is ignored in the do_mount_device() function in lib/layout-functions.sh: -+ return 1 -+ fi -+ -+ mapping_name=${target_device#/dev/mapper/} -+ if ! test $mapping_name ; then -+ LogPrintError "Skip opening LUKS volume $device_type on $source_device: No /dev/mapper/... mapping name (see the 'crypt $device_type' entry in $LAYOUT_FILE)" -+ # FIXME: The return code is ignored in the do_mount_device() function in lib/layout-functions.sh: -+ return 1 -+ fi - -- local cryptsetup_options="" keyfile="" password="" -- local option key value - for option in $options ; do -- key=${option%=*} -+ # $option is of the form keyword=value and -+ # we assume keyword has no '=' character but value could be anything that may have a '=' character -+ # so we split keyword=value at the leftmost '=' character so that -+ # e.g. keyword=foo=bar gets split into key="keyword" and value="foo=bar": -+ key=${option%%=*} - value=${option#*=} -- - case "$key" in -- keyfile) -- keyfile=$value -+ (keyfile) -+ test $value && keyfile=$value - ;; -- password) -- password=$value -+ (password) -+ test $value && password=$value - ;; - esac - done - - ( -- echo "Log \"Opening LUKS device $target_name on $source_device\"" -+ echo "LogPrint \"Opening LUKS volume $mapping_name on $source_device\"" - if [ -n "$keyfile" ] ; then - # During a 'mountonly' workflow, the original keyfile is supposed to be - # available at this point. -- echo "cryptsetup luksOpen --key-file $keyfile $source_device $target_name" -+ echo "cryptsetup luksOpen --key-file $keyfile $source_device $mapping_name" - elif [ -n "$password" ] ; then -- echo "echo \"$password\" | cryptsetup luksOpen $source_device $target_name" -+ echo "echo \"$password\" | cryptsetup luksOpen $source_device $mapping_name" - else -- echo "LogPrint \"Please enter the password for LUKS device $target_name ($source_device):\"" -- echo "cryptsetup luksOpen $source_device $target_name" -+ echo "LogUserOutput \"Enter the password for LUKS volume $mapping_name (for 'cryptsetup luksOpen' on $source_device):\"" -+ echo "cryptsetup luksOpen $source_device $mapping_name" - fi - echo "" - ) >> "$LAYOUT_CODE" -diff --git a/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh b/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh -index c1e1cfd5..afeabf6a 100644 ---- a/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh -+++ b/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh -@@ -9,6 +9,8 @@ Log "Saving Encrypted volumes." 
- REQUIRED_PROGS+=( cryptsetup dmsetup ) - COPY_AS_IS+=( /usr/share/cracklib/\* /etc/security/pwquality.conf ) - -+local invalid_cryptsetup_option_value="no" -+ - while read target_name junk ; do - # find the target device we're mapping - if ! [ -e /dev/mapper/$target_name ] ; then -@@ -30,29 +32,96 @@ while read target_name junk ; do - source_device="$(get_device_name ${slave##*/})" - done - -- if ! cryptsetup isLuks $source_device >/dev/null 2>&1; then -+ if ! blkid -p -o export $source_device >$TMP_DIR/blkid.output ; then -+ LogPrintError "Error: Cannot get attributes for $target_name ('blkid -p -o export $source_device' failed)" - continue - fi - -- # gather crypt information -- cipher=$(cryptsetup luksDump $source_device | grep "Cipher name" | sed -r 's/^.+:\s*(.+)$/\1/') -- mode=$(cryptsetup luksDump $source_device | grep "Cipher mode" | cut -d: -f2- | awk '{printf("%s",$1)};') -- key_size=$(cryptsetup luksDump $source_device | grep "MK bits" | sed -r 's/^.+:\s*(.+)$/\1/') -- hash=$(cryptsetup luksDump $source_device | grep "Hash spec" | sed -r 's/^.+:\s*(.+)$/\1/') -- uuid=$(cryptsetup luksDump $source_device | grep "UUID" | sed -r 's/^.+:\s*(.+)$/\1/') -- keyfile_option=$([ -f /etc/crypttab ] && awk '$1 == "'"$target_name"'" && $3 != "none" && $3 != "-" && $3 != "" { print "keyfile=" $3; }' /etc/crypttab) -+ if ! grep -q "TYPE=crypto_LUKS" $TMP_DIR/blkid.output ; then -+ Log "Skipping $target_name (no 'TYPE=crypto_LUKS' in 'blkid -p -o export $source_device' output)" -+ continue -+ fi - -- # LUKS version 2 is not yet suppported, see https://github.com/rear/rear/issues/2204 -- # When LUKS version 2 is used the above code fails at least to determine the hash value -- # so we use an empty hash value as a simple test if gathering crypt information was successful: -- test "$hash" || Error "No hash value for LUKS device '$target_name' at '$source_device' (only LUKS version 1 is supported)" -+ # Detect LUKS version: -+ # Remove all non-digits in particular to avoid leading or trailing spaces in the version string -+ # cf. "Beware of the emptiness" in https://github.com/rear/rear/wiki/Coding-Style -+ # that could happen if the blkid output contains "VERSION = 2" so that 'cut -d= -f2' results " 2". -+ version=$( grep "VERSION" $TMP_DIR/blkid.output | cut -d= -f2 | tr -c -d '[:digit:]' ) -+ if ! test "$version" = "1" -o "$version" = "2" ; then -+ LogPrintError "Error: Unsupported LUKS version for $target_name ('blkid -p -o export $source_device' shows 'VERSION=$version')" -+ continue -+ fi -+ luks_type=luks$version - -- echo "crypt /dev/mapper/$target_name $source_device cipher=$cipher-$mode key_size=$key_size hash=$hash uuid=$uuid $keyfile_option" >> $DISKLAYOUT_FILE --done < <( dmsetup ls --target crypt ) -+ # Gather crypt information: -+ if ! 
cryptsetup luksDump $source_device >$TMP_DIR/cryptsetup.luksDump ; then -+ LogPrintError "Error: Cannot get LUKS$version values for $target_name ('cryptsetup luksDump $source_device' failed)" -+ continue -+ fi -+ uuid=$( grep "UUID" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) -+ keyfile_option=$( [ -f /etc/crypttab ] && awk '$1 == "'"$target_name"'" && $3 != "none" && $3 != "-" && $3 != "" { print "keyfile=" $3; }' /etc/crypttab ) -+ if test $luks_type = "luks1" ; then -+ cipher_name=$( grep "Cipher name" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) -+ cipher_mode=$( grep "Cipher mode" $TMP_DIR/cryptsetup.luksDump | cut -d: -f2- | awk '{printf("%s",$1)};' ) -+ cipher=$cipher_name-$cipher_mode -+ key_size=$( grep "MK bits" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) -+ hash=$( grep "Hash spec" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) -+ elif test $luks_type = "luks2" ; then -+ cipher=$( grep "cipher:" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) -+ # More than one keyslot may be defined - use key_size from the first slot. -+ # Depending on the version the "cryptsetup luksDump" command outputs the key_size value -+ # as a line like -+ # Key: 512 bits -+ # and/or as a line like -+ # Cipher key: 512 bits -+ # cf. https://github.com/rear/rear/pull/2504#issuecomment-718729198 and subsequent comments -+ # so we grep for both lines but use only the first match from the first slot: -+ key_size=$( egrep -m 1 "Key:|Cipher key:" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+) bits$/\1/' ) -+ hash=$( grep "Hash" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) -+ fi - --# cryptsetup is required in the recovery system if disklayout.conf contains at least one 'crypt' entry --# see the create_crypt function in layout/prepare/GNU/Linux/160_include_luks_code.sh --# what program calls are written to diskrestore.sh --# cf. https://github.com/rear/rear/issues/1963 --grep -q '^crypt ' $DISKLAYOUT_FILE && REQUIRED_PROGS+=( cryptsetup ) || true -+ # Basic checks that the cipher key_size hash uuid values exist -+ # cf. https://github.com/rear/rear/pull/2504#issuecomment-718729198 -+ # because some values are needed during "rear recover" -+ # to set cryptsetup options in layout/prepare/GNU/Linux/160_include_luks_code.sh -+ # and it seems cryptsetup fails when options with empty values are specified -+ # cf. https://github.com/rear/rear/pull/2504#issuecomment-719479724 -+ # For example a LUKS1 crypt entry in disklayout.conf looks like -+ # crypt /dev/mapper/luks1test /dev/sda7 type=luks1 cipher=aes-xts-plain64 key_size=256 hash=sha256 uuid=1b4198c9-d9b0-4c57-b9a3-3433e391e706 -+ # and a LUKS1 crypt entry in disklayout.conf looks like -+ # crypt /dev/mapper/luks2test /dev/sda8 type=luks2 cipher=aes-xts-plain64 key_size=256 hash=sha256 uuid=3e874a28-7415-4f8c-9757-b3f28a96c4d2 -+ # Only the keyfile_option value is optional and the luks_type value is already tested above. -+ # Using plain test to ensure a value is a single non empty and non blank word -+ # without quoting because test " " would return zero exit code -+ # cf. "Beware of the emptiness" in https://github.com/rear/rear/wiki/Coding-Style -+ # Do not error out instantly here but only report errors here so the user can see all messages -+ # and actually error out at the end of this script if there was one actually invalid value: -+ if ! 
test $cipher ; then -+ LogPrint "No 'cipher' value for LUKS$version volume $target_name in $source_device" -+ fi -+ if test $key_size ; then -+ if ! is_positive_integer $key_size ; then -+ LogPrintError "Error: 'key_size=$key_size' is no positive integer for LUKS$version volume $target_name in $source_device" -+ invalid_cryptsetup_option_value="yes" -+ fi -+ else -+ LogPrint "No 'key_size' value for LUKS$version volume $target_name in $source_device" -+ fi -+ if ! test $hash ; then -+ LogPrint "No 'hash' value for LUKS$version volume $target_name in $source_device" -+ fi -+ if ! test $uuid ; then -+ # Report a missig uuid value as an error to have the user informed -+ # but do not error out here because things can be fixed manually during "rear recover" -+ # cf. https://github.com/rear/rear/pull/2506#issuecomment-721757810 -+ # and https://github.com/rear/rear/pull/2506#issuecomment-722315498 -+ # and https://github.com/rear/rear/issues/2509 -+ LogPrintError "Error: No 'uuid' value for LUKS$version volume $target_name in $source_device (mounting it or booting the recreated system may fail)" -+ fi -+ -+ echo "crypt /dev/mapper/$target_name $source_device type=$luks_type cipher=$cipher key_size=$key_size hash=$hash uuid=$uuid $keyfile_option" >> $DISKLAYOUT_FILE -+ -+done < <( dmsetup ls --target crypt ) - -+# Let this script return successfully when invalid_cryptsetup_option_value is not true: -+is_true $invalid_cryptsetup_option_value && Error "Invalid or empty LUKS cryptsetup option value(s) in $DISKLAYOUT_FILE" || true diff --git a/rear-bz1930662.patch b/rear-bz1930662.patch deleted file mode 100644 index aaeae6f..0000000 --- a/rear-bz1930662.patch +++ /dev/null @@ -1,693 +0,0 @@ -diff --git a/usr/share/rear/backup/NETFS/default/500_make_backup.sh b/usr/share/rear/backup/NETFS/default/500_make_backup.sh -index 02c204c5..60c80b5f 100644 ---- a/usr/share/rear/backup/NETFS/default/500_make_backup.sh -+++ b/usr/share/rear/backup/NETFS/default/500_make_backup.sh -@@ -16,6 +16,8 @@ function set_tar_features () { - FEATURE_TAR_IS_SET=1 - } - -+local backup_prog_rc -+ - local scheme=$( url_scheme $BACKUP_URL ) - local path=$( url_path $BACKUP_URL ) - local opath=$( backup_path $scheme $path ) -diff --git a/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh b/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh -index c560ec94..1692ba4c 100644 ---- a/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh -+++ b/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh -@@ -1,5 +1,7 @@ - # Start SELinux if it was stopped - check presence of $TMP_DIR/selinux.mode - -+local backup_prog_rc -+ - [ -f $TMP_DIR/selinux.mode ] && { - touch "${TMP_DIR}/selinux.autorelabel" - cat $TMP_DIR/selinux.mode > $SELINUX_ENFORCE -@@ -13,19 +15,19 @@ - ssh $RSYNC_USER@$RSYNC_HOST "chmod $v 755 ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" 2>/dev/null - $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" \ - "$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" 2>/dev/null -- _rc=$? -- if [ $_rc -ne 0 ]; then -- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]" -+ backup_prog_rc=$? 
-+ if [ $backup_prog_rc -ne 0 ]; then -+ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" - #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" - fi - ;; - - (rsync) -- $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" ${BACKUP_RSYNC_OPTIONS[@]} \ -+ $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" "${BACKUP_RSYNC_OPTIONS[@]}" \ - "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" -- _rc=$? -- if [ $_rc -ne 0 ]; then -- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]" -+ backup_prog_rc=$? -+ if [ $backup_prog_rc -ne 0 ]; then -+ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" - #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" - fi - ;; -diff --git a/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh b/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh -index cae12e38..9a17d6bb 100644 ---- a/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh -+++ b/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh -@@ -1,3 +1,5 @@ -+local backup_prog_rc -+ - [ -f $TMP_DIR/force.autorelabel ] && { - - > "${TMP_DIR}/selinux.autorelabel" -@@ -11,19 +13,19 @@ - ssh $RSYNC_USER@$RSYNC_HOST "chmod $v 755 ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" 2>/dev/null - $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" \ - "$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" 2>/dev/null -- _rc=$? -- if [ $_rc -ne 0 ]; then -- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]" -+ backup_prog_rc=$? -+ if [ $backup_prog_rc -ne 0 ]; then -+ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" - #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" - fi - ;; - - (rsync) -- $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" ${BACKUP_RSYNC_OPTIONS[@]} \ -+ $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" "${BACKUP_RSYNC_OPTIONS[@]}" \ - "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" -- _rc=$? -- if [ $_rc -ne 0 ]; then -- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]" -+ backup_prog_rc=$? -+ if [ $backup_prog_rc -ne 0 ]; then -+ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" - #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" - fi - ;; -@@ -35,8 +37,7 @@ - # probably using the BACKUP=NETFS workflow instead - if [ -d "${opath}" ]; then - if [ ! 
-f "${opath}/selinux.autorelabel" ]; then -- > "${opath}/selinux.autorelabel" -- StopIfError "Failed to create selinux.autorelabel on ${opath}" -+ > "${opath}/selinux.autorelabel" || Error "Failed to create selinux.autorelabel on ${opath}" - fi - fi - ;; -diff --git a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh b/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh -index 60330007..cedee9ce 100644 ---- a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh -+++ b/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh -@@ -4,7 +4,7 @@ - # check for the --relative option in BACKUP_RSYNC_OPTIONS array - # for the default values see the standard definition in conf/default.conf file - --if ! grep -q relative <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then -+if ! grep -q relative <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then - BACKUP_RSYNC_OPTIONS+=( --relative ) - Log "Added option '--relative' to the BACKUP_RSYNC_OPTIONS array during $WORKFLOW workflow" - fi -diff --git a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh -index 0d67d362..750a04ca 100644 ---- a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh -+++ b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh -@@ -2,6 +2,9 @@ - # This file is part of Relax-and-Recover, licensed under the GNU General - # Public License. Refer to the included COPYING for full text of license. - -+local backup_prog_rc -+local backup_log_message -+ - Log "Include list:" - while read -r ; do - Log " $REPLY" -@@ -11,9 +14,9 @@ while read -r ; do - Log " $REPLY" - done < $TMP_DIR/backup-exclude.txt - --LogPrint "Creating $BACKUP_PROG archive on '${RSYNC_HOST}:${RSYNC_PATH}'" -+LogPrint "Creating $BACKUP_PROG backup on '${RSYNC_HOST}:${RSYNC_PATH}'" - --ProgressStart "Running archive operation" -+ProgressStart "Running backup operation" - ( - case "$(basename $BACKUP_PROG)" in - -@@ -37,7 +40,7 @@ ProgressStart "Running archive operation" - ;; - - (*) -- # no other backup programs foreseen then rsync so far -+ # no other backup programs foreseen than rsync so far - : - ;; - -@@ -96,7 +99,7 @@ case "$(basename $BACKUP_PROG)" in - ;; - esac - -- ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec]" -+ ProgressInfo "Backed up $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec]" - done - ;; - -@@ -113,24 +116,23 @@ ProgressStop - wait $BackupPID - - transfertime="$((SECONDS-starttime))" --_rc="$(cat $TMP_DIR/retval)" -+backup_prog_rc="$(cat $TMP_DIR/retval)" - - sleep 1 - # everyone should see this warning, even if not verbose --test "$_rc" -gt 0 && VERBOSE=1 LogPrint "WARNING ! --There was an error (${rsync_err_msg[$_rc]}) during archive creation. --Please check the archive and see '$RUNTIME_LOGFILE' for more information. -+test "$backup_prog_rc" -gt 0 && Error " -+There was an error (${rsync_err_msg[$backup_prog_rc]}) during backup creation. -+Please check the destination and see '$RUNTIME_LOGFILE' for more information. - --Since errors are often related to files that cannot be saved by --$BACKUP_PROG, we will continue the $WORKFLOW process. However, you MUST --verify the backup yourself before trusting it ! -+If the error is related to files that cannot and should not be saved by -+$BACKUP_PROG, they should be excluded from the backup. 
- - " - --_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log)" --if [ $_rc -eq 0 -a "$_message" ] ; then -- LogPrint "$_message in $transfertime seconds." -+backup_log_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log)" -+if [ $backup_prog_rc -eq 0 -a "$backup_log_message" ] ; then -+ LogPrint "$backup_log_message in $transfertime seconds." - elif [ "$size" ]; then -- LogPrint "Archived $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]" -+ LogPrint "Backed up $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]" - fi - -diff --git a/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh b/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh -index 01801a4e..b90d459b 100644 ---- a/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh -+++ b/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh -@@ -1,6 +1,8 @@ - - # copy the backup.log & rear.log file to remote destination with timestamp added --Timestamp=$( date +%Y%m%d.%H%M ) -+local timestamp -+ -+timestamp=$( date +%Y%m%d.%H%M ) - - # compress the log file first - gzip "$TMP_DIR/$BACKUP_PROG_ARCHIVE.log" || Error "Failed to 'gzip $TMP_DIR/$BACKUP_PROG_ARCHIVE.log'" -@@ -10,15 +12,15 @@ case $RSYNC_PROTO in - # FIXME: Add an explanatory comment why "2>/dev/null" is useful here - # or remove it according to https://github.com/rear/rear/issues/1395 - $BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" \ -- "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${Timestamp}.log.gz" 2>/dev/null -+ "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" 2>/dev/null - -- $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/rear-${Timestamp}.log" 2>/dev/null -+ $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/rear-${timestamp}.log" 2>/dev/null - ;; - (rsync) -- $BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" ${BACKUP_RSYNC_OPTIONS[@]} \ -- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${Timestamp}.log.gz" -+ $BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" "${BACKUP_RSYNC_OPTIONS[@]}" \ -+ "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" - -- $BACKUP_PROG -a "$RUNTIME_LOGFILE" ${BACKUP_RSYNC_OPTIONS[@]} "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}//rear-${Timestamp}.log" -+ $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}//rear-${timestamp}.log" - ;; - esac - -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index 455aa3ce..0c230f38 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -1106,7 +1106,8 @@ BACKUP_ONLY_EXCLUDE="no" - MANUAL_INCLUDE=NO - # Disable SELinux policy during backup with NETFS or RSYNC (default yes) - BACKUP_SELINUX_DISABLE=1 --# Enable integrity check of the backup archive (only with BACKUP=NETFS and BACKUP_PROG=tar) -+# Enable integrity check of the backup archive (full check only with BACKUP=NETFS and BACKUP_PROG=tar, -+# with BACKUP=rsync or BACKUP_PROG=rsync it only checks whether rsync completed the restore successfully) - BACKUP_INTEGRITY_CHECK= 
- # Define BACKUP_TYPE. - # By default BACKUP_TYPE is empty which means "rear mkbackup" will create a full backup. -diff --git a/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh b/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh -index 32ac391d..519febf5 100644 ---- a/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh -+++ b/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh -@@ -2,21 +2,19 @@ - # RSYNC_PREFIX=$HOSTNAME as set in default.conf - - # create temporary local work-spaces to collect files (we already make the remote backup dir with the correct mode!!) --mkdir -p $v -m0750 "${TMP_DIR}/rsync/${RSYNC_PREFIX}" >&2 --StopIfError "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}'" --mkdir -p $v -m0755 "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" >&2 --StopIfError "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup'" -+mkdir -p $v -m0750 "${TMP_DIR}/rsync/${RSYNC_PREFIX}" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}'" -+mkdir -p $v -m0755 "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup'" - - case $RSYNC_PROTO in - - (ssh) -- $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}" >/dev/null 2>&1 -- StopIfError "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}" -+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}" >/dev/null 2>&1 \ -+ || Error "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}" - ;; - - (rsync) -- $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" ${BACKUP_RSYNC_OPTIONS[@]} "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/" >/dev/null -- StopIfError "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}" -+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/" >/dev/null \ -+ || Error "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}" - ;; - - esac -diff --git a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh -index c7b430d8..96b62da1 100644 ---- a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh -+++ b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh -@@ -5,19 +5,19 @@ LogPrint "Copying resulting files to $OUTPUT_URL location" - - # if called as mkbackuponly then we just don't have any result files. 
- if test "$RESULT_FILES" ; then -- Log "Copying files '${RESULT_FILES[@]}' to $OUTPUT_URL location" -- cp $v "${RESULT_FILES[@]}" "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" -- StopIfError "Could not copy files to local rsync location" -+ Log "Copying files '${RESULT_FILES[*]}' to $OUTPUT_URL location" -+ cp $v "${RESULT_FILES[@]}" "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" \ -+ || Error "Could not copy files to local rsync location" - fi - --echo "$VERSION_INFO" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/VERSION" --StopIfError "Could not create VERSION file on local rsync location" -+echo "$VERSION_INFO" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/VERSION" \ -+ || Error "Could not create VERSION file on local rsync location" - --cp $v $(get_template "RESULT_usage_$OUTPUT.txt") "${TMP_DIR}/rsync/${RSYNC_PREFIX}/README" --StopIfError "Could not copy usage file to local rsync location" -+cp $v $(get_template "RESULT_usage_$OUTPUT.txt") "${TMP_DIR}/rsync/${RSYNC_PREFIX}/README" \ -+ || Error "Could not copy usage file to local rsync location" - --cat "$RUNTIME_LOGFILE" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/rear.log" --StopIfError "Could not copy $RUNTIME_LOGFILE to local rsync location" -+cat "$RUNTIME_LOGFILE" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/rear.log" \ -+ || Error "Could not copy $RUNTIME_LOGFILE to local rsync location" - - case $RSYNC_PROTO in - -@@ -25,20 +25,20 @@ case $RSYNC_PROTO in - Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" - # FIXME: Add an explanatory comment why "2>/dev/null" is useful here - # or remove it according to https://github.com/rear/rear/issues/1395 -- $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null -- StopIfError "Could not copy '${RESULT_FILES[@]}' to $OUTPUT_URL location" -+ $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null \ -+ || Error "Could not copy '${RESULT_FILES[*]}' to $OUTPUT_URL location" - ;; - - (rsync) -- Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${BACKUP_RSYNC_OPTIONS[@]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" -+ Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${BACKUP_RSYNC_OPTIONS[*]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" - # FIXME: Add an explanatory comment why "2>/dev/null" is useful here - # or remove it according to https://github.com/rear/rear/issues/1395 -- $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" ${BACKUP_RSYNC_OPTIONS[@]} "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null -- StopIfError "Could not copy '${RESULT_FILES[@]}' to $OUTPUT_URL location" -+ $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null \ -+ || Error "Could not copy '${RESULT_FILES[*]}' to $OUTPUT_URL location" - ;; - - esac - - # cleanup the temporary space (need it for the log file during backup) --rm -rf "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" --LogIfError "Could not cleanup temoprary rsync space: ${TMP_DIR}/rsync/${RSYNC_PREFIX}/" -+rm -rf "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" \ -+ || Log "Could not cleanup temporary rsync space: ${TMP_DIR}/rsync/${RSYNC_PREFIX}/" -diff --git a/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh 
b/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh -index fadf9d72..3c719c44 100644 ---- a/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh -+++ b/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh -@@ -31,7 +31,7 @@ case $scheme in - backup_directory_mountpoint=$( df -P "$backup_directory" | tail -1 | awk '{print $6}' ) - test "/" = "$backup_directory_mountpoint" && Error "URL '$BACKUP_URL' has the backup directory '$backup_directory' in the '/' filesystem which is forbidden." - # When the mountpoint of the backup directory is not yet excluded add its mountpoint to the EXCLUDE_RECREATE array: -- if ! grep -q "$backup_directory_mountpoint" <<< $( echo ${EXCLUDE_RECREATE[@]} ) ; then -+ if ! grep -q "$backup_directory_mountpoint" <<< "${EXCLUDE_RECREATE[*]}" ; then - EXCLUDE_RECREATE+=( "fs:$backup_directory_mountpoint" ) - fi - ;; -diff --git a/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh b/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh -index ac26edfa..eb7df29e 100644 ---- a/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh -+++ b/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh -@@ -33,7 +33,7 @@ case $(basename $BACKUP_PROG) in - touch $TMP_DIR/force.autorelabel # after reboot the restored system do a forced SELinux relabeling - else - # if --xattrs is already set; no need to do it again -- if ! grep -q xattrs <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then -+ if ! grep -q xattrs <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then - BACKUP_RSYNC_OPTIONS+=( --xattrs ) - fi - RSYNC_SELINUX=1 # variable used in recover mode (means using xattr and not disable SELinux) -diff --git a/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh b/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh -index b8535352..c964a148 100644 ---- a/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh -+++ b/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh -@@ -33,22 +33,20 @@ RSYNC_PORT=873 # default port (of rsync server) - RSYNC_PATH= - - --echo $BACKUP_URL | egrep -q '(::)' # new style '::' means rsync protocol --if [[ $? -eq 0 ]]; then -+if egrep -q '(::)' <<< $BACKUP_URL ; then # new style '::' means rsync protocol - RSYNC_PROTO=rsync - else - RSYNC_PROTO=ssh - fi - --echo $host | grep -q '@' --if [[ $? -eq 0 ]]; then -+if grep -q '@' <<< $host ; then - RSYNC_USER="${host%%@*}" # grab user name - else - RSYNC_USER=root - fi - - # remove USER@ if present (we don't need it anymore) --tmp2="${host#*@}" -+local tmp2="${host#*@}" - - case "$RSYNC_PROTO" in - -@@ -56,8 +54,7 @@ case "$RSYNC_PROTO" in - # tmp2=witsbebelnx02::backup or tmp2=witsbebelnx02:: - RSYNC_HOST="${tmp2%%::*}" - # path=/gdhaese1@witsbebelnx02::backup or path=/backup -- echo $path | grep -q '::' -- if [[ $? -eq 0 ]]; then -+ if grep -q '::' <<< $path ; then - RSYNC_PATH="${path##*::}" - else - RSYNC_PATH="${path##*/}" -@@ -79,8 +76,7 @@ esac - - # check if host is reachable - if test "$PING" ; then -- ping -c 2 "$RSYNC_HOST" >/dev/null -- StopIfError "Backup host [$RSYNC_HOST] not reachable." -+ ping -c 2 "$RSYNC_HOST" >/dev/null || Error "Backup host [$RSYNC_HOST] not reachable." 
- else - Log "Skipping ping test" - fi -@@ -89,15 +85,15 @@ fi - case "$RSYNC_PROTO" in - - (rsync) -- Log "Test: $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[@]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/" -- $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[@]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/ >/dev/null -- StopIfError "Rsync daemon not running on $RSYNC_HOST" -+ Log "Test: $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[*]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/" -+ $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/ >/dev/null \ -+ || Error "Rsync daemon not running on $RSYNC_HOST" - ;; - - (ssh) - Log "Test: ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true" -- ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true >/dev/null 2>&1 -- StopIfError "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]" -+ ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true >/dev/null 2>&1 \ -+ || Error "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]" - ;; - - esac -diff --git a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh -index 446dd736..e9103531 100644 ---- a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh -+++ b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh -@@ -2,15 +2,17 @@ - # This file is part of Relax-and-Recover, licensed under the GNU General - # Public License. Refer to the included COPYING for full text of license. - # try to grab the rsync protocol version of rsync on the remote server -+ -+local remote_mountpoint -+ - if [ -z "$RSYNC_PROTOCOL_VERSION" ]; then - - case $RSYNC_PROTO in - - (ssh) -- ssh ${RSYNC_USER}@${RSYNC_HOST} rsync --version >"$TMP_DIR/rsync_protocol" 2>&1 -- StopIfError "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]" -- grep -q "protocol version" "$TMP_DIR/rsync_protocol" -- if [ $? -eq 0 ]; then -+ ssh ${RSYNC_USER}@${RSYNC_HOST} rsync --version >"$TMP_DIR/rsync_protocol" 2>&1 \ -+ || Error "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]" -+ if grep -q "protocol version" "$TMP_DIR/rsync_protocol" ; then - RSYNC_PROTOCOL_VERSION=$(grep 'protocol version' "$TMP_DIR/rsync_protocol" | awk '{print $6}') - else - RSYNC_PROTOCOL_VERSION=29 # being conservative (old rsync version < 3.0) -@@ -30,25 +32,21 @@ else - - fi - --if [ "${RSYNC_USER}" != "root" ]; then -+if [ "${RSYNC_USER}" != "root" -a $RSYNC_PROTO = "ssh" ]; then - if [ $RSYNC_PROTOCOL_VERSION -gt 29 ]; then - if grep -q "no xattrs" "$TMP_DIR/rsync_protocol"; then - # no xattrs available in remote rsync, so --fake-super is not possible - Error "rsync --fake-super not possible on system ($RSYNC_HOST) (no xattrs compiled in rsync)" - else - # when using --fake-super we must have user_xattr mount options on the remote mntpt -- _mntpt=$(ssh ${RSYNC_USER}@${RSYNC_HOST} 'cd ${RSYNC_PATH}; df -P .' 2>/dev/null | tail -1 | awk '{print $6}') -- ssh ${RSYNC_USER}@${RSYNC_HOST} "cd ${RSYNC_PATH} && touch .is_xattr_supported && setfattr -n user.comment -v 'File created by ReaR to test if this filesystems supports extended attributes.' .is_xattr_supported && getfattr -n user.comment .is_xattr_supported 1>/dev/null; find .is_xattr_supported -empty -delete" -- StopIfError "Remote file system $_mntpt does not have user_xattr mount option set!" 
-- #BACKUP_RSYNC_OPTIONS+=( --xattrs --rsync-path="""rsync --fake-super""" ) -+ remote_mountpoint=$(ssh ${RSYNC_USER}@${RSYNC_HOST} 'cd ${RSYNC_PATH}; df -P .' 2>/dev/null | tail -1 | awk '{print $6}') -+ ssh ${RSYNC_USER}@${RSYNC_HOST} "cd ${RSYNC_PATH} && touch .is_xattr_supported && setfattr -n user.comment -v 'File created by ReaR to test if this filesystems supports extended attributes.' .is_xattr_supported && getfattr -n user.comment .is_xattr_supported 1>/dev/null; find .is_xattr_supported -empty -delete" \ -+ || Error "Remote file system $remote_mountpoint does not have user_xattr mount option set!" -+ #BACKUP_RSYNC_OPTIONS+=( --xattrs --rsync-path="rsync --fake-super" ) - # see issue #366 for explanation of removing --xattrs -- BACKUP_RSYNC_OPTIONS+=( --rsync-path="""rsync --fake-super""" ) -+ BACKUP_RSYNC_OPTIONS+=( --rsync-path="rsync --fake-super" ) - fi - else -- if [ ${BACKUP_RSYNC_OPTIONS[@]/--fake-super/} != ${BACKUP_RSUNC_OPTIONS[@]} ]; then -- Error "rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)" -- else -- Log "Warning: rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)" -- fi -+ Error "rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)" - fi - fi -diff --git a/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh b/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh -index 0a9c9648..220ccc57 100644 ---- a/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh -+++ b/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh -@@ -5,6 +5,8 @@ - # Restore from remote backup via DUPLICIY over rsync - - if [ "$BACKUP_PROG" = "duplicity" ]; then -+ local backup_prog_rc -+ local restore_log_message - - LogPrint "========================================================================" - LogPrint "Restoring backup with $BACKUP_PROG from '$BACKUP_DUPLICITY_URL'" -@@ -49,7 +51,8 @@ if [ "$BACKUP_PROG" = "duplicity" ]; then - LogPrint "with CMD: $DUPLICITY_PROG -v 5 $GPG_KEY --force --tempdir=$DUPLICITY_TEMPDIR $BACKUP_DUPLICITY_URL/$HOSTNAME/ $TARGET_FS_ROOT" - $DUPLICITY_PROG -v 5 $GPG_KEY --force --tempdir="$DUPLICITY_TEMPDIR" $BACKUP_DUPLICITY_URL/$HOSTNAME/ $TARGET_FS_ROOT 0<&6 | tee $TMP_DIR/duplicity-restore.log - fi -- _rc=$? -+ # FIXME: this collects the exit code from "tee", not from $DUPLICITY_PROG -+ backup_prog_rc=$? - - transfertime="$((SECONDS-$starttime))" - sleep 1 -@@ -65,20 +68,20 @@ if [ "$BACKUP_PROG" = "duplicity" ]; then - LogPrint "========================================================================" - - -- if [ "$_rc" -gt 0 ]; then -+ if [ "$backup_prog_rc" -gt 0 ]; then - LogPrint "WARNING ! - There was an error while restoring the archive. - Please check '$RUNTIME_LOGFILE' and $TMP_DIR/duplicity-restore.log for more information. - You should also manually check the restored system to see whether it is complete. - " - -- _message="$(tail -14 ${TMP_DIR}/duplicity-restore.log)" -+ restore_log_message="$(tail -14 ${TMP_DIR}/duplicity-restore.log)" - - LogPrint "Last 14 Lines of ${TMP_DIR}/duplicity-restore.log:" -- LogPrint "$_message" -+ LogPrint "$restore_log_message" - fi - -- if [ $_rc -eq 0 ] ; then -+ if [ $backup_prog_rc -eq 0 ] ; then - LogPrint "Restore completed in $transfertime seconds." 
- fi - -diff --git a/usr/share/rear/restore/RBME/default/400_restore_backup.sh b/usr/share/rear/restore/RBME/default/400_restore_backup.sh -index 28a3c354..3e97e16b 100644 ---- a/usr/share/rear/restore/RBME/default/400_restore_backup.sh -+++ b/usr/share/rear/restore/RBME/default/400_restore_backup.sh -@@ -2,6 +2,8 @@ if [[ -z "$RBME_BACKUP" ]] ; then - Error "No RBME backup selected (BACKUP_URL?). Aborting." - fi - -+local backup_prog_rc -+ - scheme=$(url_scheme "$BACKUP_URL") - - LogPrint "Restoring from backup $RBME_BACKUP." -@@ -43,11 +45,11 @@ transfertime="$((SECONDS-starttime))" - # harvest return code from background job. The kill -0 $BackupPID loop above should - # have made sure that this wait won't do any real "waiting" :-) - wait $BackupPID --_rc=$? -+backup_prog_rc=$? - - sleep 1 --test "$_rc" -gt 0 && LogPrint "WARNING ! --There was an error (${rsync_err_msg[$_rc]}) while restoring the archive. -+test "$backup_prog_rc" -gt 0 && LogPrint "WARNING ! -+There was an error (${rsync_err_msg[$backup_prog_rc]}) while restoring the archive. - Please check '$RUNTIME_LOGFILE' for more information. You should also - manually check the restored system to see whether it is complete. - " -diff --git a/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh b/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh -index 53915322..a792f195 100644 ---- a/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh -+++ b/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh -@@ -4,11 +4,11 @@ - # without the --relative option ; my feeling says it is better to remove it from array BACKUP_RSYNC_OPTIONS - # If I'm wrong please let us know (use issue mentioned above to comment) - --if grep -q relative <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then -+if grep -q -- "--relative" <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then - BACKUP_RSYNC_OPTIONS=( $( RmInArray "--relative" "${BACKUP_RSYNC_OPTIONS[@]}" ) ) - Log "Removed option '--relative' from the BACKUP_RSYNC_OPTIONS array during $WORKFLOW workflow" - fi --if grep -q "-R" <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then -+if grep -q -- "-R" <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then - BACKUP_RSYNC_OPTIONS=( $( RmInArray "-R" "${BACKUP_RSYNC_OPTIONS[@]}" ) ) - Log "Removed option '-R' from the BACKUP_RSYNC_OPTIONS array during $WORKFLOW workflow" - fi -diff --git a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh -index 2a0bf15e..993088be 100644 ---- a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh -+++ b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh -@@ -4,10 +4,10 @@ get_size() { - echo $( stat --format '%s' "$TARGET_FS_ROOT/$1" ) - } - --mkdir -p "${TMP_DIR}/rsync/${NETFS_PREFIX}" --StopIfError "Could not mkdir '$TMP_DIR/rsync/${NETFS_PREFIX}'" -+local backup_prog_rc -+local restore_log_message - --LogPrint "Restoring $BACKUP_PROG archive from '${RSYNC_HOST}:${RSYNC_PATH}'" -+LogPrint "Restoring $BACKUP_PROG backup from '${RSYNC_HOST}:${RSYNC_PATH}'" - - ProgressStart "Restore operation" - ( -@@ -33,9 +33,10 @@ ProgressStart "Restore operation" - ;; - - (*) -- # no other backup programs foreseen then rsync so far -+ # no other backup programs foreseen than rsync so far - : - ;; -+ - esac - echo $? 
>$TMP_DIR/retval - ) >"${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log" & -@@ -65,6 +66,7 @@ case "$(basename $BACKUP_PROG)" in - ProgressStep - done - ;; -+ - esac - ProgressStop - -@@ -72,20 +74,28 @@ transfertime="$((SECONDS-starttime))" - - # harvest return code from background job. The kill -0 $BackupPID loop above should - # have made sure that this wait won't do any real "waiting" :-) --wait $BackupPID --_rc=$? -+wait $BackupPID || LogPrintError "Restore job returned a nonzero exit code $?" -+# harvest the actual return code of rsync. Finishing the pipeline with an error code above is actually unlikely, -+# because rsync is not the last command in it. But error returns from rsync are common and must be handled. -+backup_prog_rc="$(cat $TMP_DIR/retval)" - - sleep 1 --test "$_rc" -gt 0 && LogPrint "WARNING ! --There was an error (${rsync_err_msg[$_rc]}) while restoring the archive. -+if test "$backup_prog_rc" -gt 0 ; then -+ # TODO: Shouldn't we tell the user to check ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log as well? -+ LogPrintError "WARNING ! -+There was an error (${rsync_err_msg[$backup_prog_rc]}) while restoring the backup. - Please check '$RUNTIME_LOGFILE' for more information. You should also - manually check the restored system to see whether it is complete. - " -+ is_true "$BACKUP_INTEGRITY_CHECK" && Error "Integrity check failed, restore aborted because BACKUP_INTEGRITY_CHECK is enabled" -+fi - --_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log)" -+restore_log_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log)" - --if [ $_rc -eq 0 -a "$_message" ] ; then -- LogPrint "$_message in $transfertime seconds." -+if [ $backup_prog_rc -eq 0 -a "$restore_log_message" ] ; then -+ LogPrint "$restore_log_message in $transfertime seconds." - elif [ "$size" ]; then - LogPrint "Restored $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]" - fi -+ -+return $backup_prog_rc -diff --git a/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh b/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh -index 3622884a..890161f1 100644 ---- a/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh -+++ b/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh -@@ -3,8 +3,8 @@ - [[ $RSYNC_SELINUX ]] && { - - # if --xattrs is already set; no need to do it again -- if ! grep -q xattrs <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then -- RSYNC_OPTIONS=( "${BACKUP_RSYNC_OPTIONS[@]}" --xattrs ) -+ if ! 
grep -q xattrs <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then -+ BACKUP_RSYNC_OPTIONS+=( --xattrs ) - fi - - } -diff --git a/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh b/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh -index 47ed9e02..b2fb72f5 100644 ---- a/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh -+++ b/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh -@@ -3,12 +3,12 @@ - case $RSYNC_PROTO in - - (ssh) -- ssh ${RSYNC_USER}@${RSYNC_HOST} "ls -ld ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 -- StopIfError "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]" -+ ssh ${RSYNC_USER}@${RSYNC_HOST} "ls -ld ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 \ -+ || Error "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]" - ;; - - (rsync) -- $BACKUP_PROG "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 -- StopIfError "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]" -+ $BACKUP_PROG "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 \ -+ || Error "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]" - ;; - esac diff --git a/rear-bz1945869.patch b/rear-bz1945869.patch deleted file mode 100644 index a17a7cd..0000000 --- a/rear-bz1945869.patch +++ /dev/null @@ -1,274 +0,0 @@ -diff --git a/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh b/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh -old mode 100644 -new mode 100755 -index cc646359..33d87767 ---- a/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh -+++ b/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh -@@ -8,6 +8,10 @@ is_true $USING_UEFI_BOOTLOADER || return 0 - # (cf. finalize/Linux-i386/610_EFISTUB_run_efibootmgr.sh): - is_true $EFI_STUB && return - -+LogPrint "Creating EFI Boot Manager entries..." -+ -+local esp_mountpoint esp_mountpoint_inside boot_efi_parts boot_efi_dev -+ - # When UEFI_BOOTLOADER is not a regular file in the restored target system - # (cf. how esp_mountpoint is set below) it means BIOS is used - # (cf. rescue/default/850_save_sysfs_uefi_vars.sh) -@@ -15,64 +19,80 @@ is_true $EFI_STUB && return - # because when UEFI_BOOTLOADER is empty the test below evaluates to - # test -f /mnt/local/ - # which also returns false because /mnt/local/ is a directory --# (cf. https://github.com/rear/rear/pull/2051/files#r258826856): --test -f "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" || return 0 -+# (cf. https://github.com/rear/rear/pull/2051/files#r258826856) -+# but using BIOS conflicts with USING_UEFI_BOOTLOADER is true -+# i.e. we should create EFI Boot Manager entries but we cannot: -+if ! 
test -f "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" ; then -+ LogPrintError "Failed to create EFI Boot Manager entries (UEFI bootloader '$UEFI_BOOTLOADER' not found under target $TARGET_FS_ROOT)" -+ return 1 -+fi - - # Determine where the EFI System Partition (ESP) is mounted in the currently running recovery system: --esp_mountpoint=$( df -P "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" | tail -1 | awk '{print $6}' ) --# Use TARGET_FS_ROOT/boot/efi as fallback ESP mountpoint: --test "$esp_mountpoint" || esp_mountpoint="$TARGET_FS_ROOT/boot/efi" -+esp_mountpoint=$( filesystem_name "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" ) -+# Use TARGET_FS_ROOT/boot/efi as fallback ESP mountpoint (filesystem_name returns "/" -+# if mountpoint not found otherwise): -+if [ "$esp_mountpoint" = "/" ] ; then -+ esp_mountpoint="$TARGET_FS_ROOT/boot/efi" -+ LogPrint "Mountpoint of $TARGET_FS_ROOT/$UEFI_BOOTLOADER not found, trying $esp_mountpoint" -+fi - - # Skip if there is no esp_mountpoint directory (e.g. the fallback ESP mountpoint may not exist). - # Double quotes are mandatory here because 'test -d' without any (possibly empty) argument results true: --test -d "$esp_mountpoint" || return 0 -- --BootEfiDev="$( mount | grep "$esp_mountpoint" | awk '{print $1}' )" --# /dev/sda1 or /dev/mapper/vol34_part2 or /dev/mapper/mpath99p4 --Dev=$( get_device_name $BootEfiDev ) --# 1 (must anyway be a low nr <9) --ParNr=$( get_partition_number $Dev ) --# /dev/sda or /dev/mapper/vol34_part or /dev/mapper/mpath99p or /dev/mmcblk0p --Disk=$( echo ${Dev%$ParNr} ) -- --# Strip trailing partition remainders like '_part' or '-part' or 'p' --# if we have 'mapper' in disk device name: --if [[ ${Dev/mapper//} != $Dev ]] ; then -- # we only expect mpath_partX or mpathpX or mpath-partX -- case $Disk in -- (*p) Disk=${Disk%p} ;; -- (*-part) Disk=${Disk%-part} ;; -- (*_part) Disk=${Disk%_part} ;; -- (*) Log "Unsupported kpartx partition delimiter for $Dev" -- esac -+if ! test -d "$esp_mountpoint" ; then -+ LogPrintError "Failed to create EFI Boot Manager entries (no ESP mountpoint directory $esp_mountpoint)" -+ return 1 - fi - --# For eMMC devices the trailing 'p' in the Disk value --# (as in /dev/mmcblk0p that is derived from /dev/mmcblk0p1) --# needs to be stripped (to get /dev/mmcblk0), otherwise the --# efibootmgr call fails because of a wrong disk device name. --# See also https://github.com/rear/rear/issues/2103 --if [[ $Disk = *'/mmcblk'+([0-9])p ]] ; then -- Disk=${Disk%p} --fi -+# Mount point inside the target system, -+# accounting for possible trailing slashes in TARGET_FS_ROOT -+esp_mountpoint_inside="${esp_mountpoint#${TARGET_FS_ROOT%%*(/)}}" - --# For NVMe devices the trailing 'p' in the Disk value --# (as in /dev/nvme0n1p that is derived from /dev/nvme0n1p1) --# needs to be stripped (to get /dev/nvme0n1), otherwise the --# efibootmgr call fails because of a wrong disk device name. --# See also https://github.com/rear/rear/issues/1564 --if [[ $Disk = *'/nvme'+([0-9])n+([0-9])p ]] ; then -- Disk=${Disk%p} -+boot_efi_parts=$( find_partition "fs:$esp_mountpoint_inside" fs ) -+if ! test "$boot_efi_parts" ; then -+ LogPrint "Unable to find ESP $esp_mountpoint_inside in layout" -+ LogPrint "Trying to determine device currently mounted at $esp_mountpoint as fallback" -+ boot_efi_dev="$( mount | grep "$esp_mountpoint" | awk '{print $1}' )" -+ if ! 
test "$boot_efi_dev" ; then -+ LogPrintError "Cannot create EFI Boot Manager entry (unable to find ESP $esp_mountpoint among mounted devices)" -+ return 1 -+ fi -+ if test $(get_component_type "$boot_efi_dev") = part ; then -+ boot_efi_parts="$boot_efi_dev" -+ else -+ boot_efi_parts=$( find_partition "$boot_efi_dev" ) -+ fi -+ if ! test "$boot_efi_parts" ; then -+ LogPrintError "Cannot create EFI Boot Manager entry (unable to find partition for $boot_efi_dev)" -+ return 1 -+ fi -+ LogPrint "Using fallback EFI boot partition(s) $boot_efi_parts (unable to find ESP $esp_mountpoint_inside in layout)" - fi - -+local bootloader partition_block_device partition_number disk efipart -+ - # EFI\fedora\shim.efi --BootLoader=$( echo $UEFI_BOOTLOADER | cut -d"/" -f4- | sed -e 's;/;\\;g' ) --LogPrint "Creating EFI Boot Manager entry '$OS_VENDOR $OS_VERSION' for '$BootLoader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER')" --Log efibootmgr --create --gpt --disk ${Disk} --part ${ParNr} --write-signature --label \"${OS_VENDOR} ${OS_VERSION}\" --loader \"\\${BootLoader}\" --if efibootmgr --create --gpt --disk ${Disk} --part ${ParNr} --write-signature --label "${OS_VENDOR} ${OS_VERSION}" --loader "\\${BootLoader}" ; then -- # ok, boot loader has been set-up - tell rear we are done using following var. -- NOBOOTLOADER='' -- return --fi -+bootloader=$( echo $UEFI_BOOTLOADER | cut -d"/" -f4- | sed -e 's;/;\\;g' ) -+ -+for efipart in $boot_efi_parts ; do -+ # /dev/sda1 or /dev/mapper/vol34_part2 or /dev/mapper/mpath99p4 -+ partition_block_device=$( get_device_name $efipart ) -+ # 1 or 2 or 4 for the examples above -+ partition_number=$( get_partition_number $partition_block_device ) -+ if ! disk=$( get_device_from_partition $partition_block_device $partition_number ) ; then -+ LogPrintError "Cannot create EFI Boot Manager entry for ESP $partition_block_device (unable to find the underlying disk)" -+ # do not error out - we may be able to locate other disks if there are more of them -+ continue -+ fi -+ LogPrint "Creating EFI Boot Manager entry '$OS_VENDOR $OS_VERSION' for '$bootloader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER') " -+ Log efibootmgr --create --gpt --disk $disk --part $partition_number --write-signature --label \"${OS_VENDOR} ${OS_VERSION}\" --loader \"\\${bootloader}\" -+ if efibootmgr --create --gpt --disk $disk --part $partition_number --write-signature --label "${OS_VENDOR} ${OS_VERSION}" --loader "\\${bootloader}" ; then -+ # ok, boot loader has been set-up - continue with other disks (ESP can be on RAID) -+ NOBOOTLOADER='' -+ else -+ LogPrintError "efibootmgr failed to create EFI Boot Manager entry on $disk partition $partition_number (ESP $partition_block_device )" -+ fi -+done - --LogPrintError "efibootmgr failed to create EFI Boot Manager entry for '$BootLoader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER')" -+is_true $NOBOOTLOADER || return 0 -+LogPrintError "efibootmgr failed to create EFI Boot Manager entry for '$bootloader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER')" -+return 1 -diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh -index 54ddb50f..cdd81a14 100644 ---- a/usr/share/rear/lib/layout-functions.sh -+++ b/usr/share/rear/lib/layout-functions.sh -@@ -302,12 +302,20 @@ get_child_components() { - done - } - --# Return all ancestors of component $1 [ of type $2 ] -+# Return all ancestors of component $1 [ of type $2 [ skipping types $3 during resolution ] ] - get_parent_components() { -- declare -a ancestors devlist -- declare current child parent -+ declare -a ancestors 
devlist ignoretypes -+ declare current child parent parenttype - - devlist=( "$1" ) -+ if [[ "$3" ]] ; then -+ # third argument should, if present, be a space-separated list -+ # of types to ignore when walking up the dependency tree. -+ # Convert it to array -+ ignoretypes=( $3 ) -+ else -+ ignoretypes=() -+ fi - while (( ${#devlist[@]} )) ; do - current=${devlist[0]} - -@@ -318,6 +326,13 @@ get_parent_components() { - if IsInArray "$parent" "${ancestors[@]}" ; then - continue - fi -+ ### ...test if parent is of a correct type if requested... -+ if [[ ${#ignoretypes[@]} -gt 0 ]] ; then -+ parenttype=$(get_component_type "$parent") -+ if IsInArray "$parenttype" "${ignoretypes[@]}" ; then -+ continue -+ fi -+ fi - ### ...and add them to the list - devlist+=( "$parent" ) - ancestors+=( "$parent" ) -@@ -345,22 +360,24 @@ get_parent_components() { - } - - # find_devices -+# ${2+"$2"} in the following functions ensures that $2 gets passed down quoted if present -+# and ignored if not present - # Find the disk device(s) component $1 resides on. - find_disk() { -- get_parent_components "$1" "disk" -+ get_parent_components "$1" "disk" ${2+"$2"} - } - - find_multipath() { -- get_parent_components "$1" "multipath" -+ get_parent_components "$1" "multipath" ${2+"$2"} - } - - find_disk_and_multipath() { -- find_disk "$1" -- is_true "$AUTOEXCLUDE_MULTIPATH" || find_multipath "$1" -+ find_disk "$1" ${2+"$2"} -+ is_true "$AUTOEXCLUDE_MULTIPATH" || find_multipath "$1" ${2+"$2"} - } - - find_partition() { -- get_parent_components "$1" "part" -+ get_parent_components "$1" "part" ${2+"$2"} - } - - # The get_partition_number function -@@ -413,6 +430,54 @@ get_partition_number() { - echo $partition_number - } - -+# Extract the underlying device name from the full partition device name. -+# Underlying device may be a disk, a multipath device or other devices that can be partitioned. -+# Should we use the information in $LAYOUT_DEPS, like get_parent_component does, -+# instead of string munging? -+function get_device_from_partition() { -+ local partition_block_device -+ local device -+ local partition_number -+ -+ partition_block_device=$1 -+ test -b "$partition_block_device" || BugError "get_device_from_partition called with '$partition_block_device' that is no block device" -+ partition_number=${2-$(get_partition_number $partition_block_device )} -+ # /dev/sda or /dev/mapper/vol34_part or /dev/mapper/mpath99p or /dev/mmcblk0p -+ device=${partition_block_device%$partition_number} -+ -+ # Strip trailing partition remainders like '_part' or '-part' or 'p' -+ # if we have 'mapper' in disk device name: -+ if [[ ${partition_block_device/mapper//} != $partition_block_device ]] ; then -+ # we only expect mpath_partX or mpathpX or mpath-partX -+ case $device in -+ (*p) device=${device%p} ;; -+ (*-part) device=${device%-part} ;; -+ (*_part) device=${device%_part} ;; -+ (*) Log "Unsupported kpartx partition delimiter for $partition_block_device" -+ esac -+ fi -+ -+ # For eMMC devices the trailing 'p' in the $device value -+ # (as in /dev/mmcblk0p that is derived from /dev/mmcblk0p1) -+ # needs to be stripped (to get /dev/mmcblk0), otherwise the -+ # efibootmgr call fails because of a wrong disk device name. 
-+ # See also https://github.com/rear/rear/issues/2103 -+ if [[ $device = *'/mmcblk'+([0-9])p ]] ; then -+ device=${device%p} -+ fi -+ -+ # For NVMe devices the trailing 'p' in the $device value -+ # (as in /dev/nvme0n1p that is derived from /dev/nvme0n1p1) -+ # needs to be stripped (to get /dev/nvme0n1), otherwise the -+ # efibootmgr call fails because of a wrong disk device name. -+ # See also https://github.com/rear/rear/issues/1564 -+ if [[ $device = *'/nvme'+([0-9])n+([0-9])p ]] ; then -+ device=${device%p} -+ fi -+ -+ test -b "$device" && echo $device -+} -+ - # Returns partition start block or 'unknown' - # sda/sda1 or - # dm-XX diff --git a/rear-bz1958247.patch b/rear-bz1958247.patch deleted file mode 100644 index c85f6ad..0000000 --- a/rear-bz1958247.patch +++ /dev/null @@ -1,2040 +0,0 @@ -diff --git a/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh -index 64b7a792..6ba7d543 100644 ---- a/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh -+++ b/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh -@@ -1,10 +1,4 @@ --# create mount point - if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then -- mkdir -p $v "$BUILD_DIR/outputfs" >&2 -- StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" -- -- AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" -- - if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then - BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD" - fi -diff --git a/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh -index 185dbd95..8525ab1d 100644 ---- a/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh -+++ b/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh -@@ -6,10 +6,4 @@ if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" - fi - - umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs -- -- rmdir $v $BUILD_DIR/outputfs >&2 -- if [[ $? 
-eq 0 ]] ; then -- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask -- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" -- fi - fi -diff --git a/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh b/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh -index 5c7696db..b6a955db 100644 ---- a/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh -+++ b/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh -@@ -1,9 +1,3 @@ --# create mount point --mkdir -p $v "$BUILD_DIR/outputfs" >&2 --StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" -- --AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" -- - if [[ "$BACKUP_MOUNTCMD" ]] ; then - BACKUP_URL="var://BACKUP_MOUNTCMD" - fi -diff --git a/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh b/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh -index d79653b4..9bf8f76a 100644 ---- a/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh -+++ b/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh -@@ -3,20 +3,17 @@ - [ -z "${NETFS_KEEP_OLD_BACKUP_COPY}" ] && return - - # do not do this for tapes and special attention for file:///path --url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" --local scheme=$(url_scheme ${!url}) --local path=$(url_path ${!url}) --local opath=$(backup_path $scheme $path) -+local scheme=$( url_scheme $BACKUP_URL ) -+local path=$( url_path $BACKUP_URL ) -+local opath=$( backup_path $scheme $path ) - - # if $opath is empty return silently (e.g. scheme tape) - [ -z "$opath" ] && return 0 - - if ! test -f "${opath}/.lockfile" ; then - if test -d "${opath}" ; then -- rm -rf $v "${opath}.old" >&2 -- StopIfError "Could not remove '${opath}.old'" -- mv -f $v "${opath}" "${opath}.old" >&2 -- StopIfError "Could not move '${opath}'" -+ rm -rf $v "${opath}.old" || Error "Could not remove '${opath}.old'" -+ mv -f $v "${opath}" "${opath}.old" || Error "Could not move '${opath}'" - fi - else - # lockfile was already made through the output workflow (hands off) -diff --git a/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh b/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh -index db15bca2..43f5b651 100644 ---- a/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh -+++ b/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh -@@ -2,13 +2,14 @@ - # to $HOSTNAME - - # do not do this for tapes and special attention for file:///path --url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" --local scheme=$(url_scheme ${!url}) --local path=$(url_path ${!url}) --local opath=$(backup_path $scheme $path) -+local scheme=$( url_scheme $BACKUP_URL ) -+local path=$( url_path $BACKUP_URL ) -+local opath=$( backup_path $scheme $path ) - - # if $opath is empty return silently (e.g. 
scheme tape) - [ -z "$opath" ] && return 0 - --mkdir -p $v -m0750 "${opath}" >&2 --StopIfError "Could not mkdir '${opath}'" -+mkdir -p $v -m0750 "${opath}" && return -+ -+# A failure to create the $NETFS_PREFIX sub-directory is fatal: -+Error "Failed to create '$opath' directory for BACKUP_URL=$BACKUP_URL" -diff --git a/usr/share/rear/backup/NETFS/default/250_create_lock.sh b/usr/share/rear/backup/NETFS/default/250_create_lock.sh -index 59090a22..36d547ec 100644 ---- a/usr/share/rear/backup/NETFS/default/250_create_lock.sh -+++ b/usr/share/rear/backup/NETFS/default/250_create_lock.sh -@@ -2,15 +2,13 @@ - # made by a previous mkbackup run when the variable NETFS_KEEP_OLD_BACKUP_COPY has been set - - # do not do this for tapes and special attention for file:///path --url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" --local scheme=$(url_scheme ${!url}) --local path=$(url_path ${!url}) --local opath=$(backup_path $scheme $path) -+local scheme=$( url_scheme $BACKUP_URL ) -+local path=$( url_path $BACKUP_URL ) -+local opath=$( backup_path $scheme $path ) - - # if $opath is empty return silently (e.g. scheme tape) - [ -z "$opath" ] && return 0 - - if test -d "${opath}" ; then -- > "${opath}/.lockfile" -- StopIfError "Could not create '${opath}/.lockfile'" -+ > "${opath}/.lockfile" || Error "Could not create '${opath}/.lockfile'" - fi -diff --git a/usr/share/rear/backup/NETFS/default/970_remove_lock.sh b/usr/share/rear/backup/NETFS/default/970_remove_lock.sh -index f69f7bd8..7038f5b9 100644 ---- a/usr/share/rear/backup/NETFS/default/970_remove_lock.sh -+++ b/usr/share/rear/backup/NETFS/default/970_remove_lock.sh -@@ -1,8 +1,7 @@ - # remove the lockfile --url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" --local scheme=$(url_scheme ${!url}) --local path=$(url_path ${!url}) --local opath=$(backup_path $scheme $path) -+local scheme=$( url_scheme $BACKUP_URL ) -+local path=$( url_path $BACKUP_URL ) -+local opath=$( backup_path $scheme $path ) - - # if $opath is empty return silently (e.g. scheme tape) - [ -z "$opath" ] && return 0 -diff --git a/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh b/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh -index f28c6cbf..e1954dc5 100644 ---- a/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh -+++ b/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh -@@ -5,9 +5,3 @@ if [[ "$BACKUP_UMOUNTCMD" ]] ; then - fi - - umount_url $BACKUP_URL $BUILD_DIR/outputfs -- --rmdir $v $BUILD_DIR/outputfs >&2 --if [[ $? -eq 0 ]] ; then -- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask -- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" --fi -diff --git a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh -deleted file mode 100644 -index 6111f89b..00000000 ---- a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh -+++ /dev/null -@@ -1,33 +0,0 @@ -- --# Backup all that is explicitly specified in BACKUP_PROG_INCLUDE: --for backup_include_item in "${BACKUP_PROG_INCLUDE[@]}" ; do -- test "$backup_include_item" && echo "$backup_include_item" --done > $TMP_DIR/backup-include.txt -- --# Implicitly also backup all local filesystems as defined in mountpoint_device --# except BACKUP_ONLY_INCLUDE or MANUAL_INCLUDE is set: --if ! 
is_true "$BACKUP_ONLY_INCLUDE" ; then -- if [ "${MANUAL_INCLUDE:-NO}" != "YES" ] ; then -- # Add the mountpoints that will be recovered to the backup include list -- # unless a mountpoint is excluded: -- while read mountpoint device junk ; do -- if ! IsInArray "$mountpoint" "${EXCLUDE_MOUNTPOINTS[@]}" ; then -- echo "$mountpoint" -- fi -- done <"$VAR_DIR/recovery/mountpoint_device" >> $TMP_DIR/backup-include.txt -- fi --fi -- --# Exclude all that is explicitly specified in BACKUP_PROG_EXCLUDE: --for backup_exclude_item in "${BACKUP_PROG_EXCLUDE[@]}" ; do -- test "$backup_exclude_item" && echo "$backup_exclude_item" --done > $TMP_DIR/backup-exclude.txt -- --# Implicitly also add excluded mountpoints to the backup exclude list --# except BACKUP_ONLY_EXCLUDE is set: --if ! is_true "$BACKUP_ONLY_EXCLUDE" ; then -- for excluded_mountpoint in "${EXCLUDE_MOUNTPOINTS[@]}" ; do -- test "$excluded_mountpoint" && echo "$excluded_mountpoint/" -- done >> $TMP_DIR/backup-exclude.txt --fi -- -diff --git a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh -new file mode 120000 -index 00000000..d8d12c0b ---- /dev/null -+++ b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh -@@ -0,0 +1 @@ -+../../NETFS/default/400_create_include_exclude_files.sh -\ No newline at end of file -diff --git a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh -deleted file mode 100644 -index 29d85905..00000000 ---- a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh -+++ /dev/null -@@ -1,14 +0,0 @@ --# Copied from ../../DUPLICITY/default/600_create_python_symlink.sh for YUM --# make sure we have a symbolic link to the python binary --( -- cd $ROOTFS_DIR/bin -- for py in $(find . 
-name "python*" ) -- do -- this_py=${py#./*} # should be without ./ -- case $this_py in -- python) break ;; -- python2*|python3*) ln -sf $v $this_py python >&2 ;; -- esac -- done --) -- -diff --git a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh -new file mode 120000 -index 00000000..d776e5aa ---- /dev/null -+++ b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh -@@ -0,0 +1 @@ -+../../DUPLICITY/default/600_create_python_symlink.sh -\ No newline at end of file -diff --git a/usr/share/rear/lib/framework-functions.sh b/usr/share/rear/lib/framework-functions.sh -index f245861a..b5324747 100644 ---- a/usr/share/rear/lib/framework-functions.sh -+++ b/usr/share/rear/lib/framework-functions.sh -@@ -122,7 +122,7 @@ function cleanup_build_area_and_end_program () { - # Cleanup build area - Log "Finished in $((SECONDS-STARTTIME)) seconds" - if is_true "$KEEP_BUILD_DIR" ; then -- LogPrint "You should also rm -Rf $BUILD_DIR" -+ LogPrint "You should also rm -Rf --one-file-system $BUILD_DIR" - else - Log "Removing build area $BUILD_DIR" - rm -Rf $TMP_DIR -@@ -132,15 +132,11 @@ function cleanup_build_area_and_end_program () { - # in worst case it could not umount; so before remove the BUILD_DIR check if above outputfs is gone - if mountpoint -q "$BUILD_DIR/outputfs" ; then - # still mounted it seems -- LogPrint "Directory $BUILD_DIR/outputfs still mounted - trying lazy umount" - sleep 2 -- umount -f -l $BUILD_DIR/outputfs >&2 -- rm -Rf $v $BUILD_DIR/outputfs >&2 -- else -- # not mounted so we can safely delete $BUILD_DIR/outputfs -- rm -Rf $BUILD_DIR/outputfs -+ umount_mountpoint_lazy $BUILD_DIR/outputfs - fi -- rm -Rf $v $BUILD_DIR >&2 -+ remove_temporary_mountpoint '$BUILD_DIR/outputfs' || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove" -+ rmdir $v $BUILD_DIR >&2 - fi - Log "End of program reached" - } -diff --git a/usr/share/rear/lib/global-functions.sh b/usr/share/rear/lib/global-functions.sh -index 4264bb53..a1aec604 100644 ---- a/usr/share/rear/lib/global-functions.sh -+++ b/usr/share/rear/lib/global-functions.sh -@@ -342,7 +342,44 @@ function url_path() { - echo /${url_without_scheme#*/} - } - --backup_path() { -+### Returns true if one can upload files to the URL -+function scheme_accepts_files() { -+ local scheme=$1 -+ case $scheme in -+ (null|tape|obdr) -+ # tapes do not support uploading arbitrary files, one has to handle them -+ # as special case (usually passing the tape device as argument to tar) -+ # null means do not upload anything anywhere, leave the files under /var/lib/rear/output -+ return 1 -+ ;; -+ (*) -+ # most URL schemes support uploading files -+ return 0 -+ ;; -+ esac -+} -+ -+### Returns true if URLs with the given scheme corresponds to a path inside -+### a mountable fileystem and one can put files directly into it. -+### The actual path will be returned by backup_path() / output_path(). -+### If returns false, using backup_path() / output_path() has no sense -+### and one must use a scheme-specific method (like lftp or writing them to a tape) -+### to upload files to the destination instead of just "cp" or other direct filesystem access. 
-+### Returning true does not imply that the URL is currently mounted at a filesystem and usable, -+### only that it can be mounted (use mount_url() first) -+function scheme_supports_filesystem() { -+ local scheme=$1 -+ case $scheme in -+ (null|tape|obdr|rsync|fish|ftp|ftps|hftp|http|https|sftp) -+ return 1 -+ ;; -+ (*) -+ return 0 -+ ;; -+ esac -+} -+ -+function backup_path() { - local scheme=$1 - local path=$2 - case $scheme in -@@ -368,13 +405,21 @@ backup_path() { - echo "$path" - } - --output_path() { -+function output_path() { - local scheme=$1 - local path=$2 -+ -+ # Abort for unmountable schemes ("tape-like" or "ftp-like" schemes). -+ # Returning an empty string for them is not satisfactory: it could lead to caller putting its files -+ # under / instead of the intended location if the result is not checked for emptiness. -+ # Returning ${BUILD_DIR}/outputfs/${OUTPUT_PREFIX} for unmountable URLs is also not satisfactory: -+ # caller could put its files there expecting them to be safely at their destination, -+ # but if the directory is not a mountpoint, they would get silently lost. -+ # The caller needs to check the URL/scheme using scheme_supports_filesystem() -+ # before calling this function. -+ scheme_supports_filesystem $scheme || BugError "output_path() called with scheme $scheme that does not support filesystem access" -+ - case $scheme in -- (null|tape) # no path for tape required -- path="" -- ;; - (file) # type file needs a local path (must be mounted by user) - path="$path/${OUTPUT_PREFIX}" - ;; -@@ -387,17 +432,33 @@ output_path() { - - - ### Mount URL $1 at mountpoint $2[, with options $3] --mount_url() { -+function mount_url() { - local url=$1 - local mountpoint=$2 - local defaultoptions="rw,noatime" - local options=${3:-"$defaultoptions"} -+ local scheme -+ -+ scheme=$( url_scheme $url ) -+ -+ # The cases where we return 0 are those that do not need umount and also do not need ExitTask handling. -+ # They thus need to be kept in sync with umount_url() so that RemoveExitTasks is used -+ # iff AddExitTask was used in mount_url(). -+ -+ if ! scheme_supports_filesystem $scheme ; then -+ ### Stuff like null|tape|rsync|fish|ftp|ftps|hftp|http|https|sftp -+ ### Don't need to umount anything for these. -+ ### file: supports filesystem access, but is not mounted and unmounted, -+ ### so it has to be handled specially below. -+ ### Similarly for iso: which gets mounted and unmounted only during recovery. -+ return 0 -+ fi - - ### Generate a mount command - local mount_cmd -- case $(url_scheme $url) in -- (null|tape|file|rsync|fish|ftp|ftps|hftp|http|https|sftp) -- ### Don't need to mount anything for these -+ case $scheme in -+ (file) -+ ### Don't need to mount anything for file:, it is already mounted by user - return 0 - ;; - (iso) -@@ -558,22 +619,47 @@ mount_url() { - ;; - esac - -+ # create mount point -+ mkdir -p $v "$mountpoint" || Error "Could not mkdir '$mountpoint'" -+ AddExitTask "remove_temporary_mountpoint '$mountpoint'" -+ - Log "Mounting with '$mount_cmd'" - # eval is required when mount_cmd contains single quoted stuff (e.g. see the above mount_cmd for curlftpfs) - eval $mount_cmd || Error "Mount command '$mount_cmd' failed." 
- -- AddExitTask "umount -f $v '$mountpoint' >&2" -+ AddExitTask "perform_umount_url '$url' '$mountpoint' lazy" - return 0 - } - --### Unmount url $1 at mountpoint $2 --umount_url() { -+function remove_temporary_mountpoint() { -+ if test -d "$1" ; then -+ rmdir $v "$1" -+ fi -+} -+ -+### Unmount url $1 at mountpoint $2, perform mountpoint cleanup and exit task + error handling -+function umount_url() { - local url=$1 - local mountpoint=$2 -+ local scheme - -- case $(url_scheme $url) in -- (null|tape|file|rsync|fish|ftp|ftps|hftp|http|https|sftp) -- ### Don't need to umount anything for these -+ scheme=$( url_scheme $url ) -+ -+ # The cases where we return 0 are those that do not need umount and also do not need ExitTask handling. -+ # They thus need to be kept in sync with mount_url() so that RemoveExitTasks is used -+ # iff AddExitTask was used in mount_url(). -+ -+ if ! scheme_supports_filesystem $scheme ; then -+ ### Stuff like null|tape|rsync|fish|ftp|ftps|hftp|http|https|sftp -+ ### Don't need to umount anything for these. -+ ### file: supports filesystem access, but is not mounted and unmounted, -+ ### so it has to be handled specially below. -+ ### Similarly for iso: which gets mounted and unmounted only during recovery. -+ return 0 -+ fi -+ -+ case $scheme in -+ (file) - return 0 - ;; - (iso) -@@ -581,42 +667,106 @@ umount_url() { - return 0 - fi - ;; -- (sshfs) -- umount_cmd="fusermount -u $mountpoint" -- ;; -- (davfs) -- umount_cmd="umount $mountpoint" -- # Wait for 3 sek. then remove the cache-dir /var/cache/davfs -- sleep 30 -- # ToDo: put in here the cache-dir from /etc/davfs2/davfs.conf -- # and delete only the just used cache -- #rm -rf /var/cache/davfs2/** -- rm -rf /var/cache/davfs2/*outputfs* -- -- ;; -- (var) -- local var=$(url_host $url) -- umount_cmd="${!var} $mountpoint" -+ (*) -+ # Schemes that actually need nontrivial umount are handled below. -+ # We do not handle them in the default branch because in the case of iso: -+ # it depends on the current workflow whether umount is needed or not. -+ : -+ esac - -- Log "Unmounting with '$umount_cmd'" -- $umount_cmd -- StopIfError "Unmounting failed." -+ # umount_url() is a wrapper that takes care of exit tasks and error handling and mountpoint cleanup. -+ # Therefore it also determines if exit task and mountpoint handling is required and returns early if not. -+ # The actual umount job is performed inside perform_umount_url(). -+ # We do not request lazy umount here because we want umount errors to be reliably reported. -+ perform_umount_url $url $mountpoint || Error "Unmounting '$mountpoint' failed." - -- RemoveExitTask "umount -f $v '$mountpoint' >&2" -- return 0 -+ RemoveExitTask "perform_umount_url '$url' '$mountpoint' lazy" -+ -+ remove_temporary_mountpoint '$mountpoint' && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'" -+ return 0 -+} -+ -+### Unmount url $1 at mountpoint $2 [ lazily if $3 is set to 'lazy' and normal unmount fails ] -+function perform_umount_url() { -+ local url=$1 -+ local mountpoint=$2 -+ local lazy=${3:-} -+ -+ if test $lazy ; then -+ if test $lazy != "lazy" ; then -+ BugError "lazy = $lazy, but it must have the value of 'lazy' or empty" -+ fi -+ fi -+ -+ case $(url_scheme $url) in -+ (sshfs) -+ # does ftpfs need this special case as well? 
-+ fusermount -u ${lazy:+'-z'} $mountpoint -+ ;; -+ (davfs) -+ umount_davfs $mountpoint $lazy -+ ;; -+ (var) -+ local var -+ var=$(url_host $url) -+ Log "Unmounting with '${!var} $mountpoint'" -+ # lazy unmount not supported with custom umount command -+ ${!var} $mountpoint - ;; -+ (*) -+ # usual umount command -+ umount_mountpoint $mountpoint $lazy - esac -+ # The switch above must be the last statement in this function and the umount commands must be -+ # the last commands (or part of) in each branch. This ensures proper exit code propagation -+ # to the caller even when set -e is used. -+} - -- umount_mountpoint $mountpoint -- StopIfError "Unmounting '$mountpoint' failed." -+### Helper which unmounts davfs mountpoint $1 and cleans up the cache, -+### performing lazy unmount if $2 = 'lazy' and normal unmount fails. -+function umount_davfs() { -+ local mountpoint=$1 -+ local lazy="${2:-}" - -- RemoveExitTask "umount -f $v '$mountpoint' >&2" -- return 0 -+ if test $lazy ; then -+ if test $lazy != "lazy" ; then -+ BugError "lazy = $lazy, but it must have the value of 'lazy' or empty" -+ fi -+ fi -+ -+ if umount_mountpoint $mountpoint ; then -+ # Wait for 3 sek. then remove the cache-dir /var/cache/davfs -+ sleep 30 -+ # TODO: put in here the cache-dir from /etc/davfs2/davfs.conf -+ # and delete only the just used cache -+ #rm -rf /var/cache/davfs2/** -+ rm -rf /var/cache/davfs2/*outputfs* -+ else -+ local retval=$? -+ -+ if test $lazy ; then -+ # try again to unmount lazily and this time do not delete the cache, it is still in use. -+ LogPrintError "davfs cache /var/cache/davfs2/*outputfs* needs to be cleaned up manually after the lazy unmount finishes" -+ umount_mountpoint_lazy $mountpoint -+ else -+ # propagate errors from umount -+ return $retval -+ fi -+ fi - } - --### Unmount mountpoint $1 --umount_mountpoint() { -+### Unmount mountpoint $1 [ lazily if $2 = 'lazy' ] -+### Default implementation for filesystems that don't need anything fancy -+### For special umount commands use perform_umount_url() -+function umount_mountpoint() { - local mountpoint=$1 -+ local lazy=${2:-} -+ -+ if test $lazy ; then -+ if test $lazy != "lazy" ; then -+ BugError "lazy = $lazy, but it must have the value of 'lazy' or empty" -+ fi -+ fi - - ### First, try a normal unmount, - Log "Unmounting '$mountpoint'" -@@ -636,7 +786,21 @@ umount_mountpoint() { - fi - - Log "Unmounting '$mountpoint' failed." -- return 1 -+ -+ if test $lazy ; then -+ umount_mountpoint_lazy $mountpoint -+ else -+ return 1 -+ fi -+} -+ -+### Unmount mountpoint $1 lazily -+### Preferably use "umount_mountpoint $mountpoint lazy", which attempts non-lazy unmount first. -+function umount_mountpoint_lazy() { -+ local mountpoint=$1 -+ -+ LogPrint "Directory $mountpoint still mounted - trying lazy umount" -+ umount $v -f -l $mountpoint >&2 - } - - # Change $1 to user input or leave default value on empty input -diff --git a/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh b/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh -index a43dff13..3e7512ee 100644 ---- a/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh -+++ b/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh -@@ -8,10 +8,12 @@ - if [[ ! -z "$PXE_TFTP_URL" ]] ; then - # E.g. 
PXE_TFTP_URL=nfs://server/export/nfs/tftpboot - local scheme=$( url_scheme $PXE_TFTP_URL ) -- local path=$( url_path $PXE_TFTP_URL ) -- mkdir -p $v "$BUILD_DIR/tftpbootfs" >&2 -- StopIfError "Could not mkdir '$BUILD_DIR/tftpbootfs'" -- AddExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" -+ -+ # We need filesystem access to the destination (schemes like ftp:// are not supported) -+ if ! scheme_supports_filesystem $scheme ; then -+ Error "Scheme $scheme for PXE output not supported, use a scheme that supports mounting (like nfs: )" -+ fi -+ - mount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs $BACKUP_OPTIONS - # However, we copy under $OUTPUT_PREFIX_PXE directory (usually HOSTNAME) to have different clients on one pxe server - PXE_TFTP_LOCAL_PATH=$BUILD_DIR/tftpbootfs -@@ -74,10 +76,6 @@ fi - if [[ ! -z "$PXE_TFTP_URL" ]] ; then - LogPrint "Copied kernel+initrd $( du -shc $KERNEL_FILE "$TMP_DIR/$REAR_INITRD_FILENAME" | tail -n 1 | tr -s "\t " " " | cut -d " " -f 1 ) to $PXE_TFTP_URL/$OUTPUT_PREFIX_PXE" - umount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs -- rmdir $BUILD_DIR/tftpbootfs >&2 -- if [[ $? -eq 0 ]] ; then -- RemoveExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" -- fi - else - # legacy way PXE_TFTP_PATH - LogPrint "Copied kernel+initrd $( du -shc $KERNEL_FILE "$TMP_DIR/$REAR_INITRD_FILENAME" | tail -n 1 | tr -s "\t " " " | cut -d " " -f 1 ) to $PXE_TFTP_PATH" -diff --git a/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh b/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh -index fce4bcf1..5041a3bc 100644 ---- a/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh -+++ b/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh -@@ -1,4 +1,4 @@ --# 81_create_pxelinux_cfg.sh -+# 810_create_pxelinux_cfg.sh - # - # create pxelinux config on PXE server for Relax-and-Recover - # -@@ -11,10 +11,12 @@ if [[ ! -z "$PXE_CONFIG_URL" ]] ; then - # E.g. PXE_CONFIG_URL=nfs://server/export/nfs/tftpboot/pxelinux.cfg - # Better be sure that on 'server' the directory /export/nfs/tftpboot/pxelinux.cfg exists - local scheme=$( url_scheme $PXE_CONFIG_URL ) -- local path=$( url_path $PXE_CONFIG_URL ) -- mkdir -p $v "$BUILD_DIR/tftpbootfs" >&2 -- StopIfError "Could not mkdir '$BUILD_DIR/tftpbootfs'" -- AddExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" -+ -+ # We need filesystem access to the destination (schemes like ftp:// are not supported) -+ if ! scheme_supports_filesystem $scheme ; then -+ Error "Scheme $scheme for PXE output not supported, use a scheme that supports mounting (like nfs: )" -+ fi -+ - mount_url $PXE_CONFIG_URL $BUILD_DIR/tftpbootfs $BACKUP_OPTIONS - PXE_LOCAL_PATH=$BUILD_DIR/tftpbootfs - else -@@ -105,10 +107,6 @@ popd >/dev/null - if [[ ! -z "$PXE_CONFIG_URL" ]] ; then - LogPrint "Created pxelinux config '${PXE_CONFIG_PREFIX}$HOSTNAME' and symlinks for $PXE_CREATE_LINKS adresses in $PXE_CONFIG_URL" - umount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs -- rmdir $BUILD_DIR/tftpbootfs >&2 -- if [[ $? 
-eq 0 ]] ; then -- RemoveExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" -- fi - else - LogPrint "Created pxelinux config '${PXE_CONFIG_PREFIX}$HOSTNAME' and symlinks for $PXE_CREATE_LINKS adresses in $PXE_CONFIG_PATH" - # Add to result files -diff --git a/usr/share/rear/output/PXE/default/820_copy_to_net.sh b/usr/share/rear/output/PXE/default/820_copy_to_net.sh -deleted file mode 100644 -index 39cd316d..00000000 ---- a/usr/share/rear/output/PXE/default/820_copy_to_net.sh -+++ /dev/null -@@ -1,41 +0,0 @@ -- --# 820_copy_to_net.sh -- --# Check if we have a target location OUTPUT_URL --test "$OUTPUT_URL" || return 0 -- --local scheme=$( url_scheme $OUTPUT_URL ) --local result_file="" --local path="" -- --case "$scheme" in -- (nfs|cifs|usb|tape|file|davfs) -- # The ISO has already been transferred by NETFS. -- return 0 -- ;; -- (fish|ftp|ftps|hftp|http|https|sftp) -- LogPrint "Transferring PXE files to $OUTPUT_URL" -- for result_file in "${RESULT_FILES[@]}" ; do -- path=$(url_path $OUTPUT_URL) -- -- # Make sure that destination directory exists, otherwise lftp would copy -- # RESULT_FILES into last available directory in the path. -- # e.g. OUTPUT_URL=sftp:///iso/server1 and have "/iso/server1" -- # directory missing, would upload RESULT_FILES into sftp:///iso/ -- lftp -c "$OUTPUT_LFTP_OPTIONS; open $OUTPUT_URL; mkdir -fp ${path}" -- -- LogPrint "Transferring file: $result_file" -- lftp -c "$OUTPUT_LFTP_OPTIONS; open $OUTPUT_URL; mput $result_file" || Error "lftp failed to transfer '$result_file' to '$OUTPUT_URL' (lftp exit code: $?)" -- done -- ;; -- (rsync) -- LogPrint "Transferring PXE files to $OUTPUT_URL" -- for result_file in "${RESULT_FILES[@]}" ; do -- LogPrint "Transferring file: $result_file" -- rsync -a $v "$result_file" "$OUTPUT_URL" || Error "Problem transferring '$result_file' to $OUTPUT_URL" -- done -- ;; -- (*) Error "Invalid scheme '$scheme' in '$OUTPUT_URL'." -- ;; --esac -- -diff --git a/usr/share/rear/output/default/100_mount_output_path.sh b/usr/share/rear/output/default/100_mount_output_path.sh -index 22ef36de..34ea8e5e 100644 ---- a/usr/share/rear/output/default/100_mount_output_path.sh -+++ b/usr/share/rear/output/default/100_mount_output_path.sh -@@ -1,9 +1,3 @@ --# create mount point --mkdir -p $v "$BUILD_DIR/outputfs" >&2 --StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" -- --AddExitTask "rm -Rf $v $BUILD_DIR/outputfs >&2" -- - if [[ "$OUTPUT_MOUNTCMD" ]] ; then - OUTPUT_URL="var://$OUTPUT_MOUNTCMD" - fi -diff --git a/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh b/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh -index 00339a96..06326114 100644 ---- a/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh -+++ b/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh -@@ -3,22 +3,20 @@ - [ -z "${KEEP_OLD_OUTPUT_COPY}" ] && return - - # do not do this for tapes and special attention for file:///path --url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" --local scheme=$(url_scheme ${!url}) --local path=$(url_path ${!url}) --local opath=$(output_path $scheme $path) -+local scheme=$( url_scheme $OUTPUT_URL ) -+local path=$( url_path $OUTPUT_URL ) - --# if $opath is empty return silently (e.g. scheme tape) --[ -z "$opath" ] && return 0 -+# if filesystem access to url is unsupported return silently (e.g. 
scheme tape) -+scheme_supports_filesystem $scheme || return 0 -+ -+local opath=$( output_path $scheme $path ) - - # an old lockfile from a previous run not cleaned up by output is possible - [[ -f ${opath}/.lockfile ]] && rm -f ${opath}/.lockfile >&2 - - if test -d "${opath}" ; then -- rm -rf $v "${opath}.old" >&2 -- StopIfError "Could not remove '${opath}.old'" -+ rm -rf $v "${opath}.old" || Error "Could not remove '${opath}.old'" - # below statement was 'cp -af' instead of 'mv -f' (see issue #192) -- mv -f $v "${opath}" "${opath}.old" >&2 -- StopIfError "Could not move '${opath}'" -+ mv -f $v "${opath}" "${opath}.old" || Error "Could not move '${opath}'" - fi - # the ${BUILD_DIR}/outputfs/${OUTPUT_PREFIX} will be created by output/default/200_make_prefix_dir.sh -diff --git a/usr/share/rear/output/default/200_make_prefix_dir.sh b/usr/share/rear/output/default/200_make_prefix_dir.sh -index b8892f2f..606e1c86 100644 ---- a/usr/share/rear/output/default/200_make_prefix_dir.sh -+++ b/usr/share/rear/output/default/200_make_prefix_dir.sh -@@ -3,25 +3,21 @@ - # The $OUTPUT_PREFIX directory defaults to $HOSTNAME. - # - # This happens usually under a mounted network filesystem share --# e.g. in case of BACKUP_URL=nfs://NFS.server.IP.address/remote/nfs/share --# but it is also happens for local stuff like BACKUP_URL=usb:///dev/disk/by-label/REAR-000 -+# e.g. in case of OUTPUT_URL=nfs://NFS.server.IP.address/remote/nfs/share -+# but it is also happens for local stuff like OUTPUT_URL=usb:///dev/disk/by-label/REAR-000 - # - # Do not do this for tapes and special attention for file:///path -+local scheme=$( url_scheme $OUTPUT_URL ) -+local path=$( url_path $OUTPUT_URL ) - --# Generate url variable name that depends on the current stage, --# e.g. BACKUP_URL or OUTPUT_URL: --url="$( echo $stage | tr '[:lower:]' '[:upper:]' )_URL" -+# If filesystem access to url is unsupported return silently (e.g. scheme tape) -+scheme_supports_filesystem $scheme || return 0 - --local scheme=$( url_scheme ${!url} ) --local path=$( url_path ${!url} ) - local opath=$( output_path $scheme $path ) - --# If $opath is empty return silently (e.g. scheme tape): --test "$opath" || return 0 -- - # Create $OUTPUT_PREFIX sub-directory: - mkdir -p $v -m0750 "$opath" && return - --# A failure to cerate the $OUTPUT_PREFIX sub-directory is fatal: --Error "Failed to create '$opath' directory for $url=${!url}" -+# A failure to create the $OUTPUT_PREFIX sub-directory is fatal: -+Error "Failed to create '$opath' directory for OUTPUT_URL=$OUTPUT_URL" - -diff --git a/usr/share/rear/output/default/250_create_lock.sh b/usr/share/rear/output/default/250_create_lock.sh -index 49c75601..d792b036 100644 ---- a/usr/share/rear/output/default/250_create_lock.sh -+++ b/usr/share/rear/output/default/250_create_lock.sh -@@ -2,15 +2,14 @@ - # made by a previous mkrescue run when the variable KEEP_OLD_OUTPUT_COPY has been set - - # do not do this for tapes and special attention for file:///path --url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" --local scheme=$(url_scheme ${!url}) --local path=$(url_path ${!url}) --local opath=$(output_path $scheme $path) -+local scheme=$( url_scheme $OUTPUT_URL ) -+local path=$( url_path $OUTPUT_URL ) - --# if $opath is empty return silently (e.g. scheme tape) --[ -z "$opath" ] && return 0 -+# if filesystem access to url is unsupported return silently (e.g. 
scheme tape) -+scheme_supports_filesystem $scheme || return 0 -+ -+local opath=$( output_path $scheme $path ) - - if test -d "${opath}" ; then -- > "${opath}/.lockfile" -- StopIfError "Could not create '${opath}/.lockfile'" -+ > "${opath}/.lockfile" || Error "Could not create '${opath}/.lockfile'" - fi -diff --git a/usr/share/rear/output/default/950_copy_result_files.sh b/usr/share/rear/output/default/950_copy_result_files.sh -index 545b3f7d..77f54d51 100644 ---- a/usr/share/rear/output/default/950_copy_result_files.sh -+++ b/usr/share/rear/output/default/950_copy_result_files.sh -@@ -5,16 +5,25 @@ - - # For example for "rear mkbackuponly" there are usually no result files - # that would need to be copied here to the output location: --test "$RESULT_FILES" || return 0 -+test "${RESULT_FILES[*]:-}" || return 0 - - local scheme=$( url_scheme $OUTPUT_URL ) - local host=$( url_host $OUTPUT_URL ) - local path=$( url_path $OUTPUT_URL ) --local opath=$( output_path $scheme $path ) - --# if $opath is empty return silently (e.g. scheme tape) --if [[ -z "$opath" || -z "$OUTPUT_URL" || "$scheme" == "obdr" || "$scheme" == "tape" ]] ; then -- return 0 -+if [ -z "$OUTPUT_URL" ] || ! scheme_accepts_files $scheme ; then -+ if [ "$scheme" == "null" -o -z "$OUTPUT_URL" ] ; then -+ # There are result files to copy, but OUTPUT_URL=null indicates that we are not interested in them -+ # TODO: empty OUTPUT_URL seems to be equivalent to null, should we continue to allow that, -+ # or enforce setting it explicitly? -+ return 0 -+ else -+ # There are files to copy, but schemes like tape: do not allow files to be stored. The files would be lost. -+ # Do not allow that. -+ # Schemes like obdr: that store the results themselves should clear RESULT_FILES to indicate that nothing is to be done. -+ # Is this considered a bug in ReaR (BugError), or a user misconfiguration (Error) when this happens? -+ BugError "Output scheme $scheme does not accept result files ${RESULT_FILES[*]}, use OUTPUT_URL=null if you don't want to copy them anywhere." -+ fi - fi - - LogPrint "Copying resulting files to $scheme location" -@@ -38,66 +47,76 @@ RESULT_FILES+=( "$TMP_DIR/$final_logfile_name" ) - LogPrint "Saving $RUNTIME_LOGFILE as $final_logfile_name to $scheme location" - - # The real work (actually copying resulting files to the output location): -+if scheme_supports_filesystem $scheme ; then -+ # We can access the destination as a mounted filesystem. Do nothing special, -+ # simply copy the output files there. (Covers stuff like nfs|cifs|usb|file|sshfs|ftpfs|davfs.) -+ # This won't work for iso:// , but iso can't be a OUTPUT_URL scheme, this is checked in -+ # prep/default/040_check_backup_and_output_scheme.sh -+ # This covers also unknown schemes, because mount_url() will attempt to mount them and fail if this is not possible, -+ # so if we got here, the URL had been mounted successfully. 
-+ local opath -+ opath=$( output_path $scheme $path ) -+ LogPrint "Copying result files '${RESULT_FILES[*]}' to $opath at $scheme location" -+ # Copy each result file one by one to avoid usually false error exits as in -+ # https://github.com/rear/rear/issues/1711#issuecomment-380009044 -+ # where in case of an improper RESULT_FILES array member 'cp' can error out with something like -+ # cp: will not overwrite just-created '/tmp/rear.XXX/outputfs/f121/rear-f121.log' with '/tmp/rear.XXX/tmp/rear-f121.log' -+ # See -+ # https://stackoverflow.com/questions/4669420/have-you-ever-got-this-message-when-moving-a-file-mv-will-not-overwrite-just-c -+ # which is about the same for 'mv', how to reproduce it: -+ # mkdir a b c -+ # touch a/f b/f -+ # mv a/f b/f c/ -+ # mv: will not overwrite just-created 'c/f' with 'b/f' -+ # It happens because two different files with the same name would be moved to the same place with only one command. -+ # The -f option won't help for this case, it only applies when there already is a target file that will be overwritten. -+ # Accordingly it is sufficient (even without '-f') to copy each result file one by one: -+ for result_file in "${RESULT_FILES[@]}" ; do -+ -+ # note: s390 kernel copy is only through nfs -+ # -+ # s390 optional naming override of initrd and kernel to match the s390 filesytem naming conventions -+ # on s390a there is an option to name the initrd and kernel in the form of -+ # file name on s390 are in the form of name type mode -+ # the name is the userid or vm name and the type is initrd or kernel -+ # if the vm name (cp q userid) is HOSTA then the files written will be HOSTA kernel and HOSTA initrd -+ # vars needed: -+ # ZVM_NAMING - set in local.conf, if Y then enable naming override -+ # ZVM_KERNEL_NAME - keeps track of kernel name in results array -+ # ARCH - override only if ARCH is Linux-s390 -+ # -+ # initrd name override is handled in 900_create_initramfs.sh -+ # kernel name override is handled in 400_guess_kernel.sh -+ # kernel name override is handled in 950_copy_result_files.sh -+ -+ if [[ "$ZVM_NAMING" == "Y" && "$ARCH" == "Linux-s390" ]] ; then -+ if [[ -z $opath ]] ; then -+ Error "Output path is not set, please check OUTPUT_URL in local.conf." -+ fi -+ -+ if [ "$ZVM_KERNEL_NAME" == "$result_file" ] ; then -+ VM_UID=$(vmcp q userid |awk '{ print $1 }') -+ -+ if [[ -z $VM_UID ]] ; then -+ Error "VM UID is not set, VM UID is set from call to vmcp. 
Please make sure vmcp is available and 'vmcp q userid' returns VM ID" -+ fi -+ -+ LogPrint "s390 kernel naming override: $result_file will be written as $VM_UID.kernel" -+ cp $v "$result_file" $opath/$VM_UID.kernel || Error "Could not copy result file $result_file to $opath/$VM_UID.kernel at $scheme location" -+ else -+ cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location" -+ fi -+ else -+ cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location" -+ fi -+ done -+ -+ return 0 -+fi -+ -+# Filesystem access to output destination not supported, use a scheme-specific tool (rsync, lftp) - case "$scheme" in -- (nfs|cifs|usb|file|sshfs|ftpfs|davfs) -- LogPrint "Copying result files '${RESULT_FILES[@]}' to $opath at $scheme location" -- # Copy each result file one by one to avoid usually false error exits as in -- # https://github.com/rear/rear/issues/1711#issuecomment-380009044 -- # where in case of an improper RESULT_FILES array member 'cp' can error out with something like -- # cp: will not overwrite just-created '/tmp/rear.XXX/outputfs/f121/rear-f121.log' with '/tmp/rear.XXX/tmp/rear-f121.log' -- # See -- # https://stackoverflow.com/questions/4669420/have-you-ever-got-this-message-when-moving-a-file-mv-will-not-overwrite-just-c -- # which is about the same for 'mv', how to reproduce it: -- # mkdir a b c -- # touch a/f b/f -- # mv a/f b/f c/ -- # mv: will not overwrite just-created 'c/f' with 'b/f' -- # It happens because two different files with the same name would be moved to the same place with only one command. -- # The -f option won't help for this case, it only applies when there already is a target file that will be overwritten. -- # Accordingly it is sufficient (even without '-f') to copy each result file one by one: -- for result_file in "${RESULT_FILES[@]}" ; do -- -- # note: s390 kernel copy is only through nfs -- # -- # s390 optional naming override of initrd and kernel to match the s390 filesytem naming conventions -- # on s390a there is an option to name the initrd and kernel in the form of -- # file name on s390 are in the form of name type mode -- # the name is the userid or vm name and the type is initrd or kernel -- # if the vm name (cp q userid) is HOSTA then the files written will be HOSTA kernel and HOSTA initrd -- # vars needed: -- # ZVM_NAMING - set in local.conf, if Y then enable naming override -- # ZVM_KERNEL_NAME - keeps track of kernel name in results array -- # ARCH - override only if ARCH is Linux-s390 -- # -- # initrd name override is handled in 900_create_initramfs.sh -- # kernel name override is handled in 400_guess_kernel.sh -- # kernel name override is handled in 950_copy_result_files.sh -- -- if [[ "$ZVM_NAMING" == "Y" && "$ARCH" == "Linux-s390" ]] ; then -- if [[ -z $opath ]] ; then -- Error "Output path is not set, please check OUTPUT_URL in local.conf." -- fi -- -- if [ "$ZVM_KERNEL_NAME" == "$result_file" ] ; then -- VM_UID=$(vmcp q userid |awk '{ print $1 }') -- -- if [[ -z $VM_UID ]] ; then -- Error "VM UID is not set, VM UID is set from call to vmcp. 
Please make sure vmcp is available and 'vmcp q userid' returns VM ID" -- fi -- -- LogPrint "s390 kernel naming override: $result_file will be written as $VM_UID.kernel" -- cp $v "$result_file" $opath/$VM_UID.kernel || Error "Could not copy result file $result_file to $opath/$VM_UID.kernel at $scheme location" -- else -- cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location" -- fi -- else -- cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location" -- fi -- done -- ;; - (fish|ftp|ftps|hftp|http|https|sftp) -- # FIXME: Verify if usage of $array[*] instead of "${array[@]}" is actually intended here -- # see https://github.com/rear/rear/issues/1068 - LogPrint "Copying result files '${RESULT_FILES[*]}' to $scheme location" - Log "lftp -c $OUTPUT_LFTP_OPTIONS; open $OUTPUT_URL; mput ${RESULT_FILES[*]}" - -@@ -111,12 +130,15 @@ case "$scheme" in - (rsync) - # If BACKUP = RSYNC output/RSYNC/default/900_copy_result_files.sh took care of it: - test "$BACKUP" = "RSYNC" && return 0 -- LogPrint "Copying result files '${RESULT_FILES[@]}' to $scheme location" -- Log "rsync -a $v ${RESULT_FILES[@]} ${host}:${path}" -+ LogPrint "Copying result files '${RESULT_FILES[*]}' to $scheme location" -+ Log "rsync -a $v ${RESULT_FILES[*]} ${host}:${path}" - rsync -a $v "${RESULT_FILES[@]}" "${host}:${path}" || Error "Problem transferring result files to $OUTPUT_URL" - ;; - (*) -- Error "Invalid scheme '$scheme' in '$OUTPUT_URL'." -+ # Should be unreachable, if we got here, it is a bug. -+ # Unknown schemes are handled in mount_url(), which tries to mount them and aborts if they are unsupported. -+ # If they can be mounted, they fall under the scheme_supports_filesystem branch above. -+ BugError "Invalid scheme '$scheme' in '$OUTPUT_URL'." - ;; - esac - -diff --git a/usr/share/rear/output/default/970_remove_lock.sh b/usr/share/rear/output/default/970_remove_lock.sh -index 56640839..3b1b97cc 100644 ---- a/usr/share/rear/output/default/970_remove_lock.sh -+++ b/usr/share/rear/output/default/970_remove_lock.sh -@@ -1,10 +1,11 @@ - # remove the lockfile - local scheme=$(url_scheme $OUTPUT_URL) - local path=$(url_path $OUTPUT_URL) --local opath=$(output_path $scheme $path) - --# if $opath is empty return silently (e.g. scheme tape) --[ -z "$opath" ] && return 0 -+# if filesystem access to url is unsupported return silently (e.g. scheme tape) -+scheme_supports_filesystem $scheme || return 0 -+ -+local opath=$( output_path $scheme $path ) - - # when OUTPUT_URL=BACKUP_URL we keep the lockfile to avoid double moves of the directory - [[ "$OUTPUT_URL" != "$BACKUP_URL" ]] && rm -f $v "${opath}/.lockfile" >&2 -diff --git a/usr/share/rear/output/default/980_umount_output_dir.sh b/usr/share/rear/output/default/980_umount_output_dir.sh -index 9a9995bd..abf0cd53 100644 ---- a/usr/share/rear/output/default/980_umount_output_dir.sh -+++ b/usr/share/rear/output/default/980_umount_output_dir.sh -@@ -9,12 +9,3 @@ if [[ -z "$OUTPUT_URL" ]] ; then - fi - - umount_url $OUTPUT_URL $BUILD_DIR/outputfs -- --[[ -d $BUILD_DIR/outputfs/$NETFS_PREFIX ]] && rm -rf $v $BUILD_DIR/outputfs/$NETFS_PREFIX --[[ -d $BUILD_DIR/outputfs/$RSYNC_PREFIX ]] && rm -rf $v $BUILD_DIR/outputfs/$RSYNC_PREFIX -- --rmdir $v $BUILD_DIR/outputfs >&2 --if [[ $? 
-eq 0 ]] ; then -- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask -- RemoveExitTask "rm -Rf $v $BUILD_DIR/outputfs >&2" --fi -diff --git a/usr/share/rear/prep/BORG/default/250_mount_usb.sh b/usr/share/rear/prep/BORG/default/250_mount_usb.sh -index c13fd088..05be0179 100644 ---- a/usr/share/rear/prep/BORG/default/250_mount_usb.sh -+++ b/usr/share/rear/prep/BORG/default/250_mount_usb.sh -@@ -8,10 +8,5 @@ - # When BORGBACKUP_HOST is set, we don't need to mount anything as SSH - # backup destination will be handled internally by Borg it self. - if [[ -z $BORGBACKUP_HOST ]]; then -- # Has to be $verbose, not "$verbose", since it's used as option. -- # shellcheck disable=SC2086,SC2154 -- mkdir -p $verbose "$borg_dst_dev" >&2 -- StopIfError "Could not mkdir '$borg_dst_dev'" -- - mount_url "usb://$USB_DEVICE" "$borg_dst_dev" - fi -diff --git a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh -deleted file mode 100644 -index 2fbcc6cd..00000000 ---- a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh -+++ /dev/null -@@ -1,300 +0,0 @@ --# Copied from ../../NETFS/default/070_set_backup_archive.sh for YUM --### Determine the name of the backup archive --### This needs to be after we special case USB devices. -- --# FIXME: backuparchive is no local variable (regardless that it is lowercased) -- --# If TAPE_DEVICE is specified, use that: --if test "$TAPE_DEVICE" ; then -- backuparchive="$TAPE_DEVICE" -- LogPrint "Using backup archive '$backuparchive'" -- return --fi -- --local backup_file_suffix="$BACKUP_PROG_SUFFIX$BACKUP_PROG_COMPRESS_SUFFIX" --local backup_file_name="$BACKUP_PROG_ARCHIVE$backup_file_suffix" -- --local scheme=$( url_scheme $BACKUP_URL ) --local path=$( url_path $BACKUP_URL ) --case "$scheme" in -- (file|iso) -- # Define the output path according to the scheme -- local outputpath=$( backup_path $scheme $path ) -- backuparchive="$outputpath/$backup_file_name" -- LogPrint "Using backup archive '$backuparchive'" -- return -- ;; -- (tape) -- # TODO: Check if that case is really needed. -- # Perhaps prep/default/030_translate_tape.sh does already all what is needed. -- backuparchive=$path -- LogPrint "Using backup archive '$backuparchive'" -- return -- ;; --esac -- --local backup_directory=$BUILD_DIR/outputfs/$NETFS_PREFIX -- --# Normal (i.e. non-incremental/non-differential) backup: --if ! test "incremental" = "$BACKUP_TYPE" -o "differential" = "$BACKUP_TYPE" ; then -- # In case of normal (i.e. non-incremental) backup there is only one restore archive -- # and its name is the same as the backup archive (usually 'backup.tar.gz'): -- backuparchive="$backup_directory/$backup_file_name" -- LogPrint "Using backup archive '$backuparchive'" -- # This script is also run during "rear recover/restoreonly" where RESTORE_ARCHIVES must be set. -- local backup_restore_workflows=( "recover" "restoreonly" ) -- if IsInArray $WORKFLOW ${backup_restore_workflows[@]} ; then -- # Only set RESTORE_ARCHIVES the backup archive is actually accessible -- # cf. https://github.com/rear/rear/issues/1166 -- if test -r "$backuparchive" ; then -- RESTORE_ARCHIVES=( "$backuparchive" ) -- else -- # In case of USB backup there is the subsequent 540_choose_backup_archive.sh script -- # that shows a backup selection dialog when RESTORE_ARCHIVES is not already set. -- if test "usb" = "$scheme" ; then -- LogPrint "Backup archive '$backuparchive' not readable. Need to select another one." 
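# A minimal sketch, assuming example values, of how the deleted script above
# composes the archive name for a normal (non-incremental) backup; the
# BACKUP_PROG_* values, BUILD_DIR and NETFS_PREFIX below are illustrative
# assumptions, the real values come from default.conf and the user's local.conf.
BUILD_DIR="/tmp/rear.XXXX"                              # assumed example value
NETFS_PREFIX="$HOSTNAME"                                # assumed example prefix
BACKUP_PROG_ARCHIVE="backup"
BACKUP_PROG_SUFFIX=".tar"
BACKUP_PROG_COMPRESS_SUFFIX=".gz"
backup_file_suffix="$BACKUP_PROG_SUFFIX$BACKUP_PROG_COMPRESS_SUFFIX"
backup_file_name="$BACKUP_PROG_ARCHIVE$backup_file_suffix"          # backup.tar.gz
backup_directory="$BUILD_DIR/outputfs/$NETFS_PREFIX"
backuparchive="$backup_directory/$backup_file_name"
echo "Using backup archive '$backuparchive'"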
-- else -- Error "Backup archive '$backuparchive' not readable." -- fi -- fi -- fi -- return --fi -- --# Incremental or differential backup: --set -e -u -o pipefail --# Incremental or differential backup only works for the NETFS backup method --# and only with the 'tar' backup program: --if ! test "NETFS" = "$BACKUP" -a "tar" = "$BACKUP_PROG" ; then -- Error "BACKUP_TYPE incremental or differential only works with BACKUP=NETFS and BACKUP_PROG=tar" --fi --# Incremental or differential backup is currently only known to work with BACKUP_URL=nfs://. --# Other BACKUP_URL schemes may work and at least BACKUP_URL=usb:///... needs special setup --# to work with incremental or differential backup (see https://github.com/rear/rear/issues/1145): --if test "usb" = "$scheme" ; then -- # When USB_SUFFIX is set the compliance mode is used where -- # backup on USB works in compliance with backup on NFS which means -- # a fixed backup directory where incremental or differential backups work. -- # Use plain $USB_SUFFIX and not "$USB_SUFFIX" because when USB_SUFFIX contains only blanks -- # test "$USB_SUFFIX" would result true because test " " results true: -- test $USB_SUFFIX || Error "BACKUP_TYPE incremental or differential requires USB_SUFFIX for BACKUP_URL=usb" --fi --# Incremental or differential backup and keeping old backup contradict each other (mutual exclusive) --# so that NETFS_KEEP_OLD_BACKUP_COPY must not be 'true' in case of incremental or differential backup: --if test "$NETFS_KEEP_OLD_BACKUP_COPY" ; then -- NETFS_KEEP_OLD_BACKUP_COPY="" -- LogPrint "Disabled NETFS_KEEP_OLD_BACKUP_COPY because BACKUP_TYPE incremental or differential does not work with that" --fi --# For incremental or differential backup some date values (weekday, YYYY-MM-DD, HHMM) are needed --# that must be consistent for one single point of the current time which means --# one cannot call the 'date' command several times because then there would be --# a small probability that e.g. weekday, YYYY-MM-DD, HHMM do not match --# one single point in time (in particular when midnight passes in between). --# Therefore the output of one single 'date' call is storend in an array and --# the array elements are then assinged to individual variables as needed: --local current_date_output=( $( date '+%a %Y-%m-%d %H%M' ) ) --local current_weekday="${current_date_output[0]}" --local current_yyyy_mm_dd="${current_date_output[1]}" --local current_hhmm="${current_date_output[2]}" --# The date FULLBACKUP_OUTDATED_DAYS ago is needed to check if the latest full backup is too old. --# When the latest full backup is more than FULLBACKUP_OUTDATED_DAYS ago a new full backup is made. --# This separated call of the 'date' command which is technically needed because it is --# for another point in time (e.g. 7 days ago) is run after the above call of the 'date' --# command for the current time to be on the safe side when midnight passes in between --# both 'date' commands which would then result that a new full backup is made --# when the latest full backup is basically right now FULLBACKUP_OUTDATED_DAYS ago because --# the stored date of the latest full backup is the current date at the time when it was made. --# Example (assuming FULLBACKUP_OUTDATED_DAYS=7 ): --# The latest full backup was made on Sunday January 10 in 2016 (just before midnight). 
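# A minimal standalone sketch of the date handling explained above: one single
# 'date' call is split into weekday, day and time so all three values describe
# the same instant, and a second 'date' call derives the cutoff used to decide
# whether the latest full backup is outdated (FULLBACKUP_OUTDATED_DAYS=7 is the
# documented default, assumed here for illustration).
current_date_output=( $( date '+%a %Y-%m-%d %H%M' ) )
current_weekday="${current_date_output[0]}"
current_yyyy_mm_dd="${current_date_output[1]}"
current_hhmm="${current_date_output[2]}"
FULLBACKUP_OUTDATED_DAYS=7
yyyymmdd_max_days_ago=$( date '+%Y%m%d' --date="$FULLBACKUP_OUTDATED_DAYS days ago" )
echo "now: $current_weekday $current_yyyy_mm_dd $current_hhmm cutoff: $yyyymmdd_max_days_ago"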
--# One week later this script runs again while midnight passes between the two 'date' calls --# so that current_date_output[@]="Sun 2016-01-17 0000" (still Sunday January 17 in 2016) --# and yyyymmdd_max_days_ago=20160111 (already Monday January 11 in 2016), then --# Sunday January 10 is older than Monday January 11 so that a new full backup is made: --test "$FULLBACKUP_OUTDATED_DAYS" || FULLBACKUP_OUTDATED_DAYS="7" --local yyyymmdd_max_days_ago=$( date '+%Y%m%d' --date="$FULLBACKUP_OUTDATED_DAYS days ago" ) --# Full backup file names are of the form YYYY-MM-DD-HHMM-F.tar.gz --# where the 'F' denotes a full backup: --local full_backup_marker="F" --# Incremental backup file names are of the form YYYY-MM-DD-HHMM-I.tar.gz --# where the 'I' denotes an incremental backup: --local incremental_backup_marker="I" --# Differential backup file names are of the form YYYY-MM-DD-HHMM-D.tar.gz --# where the last 'D' denotes a differential backup: --local differential_backup_marker="D" --# In case of incremental or differential backup the RESTORE_ARCHIVES contains --# first the latest full backup file. --# In case of incremental backup the RESTORE_ARCHIVES contains --# after the latest full backup file each incremental backup --# in the ordering how they must be restored. --# For example when the latest full backup was made on Sunday --# plus each subsequent weekday a separated incremental backup was made, --# then during a "rear recover" on Wednesday morning --# first the full backup from Sunday has to be restored, --# then the incremental backup from Monday, and --# finally the incremental backup from Tuesday. --# In case of differential backup the RESTORE_ARCHIVES contains --# after the latest full backup file the latest differential backup. --# For example when the latest full backup was made on Sunday --# plus each subsequent weekday a separated differential backup was made, --# then during a "rear recover" on Wednesday morning --# first the full backup from Sunday has to be restored, --# and finally the differential backup from Tuesday --# (i.e. the differential backup from Monday is skipped). --# The date format YYYY-MM-DD that is used here is crucial. --# It is the ISO 8601 format 'year-month-day' to specify a day of a year --# that is accepted by 'tar' for the '--newer' option, --# see the GNU tar manual section "Operating Only on New Files" --# at https://www.gnu.org/software/tar/manual/html_node/after.html --# and the GNU tar manual section "Calendar date items" --# at https://www.gnu.org/software/tar/manual/html_node/Calendar-date-items.html#SEC124 --local date_glob_regex="[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]" --local date_time_glob_regex="$date_glob_regex-[0-9][0-9][0-9][0-9]" --# Determine what kind of backup must be created, 'full' or 'incremental' or 'differential' --# (the empty default means it is undecided what kind of backup must be created): --local create_backup_type="" --# Code regarding creating a backup is useless during "rear recover" and --# messages about creating a backup are misleading during "rear recover": --local recovery_workflows=( "recover" "layoutonly" "restoreonly" ) --if ! 
IsInArray $WORKFLOW ${recovery_workflows[@]} ; then -- # When today is a specified full backup day, do a full backup in any case -- # (regardless if there is already a full backup of this day): -- if IsInArray "$current_weekday" "${FULLBACKUPDAY[@]}" ; then -- create_backup_type="full" -- LogPrint "Today's weekday ('$current_weekday') is a full backup day that triggers a new full backup in any case" -- fi --fi --# Get the latest full backup (if exists): --local full_backup_glob_regex="$date_time_glob_regex-$full_backup_marker$backup_file_suffix" --# Here things like 'find /path/to/dir -name '*.tar.gz' | sort' are used because --# one cannot use bash globbing via commands like 'ls /path/to/dir/*.tar.gz' --# because /usr/sbin/rear sets the nullglob bash option which leads to plain 'ls' --# when '/path/to/dir/*.tar.gz' matches nothing (i.e. when no backup file exists) --# so that then plain 'ls' would result nonsense. --local latest_full_backup=$( find $backup_directory -name "$full_backup_glob_regex" | sort | tail -n1 ) --# A latest full backup is found: --if test "$latest_full_backup" ; then -- local latest_full_backup_file_name=$( basename "$latest_full_backup" ) -- # The full_or_incremental_backup_glob_regex is also needed below for non-"recover" WORKFLOWs -- # to set the right variables for creating an incremental backup: -- local full_or_incremental_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$incremental_backup_marker]$backup_file_suffix" -- # Code regarding creating a backup is useless during "rear recover" and -- # messages about creating a backup are misleading during "rear recover": -- if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then -- # There is nothing to do here if it is already decided that -- # a full backup must be created (see "full backup day" above"): -- if ! test "full" = "$create_backup_type" ; then -- local latest_full_backup_date=$( echo $latest_full_backup_file_name | grep -o "$date_glob_regex" ) -- local yyyymmdd_latest_full_backup=$( echo $latest_full_backup_date | tr -d '-' ) -- # Check if the latest full backup is too old: -- if test $yyyymmdd_latest_full_backup -lt $yyyymmdd_max_days_ago ; then -- create_backup_type="full" -- LogPrint "Latest full backup date '$latest_full_backup_date' too old (more than $FULLBACKUP_OUTDATED_DAYS days ago) triggers new full backup" -- else -- # When a latest full backup is found that is not too old -- # a BACKUP_TYPE (incremental or differential) backup will be created: -- create_backup_type="$BACKUP_TYPE" -- LogPrint "Latest full backup found ($latest_full_backup_file_name) triggers $BACKUP_TYPE backup" -- fi -- fi -- else -- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: -- case "$BACKUP_TYPE" in -- (incremental) -- # When a latest full backup is found use that plus all later incremental backups for restore: -- # The following command is a bit tricky: -- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-I.tar.gz files in the backup directory and sorts them -- # and finally it outputs only those that match the latest full backup file name and incremental backups that got sorted after that -- # where it is mandatory that the backup file names sort by date (i.e. 
date must be the leading part of the backup file names): -- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" ) ) -- ;; -- (differential) -- # For differential backup use the latest full backup plus the one latest differential backup for restore: -- # The following command is a bit tricky: -- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-D.tar.gz files in the backup directory and sorts them -- # then it outputs only those that match the latest full backup file name and all differential backups that got sorted after that -- # and then it outputs only the first line (i.e. the full backup) and the last line (i.e. the latest differential backup) -- # but when no differential backup exists (i.e. when only the full backup exists) the first line is also the last line -- # so that "sed -n -e '1p;$p'" outputs the full backup twice which is corrected by the final "sort -u": -- local full_or_differential_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$differential_backup_marker]$backup_file_suffix" -- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_differential_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | sed -n -e '1p;$p' | sort -u ) ) -- ;; -- (*) -- BugError "Unexpected BACKUP_TYPE '$BACKUP_TYPE'" -- ;; -- esac -- # Tell the user what will be restored: -- local restore_archives_file_names="" -- for restore_archive in "${RESTORE_ARCHIVES[@]}" ; do -- restore_archives_file_names="$restore_archives_file_names $( basename "$restore_archive" )" -- done -- LogPrint "For backup restore using $restore_archives_file_names" -- fi --# No latest full backup is found: --else -- # Code regarding creating a backup is useless during "rear recover" and -- # messages about creating a backup are misleading during "rear recover": -- if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then -- # If no latest full backup is found create one during "rear mkbackup": -- create_backup_type="full" -- LogPrint "No full backup found (YYYY-MM-DD-HHMM-F.tar.gz) triggers full backup" -- else -- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: -- # If no latest full backup is found (i.e. no file name matches the YYYY-MM-DD-HHMM-F.tar.gz form) -- # fall back to what is done in case of normal (i.e. non-incremental/non-differential) backup -- # and hope for the best (i.e. that a backup_directory/backup_file_name actually exists). -- # In case of normal (i.e. non-incremental/non-differential) backup there is only one restore archive -- # and its name is the same as the backup archive (usually 'backup.tar.gz'). -- # This is only a fallback setting to be more on the safe side for "rear recover". -- # Initially for the very fist run of incremental backup during "rear mkbackup" -- # a full backup file of the YYYY-MM-DD-HHMM-F.tar.gz form will be created. -- RESTORE_ARCHIVES=( "$backup_directory/$backup_file_name" ) -- LogPrint "Using $backup_file_name for backup restore" -- fi --fi --# Code regarding creating a backup is useless during "rear recover" and --# messages about creating a backup are misleading during "rear recover": --if ! 
IsInArray $WORKFLOW ${recovery_workflows[@]} ; then -- # Set the right variables for creating a backup (but do not actually do anything at this point): -- case "$create_backup_type" in -- (full) -- local new_full_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$full_backup_marker$backup_file_suffix" -- backuparchive="$backup_directory/$new_full_backup_file_name" -- BACKUP_PROG_CREATE_NEWER_OPTIONS="-V $new_full_backup_file_name" -- LogPrint "Performing full backup using backup archive '$new_full_backup_file_name'" -- ;; -- (incremental) -- local new_incremental_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$incremental_backup_marker$backup_file_suffix" -- backuparchive="$backup_directory/$new_incremental_backup_file_name" -- # Get the latest latest incremental backup that is based on the latest full backup (if exists): -- local incremental_backup_glob_regex="$date_time_glob_regex-$incremental_backup_marker$backup_file_suffix" -- # First get the latest full backup plus all later incremental backups (cf. how RESTORE_ARCHIVES is set in case of incremental backup) -- # then grep only the incremental backups and from the incremental backups use only the last one (if exists): -- local latest_incremental_backup=$( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | grep "$incremental_backup_glob_regex" | tail -n1 ) -- if test "$latest_incremental_backup" ; then -- # A latest incremental backup that is based on the latest full backup is found: -- local latest_incremental_backup_file_name=$( basename $latest_incremental_backup ) -- LogPrint "Latest incremental backup found ($latest_incremental_backup_file_name) that is newer than the latest full backup" -- local latest_incremental_backup_date=$( echo $latest_incremental_backup_file_name | grep -o "$date_glob_regex" ) -- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_incremental_backup_date -V $latest_incremental_backup_file_name" -- LogPrint "Performing incremental backup for files newer than $latest_incremental_backup_date using backup archive '$new_incremental_backup_file_name'" -- else -- # When there is not yet an incremental backup that is based on the latest full backup -- # the new created incremental backup must be based on the latest full backup: -- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" -- LogPrint "Performing incremental backup for files newer than $latest_full_backup_date using backup archive '$new_incremental_backup_file_name'" -- fi -- ;; -- (differential) -- local new_differential_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$differential_backup_marker$backup_file_suffix" -- backuparchive="$backup_directory/$new_differential_backup_file_name" -- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" -- LogPrint "Performing differential backup for files newer than $latest_full_backup_date using backup archive '$new_differential_backup_file_name'" -- ;; -- (*) -- BugError "Unexpected create_backup_type '$create_backup_type'" -- ;; -- esac --fi --# Go back from "set -e -u -o pipefail" to the defaults: --apply_bash_flags_and_options_commands "$DEFAULT_BASH_FLAGS_AND_OPTIONS_COMMANDS" -- -diff --git a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh -new file mode 120000 -index 00000000..cdbdc31f ---- /dev/null -+++ 
b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh -@@ -0,0 +1 @@ -+../../NETFS/default/070_set_backup_archive.sh -\ No newline at end of file -diff --git a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh -deleted file mode 100644 -index 64b7a792..00000000 ---- a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh -+++ /dev/null -@@ -1,15 +0,0 @@ --# create mount point --if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then -- mkdir -p $v "$BUILD_DIR/outputfs" >&2 -- StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" -- -- AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" -- -- if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then -- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD" -- fi -- -- mount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs $BACKUP_DUPLICITY_NETFS_OPTIONS -- -- BACKUP_DUPLICITY_URL="file://$BUILD_DIR/outputfs" --fi -diff --git a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh -new file mode 120000 -index 00000000..7f558c5d ---- /dev/null -+++ b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh -@@ -0,0 +1 @@ -+../../../backup/DUPLICITY/default/100_mount_duplicity_path.sh -\ No newline at end of file -diff --git a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh -deleted file mode 100644 -index 60aa811e..00000000 ---- a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh -+++ /dev/null -@@ -1,15 +0,0 @@ --# umount mountpoint --if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]; then -- -- if [[ "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]] ; then -- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_UMOUNTCMD" -- fi -- -- umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs -- -- rmdir $v $BUILD_DIR/outputfs >&2 -- if [[ $? -eq 0 ]] ; then -- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask -- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" -- fi --fi -diff --git a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh -new file mode 120000 -index 00000000..b7e47be1 ---- /dev/null -+++ b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh -@@ -0,0 +1 @@ -+../../../backup/DUPLICITY/default/980_unmount_duplicity_path.sh -\ No newline at end of file -diff --git a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh -deleted file mode 100644 -index 7de92af4..00000000 ---- a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh -+++ /dev/null -@@ -1,13 +0,0 @@ --# Copied from ../../NETFS/default/100_mount_NETFS_path.sh a.k.a. 
../../../backup/NETFS/default/100_mount_NETFS_path.sh for YUM -- --# create mount point --mkdir -p $v "$BUILD_DIR/outputfs" >&2 --StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" -- --AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" -- --if [[ "$BACKUP_MOUNTCMD" ]] ; then -- BACKUP_URL="var://BACKUP_MOUNTCMD" --fi -- --mount_url $BACKUP_URL $BUILD_DIR/outputfs $BACKUP_OPTIONS -diff --git a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh -new file mode 120000 -index 00000000..60e0f83f ---- /dev/null -+++ b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh -@@ -0,0 +1 @@ -+../../NETFS/default/100_mount_NETFS_path.sh -\ No newline at end of file -diff --git a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh -deleted file mode 100644 -index d02dcf34..00000000 ---- a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh -+++ /dev/null -@@ -1,15 +0,0 @@ --# Copied from ../../../backup/NETFS/default/980_umount_NETFS_dir.sh for YUM -- --# umount NETFS mountpoint -- --if [[ "$BACKUP_UMOUNTCMD" ]] ; then -- BACKUP_URL="var://BACKUP_UMOUNTCMD" --fi -- --umount_url $BACKUP_URL $BUILD_DIR/outputfs -- --rmdir $v $BUILD_DIR/outputfs >&2 --if [[ $? -eq 0 ]] ; then -- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask -- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" --fi -diff --git a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh -new file mode 120000 -index 00000000..2c29cb57 ---- /dev/null -+++ b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh -@@ -0,0 +1 @@ -+../../NETFS/default/980_umount_NETFS_dir.sh -\ No newline at end of file -diff --git a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh -deleted file mode 100644 -index 64b7a792..00000000 ---- a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh -+++ /dev/null -@@ -1,15 +0,0 @@ --# create mount point --if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then -- mkdir -p $v "$BUILD_DIR/outputfs" >&2 -- StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" -- -- AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" -- -- if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then -- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD" -- fi -- -- mount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs $BACKUP_DUPLICITY_NETFS_OPTIONS -- -- BACKUP_DUPLICITY_URL="file://$BUILD_DIR/outputfs" --fi -diff --git a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh -new file mode 120000 -index 00000000..7f558c5d ---- /dev/null -+++ b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh -@@ -0,0 +1 @@ -+../../../backup/DUPLICITY/default/100_mount_duplicity_path.sh -\ No newline at end of file -diff --git a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh -deleted file mode 100644 -index 60aa811e..00000000 ---- a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh -+++ /dev/null -@@ -1,15 +0,0 @@ --# umount mountpoint --if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]; then -- -- if [[ "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]] ; then 
-- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_UMOUNTCMD" -- fi -- -- umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs -- -- rmdir $v $BUILD_DIR/outputfs >&2 -- if [[ $? -eq 0 ]] ; then -- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask -- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" -- fi --fi -diff --git a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh -new file mode 120000 -index 00000000..b7e47be1 ---- /dev/null -+++ b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh -@@ -0,0 +1 @@ -+../../../backup/DUPLICITY/default/980_unmount_duplicity_path.sh -\ No newline at end of file -diff --git a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh -deleted file mode 100644 -index cfd70026..00000000 ---- a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh -+++ /dev/null -@@ -1,116 +0,0 @@ --# Copied from ../../../prep/NETFS/default/050_check_NETFS_requirements.sh for YUM --# BACKUP_URL=[proto]://[host]/[share] --# example: nfs://lucky/temp/backup --# example: cifs://lucky/temp --# example: usb:///dev/sdb1 --# example: tape:///dev/nst0 --# example: file:///path --# example: iso://backup/ --# example: sshfs://user@host/G/rear/ --# example: ftpfs://user:password@host/rear/ (the password part is optional) -- --[[ "$BACKUP_URL" || "$BACKUP_MOUNTCMD" ]] --# FIXME: The above test does not match the error message below. --# To match the the error message the test should be --# [[ "$BACKUP_URL" || ( "$BACKUP_MOUNTCMD" && "$BACKUP_UMOUNTCMD" ) ]] --# but I cannot decide if there is a subtle reason for the omission. --StopIfError "You must specify either BACKUP_URL or BACKUP_MOUNTCMD and BACKUP_UMOUNTCMD !" -- --if [[ "$BACKUP_URL" ]] ; then -- local scheme=$( url_scheme $BACKUP_URL ) -- local hostname=$( url_hostname $BACKUP_URL ) -- local path=$( url_path $BACKUP_URL ) -- -- ### check for vaild BACKUP_URL schemes -- ### see https://github.com/rear/rear/issues/842 -- case $scheme in -- (nfs|cifs|usb|tape|file|iso|sshfs|ftpfs) -- # do nothing for vaild BACKUP_URL schemes -- : -- ;; -- (*) -- Error "Invalid scheme '$scheme' in BACKUP_URL '$BACKUP_URL' valid schemes: nfs cifs usb tape file iso sshfs ftpfs" -- ;; -- esac -- -- ### set other variables from BACKUP_URL -- if [[ "usb" = "$scheme" ]] ; then -- # if USB_DEVICE is not explicitly specified it is the path from BACKUP_URL -- [[ -z "$USB_DEVICE" ]] && USB_DEVICE="$path" -- fi -- -- ### check if host is reachable -- if [[ "$PING" && "$hostname" ]] ; then -- # Only LogPrintIfError but no StopIfError because it is not a fatal error -- # (i.e. not a reason to abort) when a host does not respond to a 'ping' -- # because hosts can be accessible via certain ports but do not respond to a 'ping' -- # cf. https://bugzilla.opensuse.org/show_bug.cgi?id=616706 -- # TODO: it would be better to test if it is accessible via the actually needed port(s) -- ping -c 2 "$hostname" >/dev/null -- LogPrintIfError "Host '$hostname' in BACKUP_URL '$BACKUP_URL' does not respond to a 'ping'." 
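# A minimal standalone sketch of the non-fatal reachability test above; outside
# ReaR the LogPrintIfError helper is not available, so a plain warning is
# printed instead, and the BACKUP_URL value and the hostname extraction are
# illustrative assumptions (ReaR itself uses its url_hostname helper for this).
BACKUP_URL="nfs://lucky/temp/backup"                    # assumed example URL
backup_host="${BACKUP_URL#*://}" ; backup_host="${backup_host%%/*}"
if ! ping -c 2 "$backup_host" >/dev/null 2>&1 ; then
    echo "Warning: host '$backup_host' in BACKUP_URL '$BACKUP_URL' does not respond to a 'ping'" >&2
fi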
-- else -- Log "Skipping 'ping' test for host '$hostname' in BACKUP_URL '$BACKUP_URL'" -- fi -- --fi -- --# some backup progs require a different backuparchive name --case "$(basename $BACKUP_PROG)" in -- (rsync) -- # rsync creates a target directory instead of a file -- BACKUP_PROG_SUFFIX= -- BACKUP_PROG_COMPRESS_SUFFIX= -- ;; -- (*) -- : -- ;; --esac -- --# include required programs --# the code below includes mount.* and umount.* programs for all non-empty schemes --# (i.e. for any non-empty BACKUP_URL like usb tape file sshfs ftpfs) --# and it includes 'mount.' for empty schemes (e.g. if BACKUP_URL is not set) --# which is o.k. because it is a catch all rule so we do not miss any --# important executable needed a certain scheme and it does not hurt --# see https://github.com/rear/rear/pull/859 --PROGS+=( --showmount --mount.$(url_scheme $BACKUP_URL) --umount.$(url_scheme $BACKUP_URL) --$( test "$BACKUP_MOUNTCMD" && echo "${BACKUP_MOUNTCMD%% *}" ) --$( test "$BACKUP_UMOUNTCMD" && echo "${BACKUP_UMOUNTCMD%% *}" ) --$BACKUP_PROG --gzip --bzip2 --xz --) -- --# include required stuff for sshfs or ftpfs (via CurlFtpFS) --if [[ "sshfs" = "$scheme" || "ftpfs" = "$scheme" ]] ; then -- # both sshfs and ftpfs (via CurlFtpFS) are based on FUSE -- PROGS+=( fusermount mount.fuse ) -- MODULES+=( fuse ) -- MODULES_LOAD+=( fuse ) -- COPY_AS_IS+=( /etc/fuse.conf ) -- # include what is specific for sshfs -- if [[ "sshfs" = "$scheme" ]] ; then -- # see http://sourceforge.net/apps/mediawiki/fuse/index.php?title=SshfsFaq -- REQUIRED_PROGS+=( sshfs ssh ) -- # relying on 500_ssh.sh to take a long the SSH related files -- fi -- # include what is specific for ftpfs -- if [[ "ftpfs" = "$scheme" ]] ; then -- # see http://curlftpfs.sourceforge.net/ -- # and https://github.com/rear/rear/issues/845 -- REQUIRED_PROGS+=( curlftpfs ) -- fi --fi -- --# include required modules, like nfs cifs ... --# the code below includes modules for all non-empty schemes --# (i.e. for any non-empty BACKUP_URL like usb tape file sshfs ftpfs) --# which is o.k. 
because this must been seen as a catch all rule --# (one never knows what one could miss) --# see https://github.com/rear/rear/pull/859 --MODULES+=( $(url_scheme $BACKUP_URL) ) -- -diff --git a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh -new file mode 120000 -index 00000000..af1512d6 ---- /dev/null -+++ b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh -@@ -0,0 +1 @@ -+../../NETFS/default/050_check_NETFS_requirements.sh -\ No newline at end of file -diff --git a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh -deleted file mode 100644 -index f7e31ed6..00000000 ---- a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh -+++ /dev/null -@@ -1,12 +0,0 @@ --# Copied from ../../../backup/NETFS/default/100_mount_NETFS_path.sh for YUM --# create mount point --mkdir -p $v "$BUILD_DIR/outputfs" >&2 --StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" -- --AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" -- --if [[ "$BACKUP_MOUNTCMD" ]] ; then -- BACKUP_URL="var://BACKUP_MOUNTCMD" --fi -- --mount_url $BACKUP_URL $BUILD_DIR/outputfs $BACKUP_OPTIONS -diff --git a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh -new file mode 120000 -index 00000000..73dd4697 ---- /dev/null -+++ b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh -@@ -0,0 +1 @@ -+../../../restore/YUM/default/100_mount_YUM_path.sh -\ No newline at end of file -diff --git a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh -deleted file mode 100644 -index 86d1708d..00000000 ---- a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh -+++ /dev/null -@@ -1,300 +0,0 @@ --# Copied from ../../../prep/NETFS/default/070_set_backup_archive.sh for YUM --### Determine the name of the backup archive --### This needs to be after we special case USB devices. -- --# FIXME: backuparchive is no local variable (regardless that it is lowercased) -- --# If TAPE_DEVICE is specified, use that: --if test "$TAPE_DEVICE" ; then -- backuparchive="$TAPE_DEVICE" -- LogPrint "Using backup archive '$backuparchive'" -- return --fi -- --local backup_file_suffix="$BACKUP_PROG_SUFFIX$BACKUP_PROG_COMPRESS_SUFFIX" --local backup_file_name="$BACKUP_PROG_ARCHIVE$backup_file_suffix" -- --local scheme=$( url_scheme $BACKUP_URL ) --local path=$( url_path $BACKUP_URL ) --case "$scheme" in -- (file|iso) -- # Define the output path according to the scheme -- local outputpath=$( backup_path $scheme $path ) -- backuparchive="$outputpath/$backup_file_name" -- LogPrint "Using backup archive '$backuparchive'" -- return -- ;; -- (tape) -- # TODO: Check if that case is really needed. -- # Perhaps prep/default/030_translate_tape.sh does already all what is needed. -- backuparchive=$path -- LogPrint "Using backup archive '$backuparchive'" -- return -- ;; --esac -- --local backup_directory=$BUILD_DIR/outputfs/$NETFS_PREFIX -- --# Normal (i.e. non-incremental/non-differential) backup: --if ! test "incremental" = "$BACKUP_TYPE" -o "differential" = "$BACKUP_TYPE" ; then -- # In case of normal (i.e. 
non-incremental) backup there is only one restore archive -- # and its name is the same as the backup archive (usually 'backup.tar.gz'): -- backuparchive="$backup_directory/$backup_file_name" -- LogPrint "Using backup archive '$backuparchive'" -- # This script is also run during "rear recover/restoreonly" where RESTORE_ARCHIVES must be set. -- local backup_restore_workflows=( "recover" "restoreonly" ) -- if IsInArray $WORKFLOW ${backup_restore_workflows[@]} ; then -- # Only set RESTORE_ARCHIVES the backup archive is actually accessible -- # cf. https://github.com/rear/rear/issues/1166 -- if test -r "$backuparchive" ; then -- RESTORE_ARCHIVES=( "$backuparchive" ) -- else -- # In case of USB backup there is the subsequent 540_choose_backup_archive.sh script -- # that shows a backup selection dialog when RESTORE_ARCHIVES is not already set. -- if test "usb" = "$scheme" ; then -- LogPrint "Backup archive '$backuparchive' not readable. Need to select another one." -- else -- Error "Backup archive '$backuparchive' not readable." -- fi -- fi -- fi -- return --fi -- --# Incremental or differential backup: --set -e -u -o pipefail --# Incremental or differential backup only works for the NETFS backup method --# and only with the 'tar' backup program: --if ! test "NETFS" = "$BACKUP" -a "tar" = "$BACKUP_PROG" ; then -- Error "BACKUP_TYPE incremental or differential only works with BACKUP=NETFS and BACKUP_PROG=tar" --fi --# Incremental or differential backup is currently only known to work with BACKUP_URL=nfs://. --# Other BACKUP_URL schemes may work and at least BACKUP_URL=usb:///... needs special setup --# to work with incremental or differential backup (see https://github.com/rear/rear/issues/1145): --if test "usb" = "$scheme" ; then -- # When USB_SUFFIX is set the compliance mode is used where -- # backup on USB works in compliance with backup on NFS which means -- # a fixed backup directory where incremental or differential backups work. -- # Use plain $USB_SUFFIX and not "$USB_SUFFIX" because when USB_SUFFIX contains only blanks -- # test "$USB_SUFFIX" would result true because test " " results true: -- test $USB_SUFFIX || Error "BACKUP_TYPE incremental or differential requires USB_SUFFIX for BACKUP_URL=usb" --fi --# Incremental or differential backup and keeping old backup contradict each other (mutual exclusive) --# so that NETFS_KEEP_OLD_BACKUP_COPY must not be 'true' in case of incremental or differential backup: --if test "$NETFS_KEEP_OLD_BACKUP_COPY" ; then -- NETFS_KEEP_OLD_BACKUP_COPY="" -- LogPrint "Disabled NETFS_KEEP_OLD_BACKUP_COPY because BACKUP_TYPE incremental or differential does not work with that" --fi --# For incremental or differential backup some date values (weekday, YYYY-MM-DD, HHMM) are needed --# that must be consistent for one single point of the current time which means --# one cannot call the 'date' command several times because then there would be --# a small probability that e.g. weekday, YYYY-MM-DD, HHMM do not match --# one single point in time (in particular when midnight passes in between). --# Therefore the output of one single 'date' call is storend in an array and --# the array elements are then assinged to individual variables as needed: --local current_date_output=( $( date '+%a %Y-%m-%d %H%M' ) ) --local current_weekday="${current_date_output[0]}" --local current_yyyy_mm_dd="${current_date_output[1]}" --local current_hhmm="${current_date_output[2]}" --# The date FULLBACKUP_OUTDATED_DAYS ago is needed to check if the latest full backup is too old. 
--# When the latest full backup is more than FULLBACKUP_OUTDATED_DAYS ago a new full backup is made. --# This separated call of the 'date' command which is technically needed because it is --# for another point in time (e.g. 7 days ago) is run after the above call of the 'date' --# command for the current time to be on the safe side when midnight passes in between --# both 'date' commands which would then result that a new full backup is made --# when the latest full backup is basically right now FULLBACKUP_OUTDATED_DAYS ago because --# the stored date of the latest full backup is the current date at the time when it was made. --# Example (assuming FULLBACKUP_OUTDATED_DAYS=7 ): --# The latest full backup was made on Sunday January 10 in 2016 (just before midnight). --# One week later this script runs again while midnight passes between the two 'date' calls --# so that current_date_output[@]="Sun 2016-01-17 0000" (still Sunday January 17 in 2016) --# and yyyymmdd_max_days_ago=20160111 (already Monday January 11 in 2016), then --# Sunday January 10 is older than Monday January 11 so that a new full backup is made: --test "$FULLBACKUP_OUTDATED_DAYS" || FULLBACKUP_OUTDATED_DAYS="7" --local yyyymmdd_max_days_ago=$( date '+%Y%m%d' --date="$FULLBACKUP_OUTDATED_DAYS days ago" ) --# Full backup file names are of the form YYYY-MM-DD-HHMM-F.tar.gz --# where the 'F' denotes a full backup: --local full_backup_marker="F" --# Incremental backup file names are of the form YYYY-MM-DD-HHMM-I.tar.gz --# where the 'I' denotes an incremental backup: --local incremental_backup_marker="I" --# Differential backup file names are of the form YYYY-MM-DD-HHMM-D.tar.gz --# where the last 'D' denotes a differential backup: --local differential_backup_marker="D" --# In case of incremental or differential backup the RESTORE_ARCHIVES contains --# first the latest full backup file. --# In case of incremental backup the RESTORE_ARCHIVES contains --# after the latest full backup file each incremental backup --# in the ordering how they must be restored. --# For example when the latest full backup was made on Sunday --# plus each subsequent weekday a separated incremental backup was made, --# then during a "rear recover" on Wednesday morning --# first the full backup from Sunday has to be restored, --# then the incremental backup from Monday, and --# finally the incremental backup from Tuesday. --# In case of differential backup the RESTORE_ARCHIVES contains --# after the latest full backup file the latest differential backup. --# For example when the latest full backup was made on Sunday --# plus each subsequent weekday a separated differential backup was made, --# then during a "rear recover" on Wednesday morning --# first the full backup from Sunday has to be restored, --# and finally the differential backup from Tuesday --# (i.e. the differential backup from Monday is skipped). --# The date format YYYY-MM-DD that is used here is crucial. 
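# A minimal sketch, with assumed example values, of the restore archive
# selection described in the comments above for BACKUP_TYPE=incremental:
# list every full (-F) and incremental (-I) archive, sort by the leading
# date, and keep the latest full backup plus all incrementals sorted after
# it - the same find | sort | sed pipeline the script uses further below.
backup_directory="/mnt/backup/$HOSTNAME"                            # assumed example path
backup_file_suffix=".tar.gz"                                        # assumed example suffix
date_time_glob_regex="[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-[0-9][0-9][0-9][0-9]"
full_or_incremental_backup_glob_regex="$date_time_glob_regex-[FI]$backup_file_suffix"
latest_full_backup_file_name="2016-01-10-2359-F$backup_file_suffix" # assumed latest full backup
RESTORE_ARCHIVES=( $( find "$backup_directory" -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" ) )
printf '%s\n' "${RESTORE_ARCHIVES[@]}"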
--# It is the ISO 8601 format 'year-month-day' to specify a day of a year --# that is accepted by 'tar' for the '--newer' option, --# see the GNU tar manual section "Operating Only on New Files" --# at https://www.gnu.org/software/tar/manual/html_node/after.html --# and the GNU tar manual section "Calendar date items" --# at https://www.gnu.org/software/tar/manual/html_node/Calendar-date-items.html#SEC124 --local date_glob_regex="[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]" --local date_time_glob_regex="$date_glob_regex-[0-9][0-9][0-9][0-9]" --# Determine what kind of backup must be created, 'full' or 'incremental' or 'differential' --# (the empty default means it is undecided what kind of backup must be created): --local create_backup_type="" --# Code regarding creating a backup is useless during "rear recover" and --# messages about creating a backup are misleading during "rear recover": --local recovery_workflows=( "recover" "layoutonly" "restoreonly" ) --if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then -- # When today is a specified full backup day, do a full backup in any case -- # (regardless if there is already a full backup of this day): -- if IsInArray "$current_weekday" "${FULLBACKUPDAY[@]}" ; then -- create_backup_type="full" -- LogPrint "Today's weekday ('$current_weekday') is a full backup day that triggers a new full backup in any case" -- fi --fi --# Get the latest full backup (if exists): --local full_backup_glob_regex="$date_time_glob_regex-$full_backup_marker$backup_file_suffix" --# Here things like 'find /path/to/dir -name '*.tar.gz' | sort' are used because --# one cannot use bash globbing via commands like 'ls /path/to/dir/*.tar.gz' --# because /usr/sbin/rear sets the nullglob bash option which leads to plain 'ls' --# when '/path/to/dir/*.tar.gz' matches nothing (i.e. when no backup file exists) --# so that then plain 'ls' would result nonsense. --local latest_full_backup=$( find $backup_directory -name "$full_backup_glob_regex" | sort | tail -n1 ) --# A latest full backup is found: --if test "$latest_full_backup" ; then -- local latest_full_backup_file_name=$( basename "$latest_full_backup" ) -- # The full_or_incremental_backup_glob_regex is also needed below for non-"recover" WORKFLOWs -- # to set the right variables for creating an incremental backup: -- local full_or_incremental_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$incremental_backup_marker]$backup_file_suffix" -- # Code regarding creating a backup is useless during "rear recover" and -- # messages about creating a backup are misleading during "rear recover": -- if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then -- # There is nothing to do here if it is already decided that -- # a full backup must be created (see "full backup day" above"): -- if ! 
test "full" = "$create_backup_type" ; then -- local latest_full_backup_date=$( echo $latest_full_backup_file_name | grep -o "$date_glob_regex" ) -- local yyyymmdd_latest_full_backup=$( echo $latest_full_backup_date | tr -d '-' ) -- # Check if the latest full backup is too old: -- if test $yyyymmdd_latest_full_backup -lt $yyyymmdd_max_days_ago ; then -- create_backup_type="full" -- LogPrint "Latest full backup date '$latest_full_backup_date' too old (more than $FULLBACKUP_OUTDATED_DAYS days ago) triggers new full backup" -- else -- # When a latest full backup is found that is not too old -- # a BACKUP_TYPE (incremental or differential) backup will be created: -- create_backup_type="$BACKUP_TYPE" -- LogPrint "Latest full backup found ($latest_full_backup_file_name) triggers $BACKUP_TYPE backup" -- fi -- fi -- else -- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: -- case "$BACKUP_TYPE" in -- (incremental) -- # When a latest full backup is found use that plus all later incremental backups for restore: -- # The following command is a bit tricky: -- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-I.tar.gz files in the backup directory and sorts them -- # and finally it outputs only those that match the latest full backup file name and incremental backups that got sorted after that -- # where it is mandatory that the backup file names sort by date (i.e. date must be the leading part of the backup file names): -- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" ) ) -- ;; -- (differential) -- # For differential backup use the latest full backup plus the one latest differential backup for restore: -- # The following command is a bit tricky: -- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-D.tar.gz files in the backup directory and sorts them -- # then it outputs only those that match the latest full backup file name and all differential backups that got sorted after that -- # and then it outputs only the first line (i.e. the full backup) and the last line (i.e. the latest differential backup) -- # but when no differential backup exists (i.e. when only the full backup exists) the first line is also the last line -- # so that "sed -n -e '1p;$p'" outputs the full backup twice which is corrected by the final "sort -u": -- local full_or_differential_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$differential_backup_marker]$backup_file_suffix" -- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_differential_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | sed -n -e '1p;$p' | sort -u ) ) -- ;; -- (*) -- BugError "Unexpected BACKUP_TYPE '$BACKUP_TYPE'" -- ;; -- esac -- # Tell the user what will be restored: -- local restore_archives_file_names="" -- for restore_archive in "${RESTORE_ARCHIVES[@]}" ; do -- restore_archives_file_names="$restore_archives_file_names $( basename "$restore_archive" )" -- done -- LogPrint "For backup restore using $restore_archives_file_names" -- fi --# No latest full backup is found: --else -- # Code regarding creating a backup is useless during "rear recover" and -- # messages about creating a backup are misleading during "rear recover": -- if ! 
IsInArray $WORKFLOW ${recovery_workflows[@]} ; then -- # If no latest full backup is found create one during "rear mkbackup": -- create_backup_type="full" -- LogPrint "No full backup found (YYYY-MM-DD-HHMM-F.tar.gz) triggers full backup" -- else -- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: -- # If no latest full backup is found (i.e. no file name matches the YYYY-MM-DD-HHMM-F.tar.gz form) -- # fall back to what is done in case of normal (i.e. non-incremental/non-differential) backup -- # and hope for the best (i.e. that a backup_directory/backup_file_name actually exists). -- # In case of normal (i.e. non-incremental/non-differential) backup there is only one restore archive -- # and its name is the same as the backup archive (usually 'backup.tar.gz'). -- # This is only a fallback setting to be more on the safe side for "rear recover". -- # Initially for the very fist run of incremental backup during "rear mkbackup" -- # a full backup file of the YYYY-MM-DD-HHMM-F.tar.gz form will be created. -- RESTORE_ARCHIVES=( "$backup_directory/$backup_file_name" ) -- LogPrint "Using $backup_file_name for backup restore" -- fi --fi --# Code regarding creating a backup is useless during "rear recover" and --# messages about creating a backup are misleading during "rear recover": --if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then -- # Set the right variables for creating a backup (but do not actually do anything at this point): -- case "$create_backup_type" in -- (full) -- local new_full_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$full_backup_marker$backup_file_suffix" -- backuparchive="$backup_directory/$new_full_backup_file_name" -- BACKUP_PROG_CREATE_NEWER_OPTIONS="-V $new_full_backup_file_name" -- LogPrint "Performing full backup using backup archive '$new_full_backup_file_name'" -- ;; -- (incremental) -- local new_incremental_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$incremental_backup_marker$backup_file_suffix" -- backuparchive="$backup_directory/$new_incremental_backup_file_name" -- # Get the latest latest incremental backup that is based on the latest full backup (if exists): -- local incremental_backup_glob_regex="$date_time_glob_regex-$incremental_backup_marker$backup_file_suffix" -- # First get the latest full backup plus all later incremental backups (cf. 
how RESTORE_ARCHIVES is set in case of incremental backup) -- # then grep only the incremental backups and from the incremental backups use only the last one (if exists): -- local latest_incremental_backup=$( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | grep "$incremental_backup_glob_regex" | tail -n1 ) -- if test "$latest_incremental_backup" ; then -- # A latest incremental backup that is based on the latest full backup is found: -- local latest_incremental_backup_file_name=$( basename $latest_incremental_backup ) -- LogPrint "Latest incremental backup found ($latest_incremental_backup_file_name) that is newer than the latest full backup" -- local latest_incremental_backup_date=$( echo $latest_incremental_backup_file_name | grep -o "$date_glob_regex" ) -- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_incremental_backup_date -V $latest_incremental_backup_file_name" -- LogPrint "Performing incremental backup for files newer than $latest_incremental_backup_date using backup archive '$new_incremental_backup_file_name'" -- else -- # When there is not yet an incremental backup that is based on the latest full backup -- # the new created incremental backup must be based on the latest full backup: -- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" -- LogPrint "Performing incremental backup for files newer than $latest_full_backup_date using backup archive '$new_incremental_backup_file_name'" -- fi -- ;; -- (differential) -- local new_differential_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$differential_backup_marker$backup_file_suffix" -- backuparchive="$backup_directory/$new_differential_backup_file_name" -- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" -- LogPrint "Performing differential backup for files newer than $latest_full_backup_date using backup archive '$new_differential_backup_file_name'" -- ;; -- (*) -- BugError "Unexpected create_backup_type '$create_backup_type'" -- ;; -- esac --fi --# Go back from "set -e -u -o pipefail" to the defaults: --apply_bash_flags_and_options_commands "$DEFAULT_BASH_FLAGS_AND_OPTIONS_COMMANDS" -- -diff --git a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh -new file mode 120000 -index 00000000..b8de3d9e ---- /dev/null -+++ b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh -@@ -0,0 +1 @@ -+../../../prep/YUM/default/070_set_backup_archive.sh -\ No newline at end of file -diff --git a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh -deleted file mode 100644 -index dc719e38..00000000 ---- a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh -+++ /dev/null -@@ -1,14 +0,0 @@ --# Copied from ../../../backup/NETFS/default/980_umount_NETFS_dir.sh for YUM --# umount NETFS mountpoint -- --if [[ "$BACKUP_UMOUNTCMD" ]] ; then -- BACKUP_URL="var://BACKUP_UMOUNTCMD" --fi -- --umount_url $BACKUP_URL $BUILD_DIR/outputfs -- --rmdir $v $BUILD_DIR/outputfs >&2 --if [[ $? 
-eq 0 ]] ; then -- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask -- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" --fi -diff --git a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh -new file mode 120000 -index 00000000..ada5ea50 ---- /dev/null -+++ b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh -@@ -0,0 +1 @@ -+../../../restore/YUM/default/980_umount_YUM_dir.sh -\ No newline at end of file diff --git a/rear-bz1983013.patch b/rear-bz1983013.patch deleted file mode 100644 index f8032bb..0000000 --- a/rear-bz1983013.patch +++ /dev/null @@ -1,68 +0,0 @@ -diff --git a/usr/share/rear/conf/Linux-ppc64.conf b/usr/share/rear/conf/Linux-ppc64.conf -index 7e20ddc7..d7774062 100644 ---- a/usr/share/rear/conf/Linux-ppc64.conf -+++ b/usr/share/rear/conf/Linux-ppc64.conf -@@ -1,18 +1,26 @@ --REQUIRED_PROGS+=( sfdisk ) -+REQUIRED_PROGS+=( sfdisk ofpathname ) - - PROGS+=( - mkofboot - ofpath - ybin - yabootconfig --bootlist - pseries_platform - nvram --ofpathname - bc - agetty - ) - -+if grep -q "emulated by qemu" /proc/cpuinfo ; then -+ # Qemu/KVM virtual machines don't need bootlist - don't complain if -+ # it is missing -+ PROGS+=( bootlist ) -+else -+ # PowerVM environment, we need to run bootlist, otherwise -+ # we can't make the system bpotable. Be strict about requiring it -+ REQUIRED_PROGS+=( bootlist ) -+fi -+ - COPY_AS_IS+=( - /usr/lib/yaboot/yaboot - /usr/lib/yaboot/ofboot -diff --git a/usr/share/rear/conf/Linux-ppc64le.conf b/usr/share/rear/conf/Linux-ppc64le.conf -index d00154a2..df8066ea 100644 ---- a/usr/share/rear/conf/Linux-ppc64le.conf -+++ b/usr/share/rear/conf/Linux-ppc64le.conf -@@ -1,10 +1,8 @@ - REQUIRED_PROGS+=( sfdisk ) - - PROGS+=( --bootlist - pseries_platform - nvram --ofpathname - bc - agetty - ) -@@ -17,4 +15,18 @@ agetty - if [[ $(awk '/platform/ {print $NF}' < /proc/cpuinfo) != PowerNV ]] ; then - # No firmware files when ppc64le Linux is not run in BareMetal Mode (PowerNV): - test "${FIRMWARE_FILES[*]}" || FIRMWARE_FILES=( 'no' ) -+ # grub2-install for powerpc-ieee1275 calls ofpathname, so without it, -+ # the rescue system can't make the recovered system bootable -+ REQUIRED_PROGS+=( ofpathname ) -+ if grep -q "emulated by qemu" /proc/cpuinfo ; then -+ # Qemu/KVM virtual machines don't need bootlist - don't complain if -+ # it is missing -+ PROGS+=( bootlist ) -+ else -+ # PowerVM environment, we need to run bootlist, otherwise -+ # we can't make the system bpotable. Be strict about requiring it -+ REQUIRED_PROGS+=( bootlist ) -+ fi -+else -+ PROGS+=( ofpathname bootlist ) - fi diff --git a/rear-bz1993296.patch b/rear-bz1993296.patch deleted file mode 100644 index 15e65a2..0000000 --- a/rear-bz1993296.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 4233fe30b315737ac8c4d857e2b04e021c2e2886 Mon Sep 17 00:00:00 2001 -From: Pavel Cahyna -Date: Mon, 16 Aug 2021 10:10:38 +0300 -Subject: [PATCH] Revert the main part of PR #2299 - -multipath -l is very slow with many multipath devices. As it will be -called for every multipath device, it leads to quadratic time complexity -in the number of multipath devices. For thousands of devices, ReaR can -take hours to scan and exclude them. We therefore have to comment -multipath -l out, as it is a huge performance regression, and find -another solution to bug #2298. 
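A minimal sketch (device name assumed) of the cheap per-device test this patch relies on instead of 'multipath -l': a single block device is checked with 'multipath -c', which only asks whether that one device should be a path in a multipath device and avoids enumerating every existing map.

device=sda                                  # assumed example device name
if multipath -c "/dev/$device" &>/dev/null ; then
    echo "/dev/$device should be a path in a multipath device"
else
    echo "/dev/$device is not a multipath path"
fi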
---- - usr/share/rear/lib/layout-functions.sh | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh -index cdd81a14..8c8be74b 100644 ---- a/usr/share/rear/lib/layout-functions.sh -+++ b/usr/share/rear/lib/layout-functions.sh -@@ -771,7 +771,10 @@ function is_multipath_path { - # so that no "multipath -l" output could clutter the log (the "multipath -l" output is irrelevant here) - # in contrast to e.g. test "$( multipath -l )" that would falsely succeed with blank output - # and the output would appear in the log in 'set -x' debugscript mode: -- multipath -l | grep -q '[[:alnum:]]' || return 1 -+ # -+ # Unfortunately, multipat -l is quite slow with many multipath devices -+ # and becomes a performance bottleneck, so we must comment it out for now. -+ #multipath -l | grep -q '[[:alnum:]]' || return 1 - # Check if a block device should be a path in a multipath device: - multipath -c /dev/$1 &>/dev/null - } --- -2.26.3 - diff --git a/rear-bz2035939.patch b/rear-bz2035939.patch deleted file mode 100644 index 30771c9..0000000 --- a/rear-bz2035939.patch +++ /dev/null @@ -1,56 +0,0 @@ -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index 0c230f38..f231bf3d 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -2707,6 +2707,15 @@ WARN_MISSING_VOL_ID=1 - USE_CFG2HTML= - # The SKIP_CFG2HTML variable is no longer supported since ReaR 1.18 - -+# IP addresses that are present on the system but must be excluded when -+# building the network configuration used in recovery mode; this is typically -+# used when floating IP addresses are used on the system -+EXCLUDE_IP_ADDRESSES=() -+ -+# Network interfaces that are present on the system but must be excluded when -+# building the network configuration used in recovery mode -+EXCLUDE_NETWORK_INTERFACES=() -+ - # Simplify bonding setups by configuring always the first active device of a - # bond, except when mode is 4 (IEEE 802.3ad policy) - SIMPLIFY_BONDING=no -diff --git a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -index f806bfbf..2385f5b6 100644 ---- a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -+++ b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh -@@ -355,6 +355,11 @@ function is_interface_up () { - local network_interface=$1 - local sysfspath=/sys/class/net/$network_interface - -+ if IsInArray "$network_interface" "${EXCLUDE_NETWORK_INTERFACES[@]}"; then -+ LogPrint "Excluding '$network_interface' per EXCLUDE_NETWORK_INTERFACES directive." -+ return 1 -+ fi -+ - local state=$( cat $sysfspath/operstate ) - if [ "$state" = "down" ] ; then - return 1 -@@ -403,11 +408,19 @@ function ipaddr_setup () { - if [ -n "$ipaddrs" ] ; then - # If some IP is found for the network interface, then use them - for ipaddr in $ipaddrs ; do -+ if IsInArray "${ipaddr%%/*}" "${EXCLUDE_IP_ADDRESSES[@]}"; then -+ LogPrint "Excluding IP address '$ipaddr' per EXCLUDE_IP_ADDRESSES directive even through it's defined in mapping file '$CONFIG_DIR/mappings/ip_addresses'." 
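# A minimal local.conf sketch, with assumed example addresses and interface
# names, showing how the exclusion arrays introduced in default.conf above
# could be set; anything listed here is skipped by the checks in this script:
EXCLUDE_IP_ADDRESSES=( '192.0.2.10' '192.0.2.11' )      # assumed example floating IPs
EXCLUDE_NETWORK_INTERFACES=( 'eth3' )                   # assumed example interface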
-+ continue -+ fi - echo "ip addr add $ipaddr dev $mapped_as" - done - else - # Otherwise, collect IP addresses for the network interface on the system - for ipaddr in $( ip a show dev $network_interface scope global | grep "inet.*\ " | tr -s " " | cut -d " " -f 3 ) ; do -+ if IsInArray "${ipaddr%%/*}" "${EXCLUDE_IP_ADDRESSES[@]}"; then -+ LogPrint "Excluding IP address '$ipaddr' per EXCLUDE_IP_ADDRESSES directive." -+ continue -+ fi - echo "ip addr add $ipaddr dev $mapped_as" - done - fi diff --git a/rear-bz2048454.patch b/rear-bz2048454.patch deleted file mode 100644 index 428505e..0000000 --- a/rear-bz2048454.patch +++ /dev/null @@ -1,78 +0,0 @@ -diff --git a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh -index 35be1721..d3c9ae86 100644 ---- a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh -+++ b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh -@@ -103,12 +103,7 @@ local lvs_exit_code - pdev=$( get_device_name $pdev ) - - # Output lvmdev entry to DISKLAYOUT_FILE: -- # With the above example the output is: -- # lvmdev /dev/system /dev/sda1 7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7 41940992 -- echo "lvmdev /dev/$vgrp $pdev $uuid $size" -- -- # After the 'lvmdev' line was written to disklayout.conf so that the user can inspect it -- # check that the required positional parameters in the 'lvmdev' line are non-empty -+ # Check that the required positional parameters in the 'lvmdev' line are non-empty - # because an empty positional parameter would result an invalid 'lvmdev' line - # which would cause invalid parameters are 'read' as input during "rear recover" - # cf. "Verifying ... 'lvm...' entries" in layout/save/default/950_verify_disklayout_file.sh -@@ -117,13 +112,24 @@ local lvs_exit_code - # so that this also checks that the variables do not contain blanks or more than one word - # because blanks (actually $IFS characters) are used as field separators in disklayout.conf - # which means the positional parameter values must be exactly one non-empty word. -- # Two separated simple 'test $vgrp && test $pdev' commands are used here because -- # 'test $vgrp -a $pdev' does not work when $vgrp is empty or only blanks -- # because '-a' has two different meanings: "EXPR1 -a EXPR2" and "-a FILE" (see "help test") -- # so that when $vgrp is empty 'test $vgrp -a $pdev' tests if file $pdev exists -- # which is usually true because $pdev is usually a partition device node (e.g. /dev/sda1) -- # so that when $vgrp is empty 'test $vgrp -a $pdev' would falsely succeed: -- test $vgrp && test $pdev || Error "LVM 'lvmdev' entry in $DISKLAYOUT_FILE where volume_group or device is empty or more than one word" -+ test $pdev || Error "Cannot make 'lvmdev' entry in disklayout.conf (PV device '$pdev' empty or more than one word)" -+ if ! test $vgrp ; then -+ # Valid $pdev but invalid $vgrp (empty or more than one word): -+ # When $vgrp is empty it means it is a PV that is not part of a VG so the PV exists but it is not used. -+ # PVs that are not part of a VG are documented as comment in disklayout.conf but they are not recreated -+ # because they were not used on the original system so there is no need to recreate them by "rear recover" -+ # (the user can manually recreate them later in his recreated system when needed) -+ # cf. 
https://github.com/rear/rear/issues/2596 -+ DebugPrint "Skipping PV $pdev that is not part of a valid VG (VG '$vgrp' empty or more than one word)" -+ echo "# Skipping PV $pdev that is not part of a valid VG (VG '$vgrp' empty or more than one word):" -+ contains_visible_char "$vgrp" || vgrp='' -+ echo "# lvmdev /dev/$vgrp $pdev $uuid $size" -+ # Continue with the next line in the output of "lvm pvdisplay -c" -+ continue -+ fi -+ # With the above example the output is: -+ # lvmdev /dev/system /dev/sda1 7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7 41940992 -+ echo "lvmdev /dev/$vgrp $pdev $uuid $size" - - done - # Check the exit code of "lvm pvdisplay -c" -@@ -161,8 +167,15 @@ local lvs_exit_code - # lvmgrp /dev/system 4096 5119 20967424 - echo "lvmgrp /dev/$vgrp $extentsize $nrextents $size" - -- # Check that the required positional parameters in the 'lvmgrp' line are non-empty -- # cf. the code above to "check that the required positional parameters in the 'lvmdev' line are non-empty": -+ # Check that the required positional parameters in the 'lvmgrp' line are non-empty. -+ # The tested variables are intentionally not quoted here, cf. the code above to -+ # "check that the required positional parameters in the 'lvmdev' line are non-empty". -+ # Two separated simple 'test $vgrp && test $extentsize' commands are used here because -+ # 'test $vgrp -a $extentsize' does not work when $vgrp is empty or only blanks -+ # because '-a' has two different meanings: "EXPR1 -a EXPR2" and "-a FILE" (see "help test") -+ # so with empty $vgrp it becomes 'test -a $extentsize' that tests if a file $extentsize exists -+ # which is unlikely to be true but it is not impossible that a file $extentsize exists -+ # so when $vgrp is empty (or blanks) 'test $vgrp -a $extentsize' might falsely succeed: - test $vgrp && test $extentsize || Error "LVM 'lvmgrp' entry in $DISKLAYOUT_FILE where volume_group or extentsize is empty or more than one word" - - done -@@ -305,7 +318,8 @@ local lvs_exit_code - fi - already_processed_lvs+=( "$vg/$lv" ) - # Check that the required positional parameters in the 'lvmvol' line are non-empty -- # cf. the code above to "check that the required positional parameters in the 'lvmdev' line are non-empty": -+ # cf. 
the code above to "check that the required positional parameters in the 'lvmdev' line are non-empty" -+ # and the code above to "check that the required positional parameters in the 'lvmgrp' line are non-empty": - test $vg && test $lv && test $size && test $layout || Error "LVM 'lvmvol' entry in $DISKLAYOUT_FILE where volume_group or name or size or layout is empty or more than one word" - fi - diff --git a/rear-bz2049091.patch b/rear-bz2049091.patch deleted file mode 100644 index 9f5e12d..0000000 --- a/rear-bz2049091.patch +++ /dev/null @@ -1,25 +0,0 @@ -diff --git a/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh b/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh -index 040e9eec..e731c994 100644 ---- a/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh -+++ b/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh -@@ -19,9 +19,9 @@ while read lvmdev name mpdev junk ; do - # Remember, multipath devices from a volume group that is "excluded" should be 'commented out' - device=$(echo $mpdev | cut -c1-45) - while read LINE ; do -- # Now we need to comment all lines that contain "$devices" in the LAYOUT_FILE -+ # Now we need to comment all lines that contain "$device" in the LAYOUT_FILE - sed -i "s|^$LINE|\#$LINE|" "$LAYOUT_FILE" -- done < <(grep "$device" $LAYOUT_FILE | grep -v "^#") -+ done < <(grep " $device " $LAYOUT_FILE | grep -v "^#") - Log "Excluding multipath device $device" - done < <(grep "^#lvmdev" $LAYOUT_FILE) - -@@ -31,7 +31,7 @@ done < <(grep "^#lvmdev" $LAYOUT_FILE) - while read LINE ; do - # multipath /dev/mapper/360060e8007e2e3000030e2e300002065 /dev/sdae,/dev/sdat,/dev/sdbi,/dev/sdp - device=$(echo $LINE | awk '{print $2}' | cut -c1-45) -- num=$(grep "$device" $LAYOUT_FILE | grep -v "^#" | wc -l) -+ num=$(grep " $device " $LAYOUT_FILE | grep -v "^#" | wc -l) - if [ $num -lt 2 ] ; then - # If the $device is only seen once (in a uncommented line) then the multipath is not in use - sed -i "s|^$LINE|\#$LINE|" "$LAYOUT_FILE" diff --git a/rear-bz2083272.patch b/rear-bz2083272.patch deleted file mode 100644 index 03c8a8a..0000000 --- a/rear-bz2083272.patch +++ /dev/null @@ -1,171 +0,0 @@ -commit 3d1bcf1b50ca8201a3805bc7cab6ca69c14951a1 -Author: pcahyna -Date: Thu May 5 12:11:55 2022 +0200 - - Merge pull request #2795 from pcahyna/recover-check-sums - - Verify file hashes at the end of recover after file restore from backup - -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index f231bf3d..881a0af0 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -313,8 +313,30 @@ CDROM_SIZE=20 - # which exits with non-zero exit code when the disk layout or those files changed - # (cf. https://github.com/rear/rear/issues/1134) but the checklayout workflow - # does not automatically recreate the rescue/recovery system. -+# Files matching FILES_TO_PATCH_PATTERNS are added to this list automatically. - CHECK_CONFIG_FILES=( '/etc/drbd/' '/etc/drbd.conf' '/etc/lvm/lvm.conf' '/etc/multipath.conf' '/etc/rear/' '/etc/udev/udev.conf' ) - -+# FILES_TO_PATCH_PATTERNS is a space-separated list of shell glob patterns. -+# Files that match are eligible for a final migration of UUIDs and other -+# identifiers after recovery (if the layout recreation process has led -+# to a change of an UUID or a device name and a corresponding change needs -+# to be performed on restored configuration files ). 
-+# See finalize/GNU/Linux/280_migrate_uuid_tags.sh -+# The [] around the first letter make sure that shopt -s nullglob removes this file from the list if it does not exist -+ -+FILES_TO_PATCH_PATTERNS="[b]oot/{grub.conf,menu.lst,device.map} [e]tc/grub.* \ -+ [b]oot/grub/{grub.conf,grub.cfg,menu.lst,device.map} \ -+ [b]oot/grub2/{grub.conf,grub.cfg,menu.lst,device.map} \ -+ [e]tc/sysconfig/grub [e]tc/sysconfig/bootloader \ -+ [e]tc/lilo.conf [e]tc/elilo.conf \ -+ [e]tc/yaboot.conf \ -+ [e]tc/mtab [e]tc/fstab \ -+ [e]tc/mtools.conf \ -+ [e]tc/smartd.conf [e]tc/sysconfig/smartmontools \ -+ [e]tc/sysconfig/rawdevices \ -+ [e]tc/security/pam_mount.conf.xml \ -+ [b]oot/efi/*/*/grub.cfg" -+ - ## - # Relax-and-Recover recovery system update during "rear recover" - # -diff --git a/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh b/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh -index 1a91a0e3..e869e5e9 100644 ---- a/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh -+++ b/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh -@@ -29,19 +29,9 @@ LogPrint "The original restored files get saved in $save_original_file_dir (in $ - - local symlink_target="" - local restored_file="" --# the funny [] around the first letter make sure that shopt -s nullglob removes this file from the list if it does not exist --# the files without a [] are mandatory, like fstab FIXME: but below there is [e]tc/fstab not etc/fstab - why? -- --for restored_file in [b]oot/{grub.conf,menu.lst,device.map} [e]tc/grub.* [b]oot/grub/{grub.conf,menu.lst,device.map} \ -- [b]oot/grub2/{grub.conf,grub.cfg,menu.lst,device.map} \ -- [e]tc/sysconfig/grub [e]tc/sysconfig/bootloader \ -- [e]tc/lilo.conf \ -- [e]tc/yaboot.conf \ -- [e]tc/mtab [e]tc/fstab \ -- [e]tc/mtools.conf \ -- [e]tc/smartd.conf [e]tc/sysconfig/smartmontools \ -- [e]tc/sysconfig/rawdevices \ -- [e]tc/security/pam_mount.conf.xml [b]oot/efi/*/*/grub.cfg -+# The variable expansion is deliberately not quoted in order to perform -+# pathname expansion on the variable value. -+for restored_file in $FILES_TO_PATCH_PATTERNS - do - # Silently skip directories and file not found: - test -f "$restored_file" || continue -diff --git a/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh b/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh -index 074689a1..d994ce8e 100644 ---- a/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh -+++ b/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh -@@ -23,18 +23,9 @@ LogPrint "Migrating filesystem UUIDs in certain restored files in $TARGET_FS_ROO - - local symlink_target="" - local restored_file="" --# the funny [] around the first letter make sure that shopt -s nullglob removes this file from the list if it does not exist --# the files without a [] are mandatory, like fstab FIXME: but below there is [e]tc/fstab not etc/fstab - why? --for restored_file in [b]oot/{grub.conf,menu.lst,device.map} [e]tc/grub.* \ -- [b]oot/grub/{grub.conf,grub.cfg,menu.lst,device.map} \ -- [b]oot/grub2/{grub.conf,grub.cfg,menu.lst,device.map} \ -- [e]tc/sysconfig/grub [e]tc/sysconfig/bootloader \ -- [e]tc/lilo.conf [e]tc/elilo.conf \ -- [e]tc/mtab [e]tc/fstab \ -- [e]tc/mtools.conf \ -- [e]tc/smartd.conf [e]tc/sysconfig/smartmontools \ -- [e]tc/sysconfig/rawdevices \ -- [e]tc/security/pam_mount.conf.xml [b]oot/efi/*/*/grub.cfg -+# The variable expansion is deliberately not quoted in order to perform -+# pathname expansion on the variable value. 
-+for restored_file in $FILES_TO_PATCH_PATTERNS - do - # Silently skip directories and file not found: - test -f "$restored_file" || continue -diff --git a/usr/share/rear/finalize/default/060_compare_files.sh b/usr/share/rear/finalize/default/060_compare_files.sh -new file mode 100644 -index 00000000..6947fda9 ---- /dev/null -+++ b/usr/share/rear/finalize/default/060_compare_files.sh -@@ -0,0 +1,6 @@ -+if [ -e $VAR_DIR/layout/config/files.md5sum ] ; then -+ if ! chroot $TARGET_FS_ROOT md5sum -c --quiet < $VAR_DIR/layout/config/files.md5sum 1>> >( tee -a "$RUNTIME_LOGFILE" 1>&7 ) 2>> >( tee -a "$RUNTIME_LOGFILE" 1>&8 ) ; then -+ LogPrintError "Error: Restored files do not match the recreated system in $TARGET_FS_ROOT" -+ return 1 -+ fi -+fi -diff --git a/usr/share/rear/layout/save/default/490_check_files_to_patch.sh b/usr/share/rear/layout/save/default/490_check_files_to_patch.sh -new file mode 100644 -index 00000000..ee717063 ---- /dev/null -+++ b/usr/share/rear/layout/save/default/490_check_files_to_patch.sh -@@ -0,0 +1,43 @@ -+# FILES_TO_PATCH_PATTERNS is a space-separated list of shell glob patterns. -+# Files that match are eligible for a final migration of UUIDs and other -+# identifiers after recovery (if the layout recreation process has led -+# to a change of an UUID or a device name and a corresponding change needs -+# to be performed on restored configuration files ). -+# See finalize/GNU/Linux/280_migrate_uuid_tags.sh -+# We should add all such files to CHECK_CONFIG_FILES - if they change, -+# we risk inconsistencies between the restored files and recreated layout, -+# or failures of UUID migration. -+ -+local file final_file symlink_target -+ -+# The patterns are relative to /, change directory there -+# so that the shell finds the files during pathname expansion -+pushd / >/dev/null -+# The variable expansion is deliberately not quoted in order to perform -+# pathname expansion on the variable value. -+for file in $FILES_TO_PATCH_PATTERNS ; do -+ final_file="/$file" -+ IsInArray "$final_file" "${CHECK_CONFIG_FILES[@]}" && continue -+ # Symlink handling (partially from 280_migrate_uuid_tags.sh): -+ # avoid dead symlinks, and symlinks to files on dynamic filesystems -+ # ( /proc etc.) - they are expected to change and validating -+ # their checksums has no sense -+ if test -L "$final_file" ; then -+ if symlink_target="$( readlink -e "$final_file" )" ; then -+ # If the symlink target contains /proc/ /sys/ /dev/ or /run/ we skip it because then -+ # the symlink target is considered to not be a restored file that needs to be patched -+ # and thus we don't need to generate and check its hash, either -+ # cf. 
https://github.com/rear/rear/pull/2047#issuecomment-464846777 -+ if echo $symlink_target | egrep -q '/proc/|/sys/|/dev/|/run/' ; then -+ Log "Skip adding symlink $final_file target $symlink_target on /proc/ /sys/ /dev/ or /run/ to CHECK_CONFIG_FILES" -+ continue -+ fi -+ Debug "Adding symlink $final_file with target $symlink_target to CHECK_CONFIG_FILES" -+ else -+ LogPrint "Skip adding dead symlink $final_file to CHECK_CONFIG_FILES" -+ continue -+ fi -+ fi -+ CHECK_CONFIG_FILES+=( "$final_file" ) -+done -+popd >/dev/null -diff --git a/usr/share/rear/layout/save/default/600_snapshot_files.sh b/usr/share/rear/layout/save/default/600_snapshot_files.sh -index 0ebf197c..3ac6b07e 100644 ---- a/usr/share/rear/layout/save/default/600_snapshot_files.sh -+++ b/usr/share/rear/layout/save/default/600_snapshot_files.sh -@@ -3,7 +3,8 @@ if [ "$WORKFLOW" = "checklayout" ] ; then - return 0 - fi - --config_files=() -+local obj -+local config_files=() - for obj in "${CHECK_CONFIG_FILES[@]}" ; do - if [ -d "$obj" ] ; then - config_files+=( $( find "$obj" -type f ) ) diff --git a/rear-bz2091163.patch b/rear-bz2091163.patch index 3a68a34..991a147 100644 --- a/rear-bz2091163.patch +++ b/rear-bz2091163.patch @@ -1,5 +1,35 @@ +From 29e739ae7c0651f8f77c60846bfbe2b6c91baa29 Mon Sep 17 00:00:00 2001 +From: Pavel Cahyna +Date: Sat, 31 Dec 2022 17:40:39 +0100 +Subject: [PATCH] Protect against colons in pvdisplay output + +LVM can be configured to show device names under /dev/disk/by-path +in command output. These names often contain colons that separate fields +like channel and target (for example /dev/disk/by-path/pci-*-scsi-0:0:1:0-*, +similarly the pci-* part, which contains colon-separated PCI bus and +device numbers). Since the "pvdisplay -c" output also uses colons as +field separators and does not escape embedded colons in any way, +embedded colons break parsing of this output. + +As a fix, use the pipe character '|' as the field separator in pvdisplay +output. (This would break if a PV device has a '|' in its name, but this +is very much less likely than having a ':' .) + +Also, configure explicitly what fields to output - "pvdisplay -c" +prints many fields, but I have not found documentation about what fields +is it using exactly, so one had to guess what the output means. Using +"pvdisplay -C" and selecting the fields explicitly is much clearer. + +This also changes the PV size field to match documentation, the comment +says that size is in bytes, but it actually was not in bytes. As nothing +is actually using the PV size field, this inconsistency has not caused +any problem in practice, and no code needs adjusting for the change. 
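
To make the parsing problem described above concrete, the following minimal bash sketch (illustrative only, not taken from the patch; the exact pvdisplay invocation and field list are assumptions) shows how an embedded colon in a by-path PV name breaks colon-separated splitting, while a '|' separator with explicitly selected fields keeps the device name intact:

    # Illustration only: a by-path PV name that contains colons
    pv="/dev/disk/by-path/pci-0000:00:17.0-scsi-0:0:1:0-part2"

    # Colon-separated record in the style of "pvdisplay -c": the PV name itself gets split
    IFS=':' read -r dev vg size rest <<< "$pv:system:41940992:more-fields"
    echo "$dev"    # prints "/dev/disk/by-path/pci-0000", i.e. a truncated device name

    # Pipe-separated record with an explicit field list, e.g. roughly
    #   lvm pvdisplay -C --noheadings --separator '|' -o pv_name,vg_name,pv_uuid,pv_size
    IFS='|' read -r dev vg uuid size <<< "$pv|system|7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7|41940992b"
    echo "$dev"    # prints the full by-path device name
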
+--- + .../layout/save/GNU/Linux/220_lvm_layout.sh | 24 ++++++++++++------- + 1 file changed, 15 insertions(+), 9 deletions(-) + diff --git a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh -index d3c9ae86..f21845df 100644 +index e01dbf465..7400c586e 100644 --- a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh +++ b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh @@ -70,14 +70,20 @@ local lvs_exit_code diff --git a/rear-bz2096900.patch b/rear-bz2096900.patch deleted file mode 100644 index 595c147..0000000 --- a/rear-bz2096900.patch +++ /dev/null @@ -1,58 +0,0 @@ -commit 389e5026df575ad98695191044257cf2b33d565b -Author: pcahyna -Date: Mon Jul 4 15:48:43 2022 +0200 - - Merge pull request #2825 from lzaoral/replace-mkinitrd-with-dracut - - Replace `mkinitrd` with `dracut` on Fedora and RHEL - -diff --git a/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh -index 3476b77f..f296e624 100644 ---- a/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh -+++ b/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh -@@ -61,7 +61,7 @@ NEW_INITRD_MODULES=( $(tr " " "\n" <<< "${NEW_INITRD_MODULES[*]}" | sort | uniq - Log "New INITRD_MODULES='${OLD_INITRD_MODULES[@]} ${NEW_INITRD_MODULES[@]}'" - INITRD_MODULES="${OLD_INITRD_MODULES[@]} ${NEW_INITRD_MODULES[@]}" - --WITH_INITRD_MODULES=$( printf '%s\n' ${INITRD_MODULES[@]} | awk '{printf "--with=%s ", $1}' ) -+WITH_INITRD_MODULES=$( printf '%s\n' ${INITRD_MODULES[@]} | awk '{printf "--add-drivers=%s ", $1}' ) - - # Recreate any initrd or initramfs image under $TARGET_FS_ROOT/boot/ with new drivers - # Images ignored: -@@ -76,19 +76,19 @@ for INITRD_IMG in $( ls $TARGET_FS_ROOT/boot/initramfs-*.img $TARGET_FS_ROOT/boo - # Do not use KERNEL_VERSION here because that is readonly in the rear main script: - kernel_version=$( basename $( echo $INITRD_IMG ) | cut -f2- -d"-" | sed s/"\.img"// ) - INITRD=$( echo $INITRD_IMG | egrep -o "/boot/.*" ) -- LogPrint "Running mkinitrd..." -- # Run mkinitrd directly in chroot without a login shell in between (see https://github.com/rear/rear/issues/862). -- # We need the mkinitrd binary in the chroot environment i.e. the mkinitrd binary in the recreated system. -- # Normally we would use a login shell like: chroot $TARGET_FS_ROOT /bin/bash --login -c 'type -P mkinitrd' -+ LogPrint "Running dracut..." -+ # Run dracut directly in chroot without a login shell in between (see https://github.com/rear/rear/issues/862). -+ # We need the dracut binary in the chroot environment i.e. the dracut binary in the recreated system. 
-+ # Normally we would use a login shell like: chroot $TARGET_FS_ROOT /bin/bash --login -c 'type -P dracut' - # because otherwise there is no useful PATH (PATH is only /bin) so that 'type -P' won't find it - # but we cannot use a login shell because that contradicts https://github.com/rear/rear/issues/862 - # so that we use a plain (non-login) shell and set a (hopefully) reasonable PATH: -- local mkinitrd_binary=$( chroot $TARGET_FS_ROOT /bin/bash -c 'PATH=/sbin:/usr/sbin:/usr/bin:/bin type -P mkinitrd' ) -- # If there is no mkinitrd in the chroot environment plain 'chroot $TARGET_FS_ROOT' will hang up endlessly -+ local dracut_binary=$( chroot $TARGET_FS_ROOT /bin/bash -c 'PATH=/sbin:/usr/sbin:/usr/bin:/bin type -P dracut' ) -+ # If there is no dracut in the chroot environment plain 'chroot $TARGET_FS_ROOT' will hang up endlessly - # and then "rear recover" cannot be aborted with the usual [Ctrl]+[C] keys. - # Use plain $var because when var contains only blanks test "$var" results true because test " " results true: -- if test $mkinitrd_binary ; then -- if chroot $TARGET_FS_ROOT $mkinitrd_binary -v -f ${WITH_INITRD_MODULES[@]} $INITRD $kernel_version >&2 ; then -+ if test $dracut_binary ; then -+ if chroot $TARGET_FS_ROOT $dracut_binary -v -f ${WITH_INITRD_MODULES[@]} $INITRD $kernel_version >&2 ; then - LogPrint "Updated initrd with new drivers for kernel $kernel_version." - else - LogPrint "WARNING: -@@ -99,7 +99,7 @@ and decide yourself, whether the system will boot or not. - fi - else - LogPrint "WARNING: --Cannot create initrd (found no mkinitrd in the recreated system). -+Cannot create initrd (found no dracut in the recreated system). - Check the recreated system (mounted at $TARGET_FS_ROOT) - and decide yourself, whether the system will boot or not. - " diff --git a/rear-bz2096916.patch b/rear-bz2096916.patch deleted file mode 100644 index 6a8a62f..0000000 --- a/rear-bz2096916.patch +++ /dev/null @@ -1,130 +0,0 @@ -commit b06d059108db9b0c46cba29cc174f60e129164f1 -Author: Johannes Meixner -Date: Tue Mar 9 14:40:59 2021 +0100 - - Merge pull request #2580 from rear/jsmeix-load-nvram-module - - In etc/scripts/system-setup.d/41-load-special-modules.sh - load the nvram kernel module if possible to make /dev/nvram appear - because /dev/nvram should be there when installing GRUB, - see https://github.com/rear/rear/issues/2554 - and include the nvram kernel module in the recovery system - because nvram could be a module in particular on POWER architecture - see https://github.com/rear/rear/issues/2554#issuecomment-764720180 - -diff --git a/usr/share/rear/build/GNU/Linux/400_copy_modules.sh b/usr/share/rear/build/GNU/Linux/400_copy_modules.sh -index d8d733d2..a0ca9084 100644 ---- a/usr/share/rear/build/GNU/Linux/400_copy_modules.sh -+++ b/usr/share/rear/build/GNU/Linux/400_copy_modules.sh -@@ -116,8 +116,12 @@ for dummy in "once" ; do - # As a way out of this dilemma we add the below listed modules no longer via conf/GNU/Linux.conf - # but here after the user config files were sourced so that now the user can specify - # MODULES=( 'moduleX' 'moduleY' ) in etc/rear/local.conf to get additional kernel modules -- # included in the recovery system in addition to the ones via an empty MODULES=() setting: -- MODULES+=( vfat -+ # included in the recovery system in addition to the ones via an empty MODULES=() setting. -+ # nvram could be a module in particular on POWER architecture, -+ # cf. 
https://github.com/rear/rear/issues/2554#issuecomment-764720180 -+ # and https://github.com/rear/rear/pull/2580#issuecomment-791344794 -+ MODULES+=( nvram -+ vfat - nls_iso8859_1 nls_utf8 nls_cp437 - af_packet - unix -diff --git a/usr/share/rear/finalize/Linux-ppc64le/660_install_grub2.sh b/usr/share/rear/finalize/Linux-ppc64le/660_install_grub2.sh -index 4c2698f3..0cb3ee41 100644 ---- a/usr/share/rear/finalize/Linux-ppc64le/660_install_grub2.sh -+++ b/usr/share/rear/finalize/Linux-ppc64le/660_install_grub2.sh -@@ -104,9 +104,39 @@ fi - # Do not update nvram when system is running in PowerNV mode (BareMetal). - # grub2-install will fail if not run with the --no-nvram option on a PowerNV system, - # see https://github.com/rear/rear/pull/1742 --grub2_install_option="" -+grub2_no_nvram_option="" - if [[ $(awk '/platform/ {print $NF}' < /proc/cpuinfo) == PowerNV ]] ; then -- grub2_install_option="--no-nvram" -+ grub2_no_nvram_option="--no-nvram" -+fi -+# Also do not update nvram when no character device node /dev/nvram exists. -+# On POWER architecture the nvram kernel driver could be also built as a kernel module -+# that gets loaded via etc/scripts/system-setup.d/41-load-special-modules.sh -+# but whether or not the nvram kernel driver will then create /dev/nvram -+# depends on whether or not the hardware platform supports nvram. -+# I asked on a SUSE internal mailing list -+# and got the following reply (excerpts): -+# ---------------------------------------------------------------- -+# > I would like to know when /dev/nvram exists and when not. -+# > I assume /dev/nvram gets created as other device nodes -+# > by the kernel (probably together with udev). -+# > I would like to know under what conditions /dev/nvram -+# > gets created and when it is not created. -+# > It seems on PPC /dev/nvram usually exist but sometimes not. -+# In case of powerpc, it gets created by nvram driver -+# (nvram_module_init) whenever the powerpc platform driver -+# has ppc_md.nvram_size greater than zero in it's machine -+# description structure. -+# How exactly ppc_md.nvram_size gets gets populated by platform -+# code depends on the platform, e.g. on most modern systems -+# it gets populated from 'nvram' device tree node -+# (and only if such node has #bytes > 0). -+# ---------------------------------------------------------------- -+# So /dev/nvram may not exist regardless that the nvram kernel driver is there -+# and then grub2-install must be called with the '--no-nvram' option -+# because otherwise installing the bootloader fails -+# cf. https://github.com/rear/rear/issues/2554 -+if ! test -c /dev/nvram ; then -+ grub2_no_nvram_option="--no-nvram" - fi - - # When GRUB2_INSTALL_DEVICES is specified by the user -@@ -134,7 +164,7 @@ if test "$GRUB2_INSTALL_DEVICES" ; then - else - LogPrint "Installing GRUB2 on $grub2_install_device (specified in GRUB2_INSTALL_DEVICES)" - fi -- if ! chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $grub2_install_option $grub2_install_device" ; then -+ if ! 
chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $grub2_no_nvram_option $grub2_install_device" ; then - LogPrintError "Failed to install GRUB2 on $grub2_install_device" - grub2_install_failed="yes" - fi -@@ -170,7 +200,7 @@ for part in $part_list ; do - LogPrint "Found PPC PReP boot partition $part - installing GRUB2 there" - # Erase the first 512 bytes of the PPC PReP boot partition: - dd if=/dev/zero of=$part -- if chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $grub2_install_option $part" ; then -+ if chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $grub2_no_nvram_option $part" ; then - # In contrast to the above behaviour when GRUB2_INSTALL_DEVICES is specified - # consider it here as a successful bootloader installation when GRUB2 - # got installed on at least one PPC PReP boot partition: -diff --git a/usr/share/rear/skel/default/etc/scripts/system-setup.d/41-load-special-modules.sh b/usr/share/rear/skel/default/etc/scripts/system-setup.d/41-load-special-modules.sh -index 9b0b3b8a..2e1d1912 100644 ---- a/usr/share/rear/skel/default/etc/scripts/system-setup.d/41-load-special-modules.sh -+++ b/usr/share/rear/skel/default/etc/scripts/system-setup.d/41-load-special-modules.sh -@@ -1,6 +1,24 @@ --# some things are special -+# Special cases of kernel module loading. - --# XEN PV does not autoload some modules --if [ -d /proc/xen ] ; then -- modprobe xenblk -+# XEN PV does not autoload some modules: -+test -d /proc/xen && modprobe xenblk -+ -+# On POWER architecture the nvram kernel driver may be no longer built into the kernel -+# but nowadays it could be also built as a kernel module that needs to be loaded -+# cf. https://github.com/rear/rear/issues/2554#issuecomment-764720180 -+# because normally grub2-install gets called without the '--no-nvram' option -+# e.g. see finalize/Linux-ppc64le/620_install_grub2.sh -+# which is how grub2-install should be called when the hardware supports nvram. -+# Nothing to do when the character device node /dev/nvram exists -+# because then the nvram kernel driver is already there: -+if ! test -c /dev/nvram ; then -+ # Nothing can be done when there is no nvram kernel module. -+ # Suppress the possible 'modprobe -n nvram' error message like -+ # "modprobe: FATAL: Module nvram not found in directory /lib/modules/..." -+ # to avoid a possible "FATAL" false alarm message that would appear -+ # on the user's terminal during recovery system startup -+ # cf. 
https://github.com/rear/rear/pull/2537#issuecomment-741825046 -+ # but when there is a nvram kernel module show possible 'modprobe nvram' -+ # (error) messages on the user's terminal during recovery system startup: -+ modprobe -n nvram 2>/dev/null && modprobe nvram - fi diff --git a/rear-bz2097437.patch b/rear-bz2097437.patch deleted file mode 100644 index 8d58ef2..0000000 --- a/rear-bz2097437.patch +++ /dev/null @@ -1,37 +0,0 @@ -commit 2922b77e950537799fdadf5b3ebf6a05d97f6f2f -Author: pcahyna -Date: Mon Jun 20 17:42:58 2022 +0200 - - Merge pull request #2822 from pcahyna/fix-vim-symlink - - Fix vi in the rescue system on Fedora and RHEL 9 - -diff --git a/usr/share/rear/build/GNU/Linux/005_create_symlinks.sh b/usr/share/rear/build/GNU/Linux/005_create_symlinks.sh -index df75e07d..55f25bef 100644 ---- a/usr/share/rear/build/GNU/Linux/005_create_symlinks.sh -+++ b/usr/share/rear/build/GNU/Linux/005_create_symlinks.sh -@@ -8,7 +8,6 @@ - ln -sf $v bin/init $ROOTFS_DIR/init >&2 - ln -sf $v bin $ROOTFS_DIR/sbin >&2 - ln -sf $v bash $ROOTFS_DIR/bin/sh >&2 --ln -sf $v vi $ROOTFS_DIR/bin/vim >&2 - ln -sf $v true $ROOTFS_DIR/bin/pam_console_apply >&2 # RH/Fedora with udev needs this - ln -sf $v ../bin $ROOTFS_DIR/usr/bin >&2 - ln -sf $v ../bin $ROOTFS_DIR/usr/sbin >&2 -diff --git a/usr/share/rear/conf/GNU/Linux.conf b/usr/share/rear/conf/GNU/Linux.conf -index 89aedd4c..0c97594a 100644 ---- a/usr/share/rear/conf/GNU/Linux.conf -+++ b/usr/share/rear/conf/GNU/Linux.conf -@@ -206,6 +206,12 @@ LIBS+=( - ) - - COPY_AS_IS+=( /dev /etc/inputr[c] /etc/protocols /etc/services /etc/rpc /etc/termcap /etc/terminfo /lib*/terminfo /usr/share/terminfo /etc/netconfig /etc/mke2fs.conf /etc/*-release /etc/localtime /etc/magic /usr/share/misc/magic /etc/dracut.conf /etc/dracut.conf.d /usr/lib/dracut /sbin/modprobe.ksplice-orig /etc/sysctl.conf /etc/sysctl.d /etc/e2fsck.conf ) -+ -+# Needed by vi on Fedora and derived distributions -+# where vi is a shell script that executes /usr/libexec/vi -+# see https://github.com/rear/rear/pull/2822 -+COPY_AS_IS+=( /usr/libexec/vi ) -+ - # Required by curl with https: - # There are stored the distribution provided certificates - # installed from packages, nothing confidential. 
diff --git a/rear-bz2104005.patch b/rear-bz2104005.patch index db2c9dc..b3159e9 100644 --- a/rear-bz2104005.patch +++ b/rear-bz2104005.patch @@ -1,21 +1,60 @@ -commit 40ec3bf072a51229e81bfbfa7cedb8a7c7902dbd -Author: Johannes Meixner -Date: Fri Jun 24 15:11:27 2022 +0200 +commit bca0e7a92af16cb7fb82ef04401cdb3286068081 +Merge: d2d2300b f36bfe9b +Author: pcahyna +Date: Thu Jul 28 12:11:04 2022 +0200 - Merge pull request #2827 from rear/jsmeix-fail-safe-yes-pipe-lvcreate + Merge pull request #2839 from pcahyna/lvm-y - and commit b3fd58fc871e00bd713a0cb081de54d746ffffb3 from pull request #2839 + Pass -y to lvcreate instead of piping the output of yes +diff --git a/usr/share/rear/conf/GNU/Linux.conf b/usr/share/rear/conf/GNU/Linux.conf +index 82007719..7e47b912 100644 +--- a/usr/share/rear/conf/GNU/Linux.conf ++++ b/usr/share/rear/conf/GNU/Linux.conf +@@ -5,7 +5,7 @@ ip + less + parted + readlink +-# For noninteractive confirmation in lvm commands during layout recreation ++# For noninteractive confirmation in commands + yes + ) + diff --git a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh -index 1be17ba8..d34ab335 100644 +index 0bd863ac..6089cc09 100644 --- a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh +++ b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh -@@ -263,7 +263,7 @@ $ifline +@@ -287,28 +287,16 @@ create_lvmvol() { + # so e.g. 'lvcreate -L 123456b -n LV VG' becomes 'lvcreate -l 100%FREE -n LV VG' + fallbacklvopts="$( sed -e 's/-L [0-9b]*/-l 100%FREE/' <<< "$lvopts" )" - LogPrint "Creating LVM volume '$vg/$lvname'; Warning: some properties may not be preserved..." +- # In SLES11 "man lvcreate" does not show '-y' or '--yes' +- # so we cannot use "lvm lvcreate -y ..." +- # see https://github.com/rear/rear/issues/2820#issuecomment-1161934013 +- # instead we input as many 'y' as asked for by "lvm lvcreate" +- # see https://github.com/rear/rear/issues/513 +- # and https://github.com/rear/rear/issues/2820 +- # plus be safe against possible 'set -o pipefail' non-zero exit code of 'yes' via '( yes || true ) | ...' +- # see https://github.com/rear/rear/issues/2820#issuecomment-1162804476 +- # because 'yes' may get terminated by SIGPIPE when plain 'yes | ...' is used +- # see https://github.com/rear/rear/issues/2820#issuecomment-1162772415 +- # and suppress needless "yes: standard output: Broken pipe" stderr messages +- # that appear at least with newer 'yes' in coreutils-8.32 in openSUSE Leap 15.3 + cat >> "$LAYOUT_CODE" </dev/null || true ) | lvm lvcreate $lvopts $vg ; then +- LogPrintError "Failed to create LVM volume '$vg/$lvname' with lvcreate $lvopts $vg" +- if ( yes 2>/dev/null || true ) | lvm lvcreate $fallbacklvopts $vg ; then +- LogPrintError "Created LVM volume '$vg/$lvname' using fallback options lvcreate $fallbacklvopts $vg" ++ if ! 
lvm lvcreate -y $lvopts $vg ; then ++ LogPrintError "Failed to create LVM volume '$vg/$lvname' with lvcreate -y $lvopts $vg" ++ if lvm lvcreate -y $fallbacklvopts $vg ; then ++ LogPrintError "Created LVM volume '$vg/$lvname' using fallback options lvcreate -y $fallbacklvopts $vg" + else +- LogPrintError "Also failed to create LVM volume '$vg/$lvname' with lvcreate $fallbacklvopts $vg" ++ LogPrintError "Also failed to create LVM volume '$vg/$lvname' with lvcreate -y $fallbacklvopts $vg" + # Explicit 'false' is needed to let the whole 'if then else fi' command exit with non zero exit state + # to let diskrestore.sh abort here as usual when a command fails (diskrestore.sh runs with 'set -e'): + false diff --git a/rear-bz2111049.patch b/rear-bz2111049.patch deleted file mode 100644 index d7e19bc..0000000 --- a/rear-bz2111049.patch +++ /dev/null @@ -1,37 +0,0 @@ -commit 1447530f502305ed08149d9b2a56a51fb91af875 -Author: Johannes Meixner -Date: Wed May 25 13:51:14 2022 +0200 - - Merge pull request #2808 from rear/jsmeix-exclude-watchdog - - Exclude dev/watchdog* from the ReaR recovery system: - In default.conf add dev/watchdog* to COPY_AS_IS_EXCLUDE - because watchdog functionality is not wanted in the recovery system - because we do not want any automated reboot functionality - while disaster recovery happens via "rear recover", - see https://github.com/rear/rear/pull/2808 - Furthermore having a copy of dev/watchdog* - during "rear mkrescue" in ReaR's build area - may even trigger a system crash that is caused by a - buggy TrendMicro ds_am module touching dev/watchdog - in ReaR's build area (/var/tmp/rear.XXX/rootfs), - see https://github.com/rear/rear/issues/2798 - -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index 881a0af0..cb14da8b 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -1414,7 +1414,12 @@ COPY_AS_IS=( $SHARE_DIR $VAR_DIR ) - # We let them being recreated by device mapper in the recovery system during the recovery process. - # Copying them into the recovery system would let "rear recover" avoid the migration process. - # See https://github.com/rear/rear/pull/1393 for details. --COPY_AS_IS_EXCLUDE=( $VAR_DIR/output/\* dev/.udev dev/shm dev/shm/\* dev/oracleasm dev/mapper ) -+# /dev/watchdog /dev/watchdog\* functionality is not wanted in the ReaR rescue/recovery system -+# because we do not want any automated reboot while disaster recovery happens via "rear recover". -+# Furthermore having dev/watchdog* during "rear mkrescue" may even trigger a system "crash" that is -+# caused by TrendMicro ds_am module touching dev/watchdog in ReaR's build area (/var/tmp/rear.XXX/rootfs). -+# See https://github.com/rear/rear/issues/2798 -+COPY_AS_IS_EXCLUDE=( $VAR_DIR/output/\* dev/.udev dev/shm dev/shm/\* dev/oracleasm dev/mapper dev/watchdog\* ) - # Array of user names that are trusted owners of files where RequiredSharedObjects calls ldd (cf. COPY_AS_IS) - # and where a ldd test is run inside the recovery system that tests all binaries for 'not found' libraries. 
- # The default is 'root' plus those standard system users that have a 'bin' or 'sbin' or 'root' home directory diff --git a/rear-bz2111059.patch b/rear-bz2111059.patch deleted file mode 100644 index fff1437..0000000 --- a/rear-bz2111059.patch +++ /dev/null @@ -1,105 +0,0 @@ -commit 552dd6bfb20fdb3dc712b5243656d147392c27c3 -Author: Johannes Meixner -Date: Thu Jun 2 15:25:52 2022 +0200 - - Merge pull request #2811 from rear/jsmeix-RECOVERY_COMMANDS - - Add PRE_RECOVERY_COMMANDS and POST_RECOVERY_COMMANDS - as alternative to PRE_RECOVERY_SCRIPT and POST_RECOVERY_SCRIPT - see the description in default.conf how to use them and how they work. - See https://github.com/rear/rear/pull/2811 and see also - https://github.com/rear/rear/pull/2735 therein in particular - https://github.com/rear/rear/pull/2735#issuecomment-1134686196 - Additionally use LogPrint to show the user the executed commands, - see https://github.com/rear/rear/pull/2789 - -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index cb14da8b..b14525da 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -3117,14 +3117,37 @@ ELILO_BIN= - ################ ---- custom scripts - # - # NOTE: The scripts can be defined as an array to better handly spaces in parameters. --# The scripts are called like this: eval "${PRE_RECOVERY_SCRIPT[@]}" -+# The scripts are called like this: -+# eval "${PRE_RECOVERY_SCRIPT[@]}" -+# -+# Alternatively, commands can be executed by using the corresponding -+# PRE_RECOVERY_COMMANDS and POST_RECOVERY_COMMANDS array variables -+# which evaluate like this: -+# for command in "${PRE_RECOVERY_COMMANDS[@]}" ; do -+# eval "$command" -+# done -+# -+# Using PRE_RECOVERY_COMMANDS and POST_RECOVERY_COMMANDS -+# is simpler when multiple commands should be executed. -+# For example, -+# PRE_RECOVERY_SCRIPT=( 'echo Hello' ';' 'sleep 3' ) -+# can be rewritten as -+# PRE_RECOVERY_COMMANDS=( 'echo Hello' 'sleep 3' ) -+# or -+# PRE_RECOVERY_COMMANDS=( 'echo Hello' ) -+# PRE_RECOVERY_COMMANDS+=( 'sleep 3' ) -+ -+# Those get called at the very beginning of "rear recover". -+# The PRE_RECOVERY_COMMANDS are called directly before the PRE_RECOVERY_SCRIPT. -+# Nothing was recreated and you have only the plain ReaR rescue/recovery system: -+PRE_RECOVERY_COMMANDS=() -+PRE_RECOVERY_SCRIPT= - --# Call this after Relax-and-Recover did everything in the recover workflow. --# Use $TARGET_FS_ROOT (by default '/mnt/local') to refer to the recovered system. -+# Those get called at the very end of "rear recover". -+# The POST_RECOVERY_COMMANDS are called directly after the POST_RECOVERY_SCRIPT. -+# Use $TARGET_FS_ROOT (by default '/mnt/local') to access the recreated target system. - POST_RECOVERY_SCRIPT= -- --# Call this before Relax-and-Recover starts to do anything in the recover workflow. You have the rescue system but nothing else --PRE_RECOVERY_SCRIPT= -+POST_RECOVERY_COMMANDS=() - - # PRE/POST Backup scripts will provide the ability to run certain tasks before and after a ReaR backup. 
- # for example: -diff --git a/usr/share/rear/setup/default/010_pre_recovery_script.sh b/usr/share/rear/setup/default/010_pre_recovery_script.sh -index 005107cc..8b4e4a36 100644 ---- a/usr/share/rear/setup/default/010_pre_recovery_script.sh -+++ b/usr/share/rear/setup/default/010_pre_recovery_script.sh -@@ -1,4 +1,14 @@ -+ -+# The PRE_RECOVERY_COMMANDS are called directly before the PRE_RECOVERY_SCRIPT -+# so PRE_RECOVERY_COMMANDS can also be used to prepare things for the PRE_RECOVERY_SCRIPT: -+ -+local command -+for command in "${PRE_RECOVERY_COMMANDS[@]}" ; do -+ LogPrint "Running PRE_RECOVERY_COMMANDS '$command'" -+ eval "$command" -+done -+ - if test "$PRE_RECOVERY_SCRIPT" ; then -- Log "Running PRE_RECOVERY_SCRIPT '${PRE_RECOVERY_SCRIPT[@]}'" -- eval "${PRE_RECOVERY_SCRIPT[@]}" -+ LogPrint "Running PRE_RECOVERY_SCRIPT '${PRE_RECOVERY_SCRIPT[@]}'" -+ eval "${PRE_RECOVERY_SCRIPT[@]}" - fi -diff --git a/usr/share/rear/wrapup/default/500_post_recovery_script.sh b/usr/share/rear/wrapup/default/500_post_recovery_script.sh -index 77751800..866c9368 100644 ---- a/usr/share/rear/wrapup/default/500_post_recovery_script.sh -+++ b/usr/share/rear/wrapup/default/500_post_recovery_script.sh -@@ -1,4 +1,14 @@ -+ -+# The POST_RECOVERY_COMMANDS are called directly after the POST_RECOVERY_SCRIPT -+# so POST_RECOVERY_COMMANDS can also be used to clean up things after the POST_RECOVERY_SCRIPT: -+ - if test "$POST_RECOVERY_SCRIPT" ; then -- Log "Running POST_RECOVERY_SCRIPT '${POST_RECOVERY_SCRIPT[@]}'" -- eval "${POST_RECOVERY_SCRIPT[@]}" -+ LogPrint "Running POST_RECOVERY_SCRIPT '${POST_RECOVERY_SCRIPT[@]}'" -+ eval "${POST_RECOVERY_SCRIPT[@]}" - fi -+ -+local command -+for command in "${POST_RECOVERY_COMMANDS[@]}" ; do -+ LogPrint "Running POST_RECOVERY_COMMANDS '$command'" -+ eval "$command" -+done diff --git a/rear-bz2119501.patch b/rear-bz2119501.patch index 71b4d47..21e3253 100644 --- a/rear-bz2119501.patch +++ b/rear-bz2119501.patch @@ -7,8 +7,8 @@ index 5bace664..cf960be8 100644 # and https://github.com/rear/rear/pull/1734 +# Some broken symlinks are expected. The 'build' and 'source' symlinks in kernel modules point to kernel sources -+# and are broken untol one installs the kernel-debug-devel or kernel-devel packages (on Fedora) and even then -+# the targets are jot included in the rescue system by default. ++# and are broken until one installs the kernel-debug-devel or kernel-devel packages (on Fedora) and even then ++# the targets are not included in the rescue system by default. +# Do not warn about those, it is just noise. +local irrelevant_symlinks=( '*/lib/modules/*/build' '*/lib/modules/*/source' ) +function symlink_is_irrelevant () { diff --git a/rear-bz2130945.patch b/rear-bz2130945.patch index 5291d13..9d19586 100644 --- a/rear-bz2130945.patch +++ b/rear-bz2130945.patch @@ -1,10 +1,31 @@ +From 6d1e5ab96213a0d79489c4296cd1f5a4be645597 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Zaoral?= +Date: Thu, 29 Sep 2022 15:32:22 +0200 +Subject: [PATCH] Fix initrd regeneration on s390x and Fedora/RHEL + +For some reason, the 550_rebuild_initramfs.sh script was not included +for s390x on Fedora/RHEL so the initrd was not regenerated after backup +restore on this architecture. + +Since all other architectures were actually using the same script, +let's just move it one level up to fix this bug and to also simplify +the directory structure a bit. 
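
As a rough illustration of why the relocation fixes the problem (the paths are the ones in the diffstat below; treating the parent directory as architecture-independent reflects the behavior described above and is background, not part of the patch):

    # Before: the script lived only under per-architecture directories, so s390x never ran it:
    #   usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh
    #   usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh     (symlink)
    #   usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh   (symlink)
    # After: a single location that is picked up on every architecture, including s390x:
    #   usr/share/rear/finalize/Fedora/550_rebuild_initramfs.sh
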
+--- + .../rear/finalize/Fedora/{i386 => }/550_rebuild_initramfs.sh | 0 + usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh | 1 - + usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh | 1 - + 3 files changed, 2 deletions(-) + rename usr/share/rear/finalize/Fedora/{i386 => }/550_rebuild_initramfs.sh (100%) + delete mode 120000 usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh + delete mode 120000 usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh + diff --git a/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/550_rebuild_initramfs.sh similarity index 100% rename from usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh rename to usr/share/rear/finalize/Fedora/550_rebuild_initramfs.sh diff --git a/usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh deleted file mode 120000 -index 22eede59..00000000 +index 22eede59d..000000000 --- a/usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh +++ /dev/null @@ -1 +0,0 @@ @@ -12,7 +33,7 @@ index 22eede59..00000000 \ No newline at end of file diff --git a/usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh deleted file mode 120000 -index 22eede59..00000000 +index 22eede59d..000000000 --- a/usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh +++ /dev/null @@ -1 +0,0 @@ diff --git a/rear-bz2131946.patch b/rear-bz2131946.patch deleted file mode 100644 index 1ee90ba..0000000 --- a/rear-bz2131946.patch +++ /dev/null @@ -1,129 +0,0 @@ -diff --git a/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh -index 172ac032..9cff63a0 100644 ---- a/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh -+++ b/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh -@@ -143,9 +143,9 @@ function create_fs () { - # unless the user has explicitly specified XFS filesystem options: - local xfs_opts - local xfs_device_basename="$( basename $device )" -- local xfs_info_filename="$LAYOUT_XFS_OPT_DIR/$xfs_device_basename.xfs" -+ local xfs_info_filename="$LAYOUT_XFS_OPT_DIR_RESTORE/$xfs_device_basename.xfs" - # Only uppercase letters and digits are used to ensure mkfs_xfs_options_variable_name is a valid bash variable name -- # even in case of complicated device nodes e.g. things like /dev/mapper/SIBM_2810XIV_78033E7012F-part3 -+ # even in case of complicated device nodes e.g. things like /dev/mapper/SIBM_2810XIV_78033E7012F-part3 - # cf. current_orig_device_basename_alnum_uppercase in layout/prepare/default/300_map_disks.sh - local xfs_device_basename_alnum_uppercase="$( echo $xfs_device_basename | tr -d -c '[:alnum:]' | tr '[:lower:]' '[:upper:]' )" - # cf. 
predefined_input_variable_name in the function UserInput in lib/_input-output-functions.sh -diff --git a/usr/share/rear/layout/prepare/default/010_prepare_files.sh b/usr/share/rear/layout/prepare/default/010_prepare_files.sh -index 85964712..7a980e63 100644 ---- a/usr/share/rear/layout/prepare/default/010_prepare_files.sh -+++ b/usr/share/rear/layout/prepare/default/010_prepare_files.sh -@@ -5,6 +5,7 @@ LAYOUT_DEPS="$VAR_DIR/layout/diskdeps.conf" - LAYOUT_TODO="$VAR_DIR/layout/disktodo.conf" - LAYOUT_CODE="$VAR_DIR/layout/diskrestore.sh" - LAYOUT_XFS_OPT_DIR="$VAR_DIR/layout/xfs" -+LAYOUT_XFS_OPT_DIR_RESTORE="$LAYOUT_XFS_OPT_DIR/restore" - - FS_UUID_MAP="$VAR_DIR/layout/fs_uuid_mapping" - LUN_WWID_MAP="$VAR_DIR/layout/lun_wwid_mapping" -diff --git a/usr/share/rear/layout/prepare/default/319_rename_xfs_configs.sh b/usr/share/rear/layout/prepare/default/319_rename_xfs_configs.sh -new file mode 100644 -index 00000000..406afa61 ---- /dev/null -+++ b/usr/share/rear/layout/prepare/default/319_rename_xfs_configs.sh -@@ -0,0 +1,83 @@ -+# Cleanup directory which hold XFS configuration file for `rear recover'. -+# This will avoid possible mess in LAYOUT_XFS_OPT_DIR_RESTORE if `rear recover' -+# would be launched multiple times, where user will choose different disk -+# mapping each time. -+# Removing and creating LAYOUT_XFS_OPT_DIR_RESTORE will ensure that ReaR will -+# have only current files available during current session. -+rm -rf "$LAYOUT_XFS_OPT_DIR_RESTORE" -+mkdir -p "$LAYOUT_XFS_OPT_DIR_RESTORE" -+ -+local excluded_configs=() -+ -+# Read $MAPPING_FILE (disk_mappings) to discover final disk mapping. -+# Once mapping is known, configuration files can be renamed. -+# (e.g. sds2.xfs to sdb2.xfs, ...) -+while read source target junk ; do -+ # Disks in MAPPING_FILE are listed with full device path. Since XFS config -+ # files are created in format e.g. sda2.xfs strip prefixed path to have -+ # only short device name available. -+ base_source=$(basename "$source") -+ base_target=$(basename "$target") -+ -+ # Check if XFS configuration file for whole device (unpartitioned) -+ # is available (sda, sdb, ...). If so, rename and copy it to -+ # LAYOUT_XFS_OPT_DIR_RESTORE. -+ if [ -e "$LAYOUT_XFS_OPT_DIR/$base_source.xfs" ]; then -+ Log "Migrating XFS configuration file $base_source.xfs to $base_target.xfs" -+ cp "$v" "$LAYOUT_XFS_OPT_DIR/$base_source.xfs" \ -+ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_target.xfs" -+ -+ # Replace old device name in meta-data= option in XFS -+ # configuration file as well. -+ sed -i s#"meta-data=${source}\(\s\)"#"meta-data=${target}\1"# \ -+ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_target.xfs" -+ -+ # Mark XFS config file as processed to avoid copying it again later. -+ # More details on why are configs excluded can be found near the -+ # end of this script (near `tar' command). -+ excluded_configs+=("--exclude=$base_source.xfs") -+ fi -+ -+ # Find corresponding partitions to source disk in LAYOUT_FILE -+ # and migrate/rename them too if necessary. 
-+ while read _ layout_device _ _ _ _ layout_partition; do -+ if [[ "$source" = "$layout_device" ]]; then -+ base_src_layout_partition=$(basename "$layout_partition") -+ base_dst_layout_partition=${base_src_layout_partition//$base_source/$base_target} -+ dst_layout_partition=${layout_partition//$base_source/$base_target} -+ -+ if [ -e "$LAYOUT_XFS_OPT_DIR/$base_src_layout_partition.xfs" ]; then -+ Log "Migrating XFS configuration $base_src_layout_partition.xfs to $base_dst_layout_partition.xfs" -+ cp "$v" "$LAYOUT_XFS_OPT_DIR/$base_src_layout_partition.xfs" \ -+ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_dst_layout_partition.xfs" -+ -+ # Replace old device name in meta-data= option in XFS -+ # configuration file as well. -+ sed -i s#"meta-data=${layout_partition}\(\s\)"#"meta-data=${dst_layout_partition}\1"# \ -+ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_dst_layout_partition.xfs" -+ -+ # Mark XFS config file as processed to avoid copying it again later. -+ # More details on why are configs excluded can be found near the -+ # end of this script (near `tar' command). -+ excluded_configs+=("--exclude=$base_src_layout_partition.xfs") -+ fi -+ fi -+ done < <( grep -E "^part " "$LAYOUT_FILE" ) -+done < <( grep -v '^#' "$MAPPING_FILE" ) -+ -+pushd "$LAYOUT_XFS_OPT_DIR" >/dev/null -+# Copy remaining files -+# We need to copy remaining files into LAYOUT_XFS_OPT_DIR_RESTORE which will -+# serve as base dictionary where ReaR will look for XFS config files. -+# It is necessary to copy only files that were not previously processed, -+# because in LAYOUT_XFS_OPT_DIR they are still listed with -+# original name and copy to LAYOUT_XFS_OPT_DIR_RESTORE could overwrite -+# XFS configs already migrated. -+# e.g. with following disk mapping situation: -+# /dev/sda2 => /dev/sdb2 -+# /dev/sdb2 => /dev/sda2 -+# Files in LAYOUT_XFS_OPT_DIR_RESTORE would be overwritten by XFS configs with -+# wrong names. -+# tar is used to take advantage of its exclude feature. -+tar cf - --exclude=restore "${excluded_configs[@]}" . | tar xfp - -C "$LAYOUT_XFS_OPT_DIR_RESTORE" -+popd >/dev/null -diff --git a/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh b/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh -index 7895e4ee..fc0fa8fc 100644 ---- a/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh -+++ b/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh -@@ -10,6 +10,7 @@ mkdir -p $v $VAR_DIR/layout/config - # We need directory for XFS options only if XFS is in use: - if test "$( mount -t xfs )" ; then - LAYOUT_XFS_OPT_DIR="$VAR_DIR/layout/xfs" -+ rm -rf $LAYOUT_XFS_OPT_DIR - mkdir -p $v $LAYOUT_XFS_OPT_DIR - fi - diff --git a/rear-device-shrinking-bz2223895.patch b/rear-device-shrinking-bz2223895.patch index 4da263c..1964255 100644 --- a/rear-device-shrinking-bz2223895.patch +++ b/rear-device-shrinking-bz2223895.patch @@ -1,4 +1,5 @@ -commit 4f03a10d4866efc9b6920a3878e6397d170742f9 +commit 41c2d9b1fbcece4b0890ab92e9f5817621917ad3 (from 23977a19101b6e6eaeebbe8ce013332ddf9ea517) +Merge: 23977a19 686012cb Author: Johannes Meixner Date: Thu Jul 20 15:11:52 2023 +0200 @@ -12,10 +13,10 @@ Date: Thu Jul 20 15:11:52 2023 +0200 cf. 
https://github.com/rear/rear/pull/3027 diff --git a/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh b/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh -index ec55f331..0e402b01 100644 +index e8f2be20..2169efaa 100644 --- a/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh +++ b/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh -@@ -92,9 +92,13 @@ done >$copy_as_is_exclude_file +@@ -110,9 +110,13 @@ done >$copy_as_is_exclude_file # COPY_AS_IS+=( /path/to/directory/* ) # which are used in our scripts and by users in their etc/rear/local.conf # cf. https://github.com/rear/rear/pull/2405#issuecomment-633512932 diff --git a/rear-luks-key-bz2228779.patch b/rear-luks-key-bz2228779.patch deleted file mode 100644 index 56559d9..0000000 --- a/rear-luks-key-bz2228779.patch +++ /dev/null @@ -1,25 +0,0 @@ -commit 2aa7b47354bdf5863071c8b479d29c99aad05ecb -Author: Johannes Meixner -Date: Fri Jul 24 13:02:45 2020 +0200 - - Update 240_reassign_luks_keyfiles.sh - - Use ReaR specific TMP_DIR (not TMPDIR or hardcoded /tmp) - -diff --git a/usr/share/rear/finalize/GNU/Linux/240_reassign_luks_keyfiles.sh b/usr/share/rear/finalize/GNU/Linux/240_reassign_luks_keyfiles.sh -index d989c3fb..358f3950 100644 ---- a/usr/share/rear/finalize/GNU/Linux/240_reassign_luks_keyfiles.sh -+++ b/usr/share/rear/finalize/GNU/Linux/240_reassign_luks_keyfiles.sh -@@ -24,9 +24,9 @@ awk ' - while read target_name source_device original_keyfile; do - Log "Re-assigning keyfile $original_keyfile to LUKS device $target_name ($source_device)" - -- # The scheme for generating a temporary keyfile path must be the same here and in the 'layout/prepare' stage. -- temp_keyfile="${TMPDIR:-/tmp}/LUKS-keyfile-$target_name" -- [ -f "$temp_keyfile" ] || BugError "temporary keyfile $temp_keyfile not found" -+ # The scheme for generating a temporary keyfile path must be the same here and in the 'layout/prepare' stage: -+ temp_keyfile="$TMP_DIR/LUKS-keyfile-$target_name" -+ test -f "$temp_keyfile" || BugError "temporary LUKS keyfile $temp_keyfile not found" - - target_keyfile="$TARGET_FS_ROOT/$original_keyfile" - diff --git a/rear-pr2675.patch b/rear-pr2675.patch deleted file mode 100644 index 7d11071..0000000 --- a/rear-pr2675.patch +++ /dev/null @@ -1,60 +0,0 @@ -diff --git a/usr/share/rear/lib/framework-functions.sh b/usr/share/rear/lib/framework-functions.sh -index 4878216b..e919bdbf 100644 ---- a/usr/share/rear/lib/framework-functions.sh -+++ b/usr/share/rear/lib/framework-functions.sh -@@ -121,7 +121,7 @@ function cleanup_build_area_and_end_program () { - sleep 2 - umount_mountpoint_lazy $BUILD_DIR/outputfs - fi -- remove_temporary_mountpoint '$BUILD_DIR/outputfs' || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove" -+ remove_temporary_mountpoint "$BUILD_DIR/outputfs" || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove" - rmdir $v $BUILD_DIR >&2 - fi - Log "End of program reached" -diff --git a/usr/share/rear/lib/global-functions.sh b/usr/share/rear/lib/global-functions.sh -index c1a11615..0f8f362d 100644 ---- a/usr/share/rear/lib/global-functions.sh -+++ b/usr/share/rear/lib/global-functions.sh -@@ -317,7 +317,20 @@ function url_path() { - - ### Returns true if one can upload files to the URL - function scheme_accepts_files() { -- local scheme=$1 -+ # Be safe against 'set -eu' which would exit 'rear' with "bash: $1: unbound variable" -+ # when scheme_accepts_files is called without an argument -+ # by bash parameter expansion with using an empty default value if $1 is unset or null. 
-+ # Bash parameter expansion with assigning a default value ${1:=} does not work -+ # (then it would still exit with "bash: $1: cannot assign in this way") -+ # but using a default value is practicable here because $1 is used only once -+ # cf. https://github.com/rear/rear/pull/2675#discussion_r705018956 -+ local scheme=${1:-} -+ # Return false if scheme is empty or blank (e.g. when OUTPUT_URL is unset or empty or blank) -+ # cf. https://github.com/rear/rear/issues/2676 -+ # and https://github.com/rear/rear/issues/2667#issuecomment-914447326 -+ # also return false if scheme is more than one word (so no quoted "$scheme" here) -+ # cf. https://github.com/rear/rear/pull/2675#discussion_r704401462 -+ test $scheme || return 1 - case $scheme in - (null|tape|obdr) - # tapes do not support uploading arbitrary files, one has to handle them -@@ -341,7 +354,10 @@ function scheme_accepts_files() { - ### Returning true does not imply that the URL is currently mounted at a filesystem and usable, - ### only that it can be mounted (use mount_url() first) - function scheme_supports_filesystem() { -- local scheme=$1 -+ # Be safe against 'set -eu' exit if scheme_supports_filesystem is called without argument -+ local scheme=${1:-} -+ # Return false if scheme is empty or blank or more than one word, cf. scheme_accepts_files() above -+ test $scheme || return 1 - case $scheme in - (null|tape|obdr|rsync|fish|ftp|ftps|hftp|http|https|sftp) - return 1 -@@ -560,7 +576,7 @@ function umount_url() { - - RemoveExitTask "perform_umount_url '$url' '$mountpoint' lazy" - -- remove_temporary_mountpoint '$mountpoint' && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'" -+ remove_temporary_mountpoint "$mountpoint" && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'" - return 0 - } - diff --git a/rear-remove-lvmdevices-bz2145014.patch b/rear-remove-lvmdevices-bz2145014.patch index 87d4b72..cad9bcd 100644 --- a/rear-remove-lvmdevices-bz2145014.patch +++ b/rear-remove-lvmdevices-bz2145014.patch @@ -1,21 +1,24 @@ -commit ad720ad788be1d653da31be36fca5e886e314ddb -Author: Pavel Cahyna -Date: Thu Aug 24 11:41:25 2023 +0200 +From 5a8c5086bf3fc28236436ff3ef27196509f0375d Mon Sep 17 00:00:00 2001 +From: Pavel Cahyna +Date: Thu, 24 Aug 2023 11:41:25 +0200 +Subject: [PATCH] Remove the lvmdevices file at the end of recovery - Remove the lvmdevices file at the end of recovery - - The file /etc/lvm/devices/system.devices restricts LVM to disks with - given (hardware) IDs (serial numbers, WWNs). See lvmdevices(8). - - Unfortunately, when restoring to different disks than in the original - system, it will mean that LVM is broken in the recovered system (it - won't find any disks). Therefore it is safer to remove the file to - force the old behavior where LVM scans all disks. This used to be the - LVM default (use_devicesfile=0). +The file /etc/lvm/devices/system.devices restricts LVM to disks with +given (hardware) IDs (serial numbers, WWNs). See lvmdevices(8). + +Unfortunately, when restoring to different disks than in the original +system, it will mean that LVM is broken in the recovered system (it +won't find any disks). Therefore it is safer to remove the file to +force the old behavior where LVM scans all disks. This used to be the +LVM default (use_devicesfile=0). 
+--- + .../GNU/Linux/230_remove_lvmdevices.sh | 25 +++++++++++++++++++ + 1 file changed, 25 insertions(+) + create mode 100644 usr/share/rear/finalize/GNU/Linux/230_remove_lvmdevices.sh diff --git a/usr/share/rear/finalize/GNU/Linux/230_remove_lvmdevices.sh b/usr/share/rear/finalize/GNU/Linux/230_remove_lvmdevices.sh new file mode 100644 -index 00000000..a51e6bca +index 0000000000..9392c9f52f --- /dev/null +++ b/usr/share/rear/finalize/GNU/Linux/230_remove_lvmdevices.sh @@ -0,0 +1,25 @@ @@ -39,8 +42,9 @@ index 00000000..a51e6bca +realfile="$TARGET_FS_ROOT/$file" +# OK if file not found +test -f "$realfile" || return 0 -+mv $v "$realfile" "${realfile}.rearbak" -+LogPrint "Renamed LVM devices file $realfile to ${realfile}.rearbak ++mv $v "$realfile" "$realfile.rearbak" ++LogPrint "Renamed LVM devices file $realfile to $realfile.rearbak +to prevent LVM problems in the recovered system, verify that the file +is correct after booting the recovered system and move it back, or +regenerate it using vgimportdevices." + diff --git a/rear-save-lvm-poolmetadatasize-RHEL-6984.patch b/rear-save-lvm-poolmetadatasize-RHEL-6984.patch index 8754e6d..91c6dfc 100644 --- a/rear-save-lvm-poolmetadatasize-RHEL-6984.patch +++ b/rear-save-lvm-poolmetadatasize-RHEL-6984.patch @@ -1,16 +1,14 @@ -From e7b84271536782fbc8673ef4573e155e1dfa850e Mon Sep 17 00:00:00 2001 -From: pcahyna -Date: Wed, 1 Nov 2023 12:53:33 +0100 -Subject: [PATCH] Merge pull request #3061 from - pcahyna/save-lvm-poolmetadatasize +commit f6af518baf3b5a4dc06bf8cfea262e627eee3e07 +Merge: ed4c78d5 75a86fc3 +Author: pcahyna +Date: Wed Nov 1 12:53:33 2023 +0100 -Save LVM pool metadata volume size in disk layout ---- - .../layout/save/GNU/Linux/220_lvm_layout.sh | 39 ++++++++++++------- - 1 file changed, 24 insertions(+), 15 deletions(-) + Merge pull request #3061 from pcahyna/save-lvm-poolmetadatasize + + Save LVM pool metadata volume size in disk layout diff --git a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh -index f21845df9..42f0e4126 100644 +index 7400c586..b12cff1f 100644 --- a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh +++ b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh @@ -18,7 +18,7 @@ local already_processed_lvs=() @@ -45,7 +43,7 @@ index f21845df9..42f0e4126 100644 # Get the volume group configuration: # Format: lvmgrp [] [] -@@ -200,17 +200,17 @@ local lvs_exit_code +@@ -200,18 +200,18 @@ local lvs_exit_code # Specify the fields for the lvs command depending on whether or not the 'lv_layout' field is supported: if is_true $lv_layout_supported ; then @@ -61,14 +59,29 @@ index f21845df9..42f0e4126 100644 - # with lvs_fields="origin,lv_name,vg_name,lv_size,lv_layout,pool_lv,chunk_size,stripes,stripe_size,seg_size" + # with lvs_fields="origin,lv_name,vg_name,lv_size,lv_layout,pool_lv,chunk_size,stripes,stripe_size,seg_size,lv_metadata_size" # i.e. when the 'lv_layout' field is supported: -- # :root:system:19927138304:linear::0:1:0:19927138304 -- # :swap:system:1535115264:linear::0:1:0:1535115264 -+ # :root:system:19927138304:linear::0:1:0:19927138304: -+ # :swap:system:1535115264:linear::0:1:0:1535115264: - # There are two leading blanks in the output (at least on SLES12-SP4 with LVM 2.02.180). 
- lvm lvs --separator=':' --noheadings --units b --nosuffix -o $lvs_fields | while read line ; do +- # :home:system:6148849664:linear::0:1:0:6148849664 +- # :root:system:14050918400:linear::0:1:0:14050918400 +- # :swap:system:1262485504:linear::0:1:0:1262485504 ++ # :home:system:6148849664:linear::0:1:0:6148849664: ++ # :root:system:14050918400:linear::0:1:0:14050918400: ++ # :swap:system:1262485504:linear::0:1:0:1262485504: + # There are two leading blanks in the output (at least on SLES12-SP4 with LVM 2.02.180 and SLES15-SP3 with LVM 2.03.05). + # The 'lvs' output lines ordering does not match the ordering of the LVs kernel device nodes /dev/dm-N + # # lsblk -ipbo NAME,KNAME,TYPE,FSTYPE,SIZE,MOUNTPOINT /dev/sda2 +@@ -251,9 +251,9 @@ local lvs_exit_code + # that the recreated system did not boot (boot screen showed GRUB but there it hung with constant 100% CPU usage) + # so automatically shrinking only the biggest LVs avoids that a relatively small 'swap' LV gets shrinked. + # With 'sort -n -t ':' -k 4' the above 'lvs' output lines become +- # :swap:system:1262485504:linear::0:1:0:1262485504 +- # :home:system:6148849664:linear::0:1:0:6148849664 +- # :root:system:14050918400:linear::0:1:0:14050918400 ++ # :swap:system:1262485504:linear::0:1:0:1262485504: ++ # :home:system:6148849664:linear::0:1:0:6148849664: ++ # :root:system:14050918400:linear::0:1:0:14050918400: + # so only the 'root' LV may get automatically shrinked if needed. + lvm lvs --separator=':' --noheadings --units b --nosuffix -o $lvs_fields | sort -n -t ':' -k 4 | while read line ; do -@@ -261,14 +261,23 @@ local lvs_exit_code +@@ -304,14 +304,23 @@ local lvs_exit_code # With the above example segmentsize=19927138304 and segmentsize=1535115264 segmentsize="$( echo "$line" | awk -F ':' '{ print $10 }' )" @@ -97,6 +110,3 @@ index f21845df9..42f0e4126 100644 [ $chunksize -eq 0 ] || kval="${kval:+$kval }chunksize:${chunksize}b" [ $stripesize -eq 0 ] || kval="${kval:+$kval }stripesize:${stripesize}b" [ $segmentsize -eq $size ] || infokval="${infokval:+$infokval }segmentsize:${segmentsize}b" --- -2.43.0 - diff --git a/rear-sfdc02772301.patch b/rear-sfdc02772301.patch deleted file mode 100644 index 74456dd..0000000 --- a/rear-sfdc02772301.patch +++ /dev/null @@ -1,38 +0,0 @@ -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index 9ada92c3..455aa3ce 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -1813,7 +1813,7 @@ OBDR_BLOCKSIZE=2048 - # BACKUP=NBU stuff (Symantec/Veritas NetBackup) - ## - # --COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt ) -+COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt /opt/VRTSpbx /etc/vx/VxICS /etc/vx/vrtslog.conf ) - COPY_AS_IS_EXCLUDE_NBU=( /usr/openv/netbackup/logs "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal ) - # See https://github.com/rear/rear/issues/2105 why /usr/openv/netbackup/sec/at/lib/ is needed: - NBU_LD_LIBRARY_PATH="/usr/openv/lib:/usr/openv/netbackup/sec/at/lib/" -diff --git a/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh b/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh -index cd48b8d9..ae5a3ccc 100644 ---- a/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh -+++ 
b/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh -@@ -7,6 +7,12 @@ - - [[ $NBU_version -lt 7 ]] && return # NBU is using xinetd when version <7.x - -+if [ -e "/etc/init.d/vxpbx_exchanged" ]; then -+ cp $v /etc/init.d/vxpbx_exchanged $ROOTFS_DIR/etc/scripts/system-setup.d/vxpbx_exchanged.real -+ chmod $v +x $ROOTFS_DIR/etc/scripts/system-setup.d/vxpbx_exchanged.real -+ echo "( /etc/scripts/system-setup.d/vxpbx_exchanged.real start )" > $ROOTFS_DIR/etc/scripts/system-setup.d/89-vxpbx_exchanged.sh -+fi -+ - if [ -e "/etc/init.d/netbackup" ]; then - cp $v /etc/init.d/netbackup $ROOTFS_DIR/etc/scripts/system-setup.d/netbackup.real - chmod $v +x $ROOTFS_DIR/etc/scripts/system-setup.d/netbackup.real -diff --git a/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore b/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore -new file mode 100644 -index 00000000..d6b7ef32 ---- /dev/null -+++ b/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore -@@ -0,0 +1,2 @@ -+* -+!.gitignore diff --git a/rear-skip-useless-xfs-mount-options-RHEL-10478.patch b/rear-skip-useless-xfs-mount-options-RHEL-10478.patch index 2863131..38e3c03 100644 --- a/rear-skip-useless-xfs-mount-options-RHEL-10478.patch +++ b/rear-skip-useless-xfs-mount-options-RHEL-10478.patch @@ -1,5 +1,16 @@ +commit ed4c78d5fe493ea368989d0086a733653692f5cb +Merge: 3c9398bb 0cdcab02 +Author: pcahyna +Date: Mon Oct 30 18:31:01 2023 +0100 + + Merge pull request #3058 from pcahyna/skip-useless-xfs-mount-options + + Skip useless xfs mount options when mounting during recovery + + Cherry-picked-by: Lukáš Zaoral + diff --git a/usr/share/rear/layout/prepare/GNU/Linux/133_include_mount_filesystem_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/133_include_mount_filesystem_code.sh -index d57077791..87ab5d691 100644 +index d5707779..f7115f55 100644 --- a/usr/share/rear/layout/prepare/GNU/Linux/133_include_mount_filesystem_code.sh +++ b/usr/share/rear/layout/prepare/GNU/Linux/133_include_mount_filesystem_code.sh @@ -29,6 +29,7 @@ mount_fs() { @@ -10,7 +21,23 @@ index d57077791..87ab5d691 100644 value=${value//nodev/dev} # btrfs mount options like subvolid=259 or subvol=/@/.snapshots/1/snapshot # from the old system cannot work here for recovery because btrfs subvolumes -@@ -147,6 +148,27 @@ mount_fs() { +@@ -36,13 +37,8 @@ mount_fs() { + # so that those mount options are removed here. 
All btrfs subvolume handling + # happens in the btrfs_subvolumes_setup_SLES function in 136_include_btrfs_subvolumes_SLES_code.sh + # or in the btrfs_subvolumes_setup_generic function in 135_include_btrfs_subvolumes_generic_code.sh +- # First add a comma at the end so that it is easier to remove a mount option at the end: +- value=${value/%/,} +- # Remove all subvolid= and subvol= mount options (the extglob shell option is enabled in rear): +- value=${value//subvolid=*([^,]),/} +- value=${value//subvol=*([^,]),/} +- # Remove all commas at the end: +- mountopts=${value/%,/} ++ # Remove all subvolid= and subvol= mount options: ++ mountopts="$( remove_mount_options_values $value subvolid subvol )" + ;; + esac + done +@@ -147,6 +143,27 @@ mount_fs() { echo "mount $mountopts,remount,user_xattr $device $TARGET_FS_ROOT$mountpoint" ) >> "$LAYOUT_CODE" ;; @@ -39,10 +66,10 @@ index d57077791..87ab5d691 100644 ( echo "mkdir -p $TARGET_FS_ROOT$mountpoint" diff --git a/usr/share/rear/lib/filesystems-functions.sh b/usr/share/rear/lib/filesystems-functions.sh -index afdd3f24c..658d757f4 100644 +index f459c204..f0547706 100644 --- a/usr/share/rear/lib/filesystems-functions.sh +++ b/usr/share/rear/lib/filesystems-functions.sh -@@ -239,3 +239,40 @@ function xfs_parse +@@ -256,3 +256,40 @@ function total_target_fs_used_disk_space() { # Output xfs options for further use echo "$xfs_opts" } diff --git a/rear-tmpdir.patch b/rear-tmpdir.patch deleted file mode 100644 index ec5ba71..0000000 --- a/rear-tmpdir.patch +++ /dev/null @@ -1,37 +0,0 @@ -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index 9ada92c3..3bdb5497 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -57,10 +57,16 @@ - # - # where /prefix/for/rear/working/directory must already exist. - # This is useful for example when there is not sufficient free space --# in /tmp or $TMPDIR for the ISO image or even the backup archive. --# TMPDIR cannot be set to a default value here, otherwise /usr/sbin/rear -+# in /var/tmp or $TMPDIR for the ISO image or even the backup archive. -+# TMPDIR cannot be set to a default value here unconditionally but only -+# if it is not set before calling the program, otherwise /usr/sbin/rear - # would not work in compliance with the Linux/Unix standards regarding TMPDIR - # see https://github.com/rear/rear/issues/968 -+# The default is /var/tmp instead of the more usual /tmp (the system default), -+# because /tmp is not intended for such large amounts of data that ReaR usually -+# produces when creating the image (see file-hierarchy(7)). In particular, -+# /tmp can be a tmpfs, and thus restricted by the available RAM/swap. -+export TMPDIR="${TMPDIR-/var/tmp}" - - ## - # ROOT_HOME_DIR -diff --git a/usr/share/rear/rescue/GNU/Linux/600_unset_TMPDIR_in_rescue_conf.sh b/usr/share/rear/rescue/GNU/Linux/600_unset_TMPDIR_in_rescue_conf.sh -deleted file mode 100644 -index 84d0cabb..00000000 ---- a/usr/share/rear/rescue/GNU/Linux/600_unset_TMPDIR_in_rescue_conf.sh -+++ /dev/null -@@ -1,8 +0,0 @@ --cat - <> "$ROOTFS_DIR/etc/rear/rescue.conf" --# TMPDIR variable may be defined in local.conf file as prefix dir for mktemp command --# e.g. by defining TMPDIR=/var we would get our BUILD_DIR=/var/tmp/rear.XXXXXXXXXXXX --# However, in rescue we want our BUILD_DIR=/tmp/rear.XXXXXXX as we are not sure that --# the user defined TMPDIR would exist in our rescue image --# by 'unset TMPDIR' we achieve above goal (as rescue.conf is read after local.conf)! 
--unset TMPDIR --EOF diff --git a/rear-uefi-usb-secureboot-bz2196445.patch b/rear-uefi-usb-secureboot-bz2196445.patch index 2fc7035..c20ab8c 100644 --- a/rear-uefi-usb-secureboot-bz2196445.patch +++ b/rear-uefi-usb-secureboot-bz2196445.patch @@ -1,4 +1,5 @@ -commit 4af486794d45adbda7567361d8dcc658599dcd2c +commit 46b29195bff7f93cf5bd4c2dd83d69e5676800cb +Merge: 2611da2b efb37fb9 Author: Johannes Meixner Date: Tue Aug 8 14:44:16 2023 +0200 @@ -15,14 +16,18 @@ Date: Tue Aug 8 14:44:16 2023 +0200 instead of let ReaR make its own EFI binary via build_bootx86_efi() see https://github.com/rear/rear/pull/3031 + Cherry-picked-by: Lukáš Zaoral + diff --git a/usr/share/rear/output/USB/Linux-i386/100_create_efiboot.sh b/usr/share/rear/output/USB/Linux-i386/100_create_efiboot.sh -index f4659306..fd631c44 100644 +index 8ad4d97e..123442cc 100644 --- a/usr/share/rear/output/USB/Linux-i386/100_create_efiboot.sh +++ b/usr/share/rear/output/USB/Linux-i386/100_create_efiboot.sh -@@ -29,6 +29,44 @@ mount $EFI_PART $EFI_MPT || Error "Failed to mount EFI partition '$EFI_PART' at - mkdir -p $EFI_DST || Error "Failed to create directory '$EFI_DST'" - - # Copy boot loader +@@ -51,8 +51,44 @@ mkdir -p $efi_dst || Error "Failed to create directory '$efi_dst'" + # "cp: failed to preserve ownership for '/tmp/rear-efi.XXXXXXXXXX/EFI/BOOT/kernel': Operation not permitted" + # because it copies to a VFAT filesystem on the EFI partition (see format/USB/default/300_format_usb_disk.sh) + # cf. https://github.com/rear/rear/issues/2683 +-# Copy boot loader: +-cp -L $v "$UEFI_BOOTLOADER" "$efi_dst/BOOTX64.efi" || Error "Failed to copy UEFI_BOOTLOADER '$UEFI_BOOTLOADER' to $efi_dst/BOOTX64.efi" +# The SECURE_BOOT_BOOTLOADER related code below is based on the code in output/ISO/Linux-i386/250_populate_efibootimg.sh +# because I noticed that Secure Boot works with ISO at least for me, cf. +# https://github.com/rear/rear/pull/3025#issuecomment-1635876186 @@ -46,7 +51,7 @@ index f4659306..fd631c44 100644 + # then Shim (usually shim.efi) must be copied as EFI/BOOT/BOOTX64.efi + # and Shim's second stage bootloader must be also copied where Shim already is. + DebugPrint "Using '$SECURE_BOOT_BOOTLOADER' as first stage Secure Boot bootloader BOOTX64.efi" -+ cp -L $v "$SECURE_BOOT_BOOTLOADER" "$EFI_DST/BOOTX64.efi" || Error "Failed to copy SECURE_BOOT_BOOTLOADER '$SECURE_BOOT_BOOTLOADER' to $EFI_DST/BOOTX64.efi" ++ cp -L $v "$SECURE_BOOT_BOOTLOADER" "$efi_dst/BOOTX64.efi" || Error "Failed to copy SECURE_BOOT_BOOTLOADER '$SECURE_BOOT_BOOTLOADER' to $efi_dst/BOOTX64.efi" + # When Shim is used, its second stage bootloader can be actually anything + # named grub*.efi (second stage bootloader is Shim compile time option), see + # http://www.rodsbooks.com/efi-bootloaders/secureboot.html#initial_shim @@ -57,26 +62,27 @@ index f4659306..fd631c44 100644 + # cf. 
https://github.com/rear/rear/issues/1921 + test "$second_stage_UEFI_bootloader_files" || Error "Could not find second stage Secure Boot bootloader $uefi_bootloader_dirname/grub*.efi" + DebugPrint "Using second stage Secure Boot bootloader files: $second_stage_UEFI_bootloader_files" -+ cp -L $v $second_stage_UEFI_bootloader_files $EFI_DST/ || Error "Failed to copy second stage Secure Boot bootloader files" ++ cp -L $v $second_stage_UEFI_bootloader_files $efi_dst/ || Error "Failed to copy second stage Secure Boot bootloader files" +else -+ cp -L $v "$UEFI_BOOTLOADER" "$EFI_DST/BOOTX64.efi" || Error "Failed to copy UEFI_BOOTLOADER '$UEFI_BOOTLOADER' to $EFI_DST/BOOTX64.efi" ++ cp -L $v "$UEFI_BOOTLOADER" "$efi_dst/BOOTX64.efi" || Error "Failed to copy UEFI_BOOTLOADER '$UEFI_BOOTLOADER' to $efi_dst/BOOTX64.efi" +fi - cp $v $UEFI_BOOTLOADER "$EFI_DST/BOOTX64.efi" || Error "Failed to copy UEFI_BOOTLOADER '$UEFI_BOOTLOADER' to $EFI_DST/BOOTX64.efi" - - # Copy kernel -@@ -93,7 +131,14 @@ EOF - create_grub2_cfg ${EFI_DIR}/kernel ${EFI_DIR}/$REAR_INITRD_FILENAME > ${EFI_DST}/grub.cfg - - # Create bootloader, this overwrite BOOTX64.efi copied in previous step ... -- build_bootx86_efi ${EFI_DST}/BOOTX64.efi ${EFI_DST}/grub.cfg "/boot" "$UEFI_BOOTLOADER" -+ # Create BOOTX86.efi but only if we are NOT secure booting. -+ # We are not able to create signed boot loader -+ # so we need to reuse existing one. -+ # See issue #1374 -+ # build_bootx86_efi () can be safely used for other scenarios. -+ if ! test -f "$SECURE_BOOT_BOOTLOADER" ; then -+ build_bootx86_efi ${EFI_DST}/BOOTX64.efi ${EFI_DST}/grub.cfg "/boot" "$UEFI_BOOTLOADER" -+ fi - ;; - *) - BugError "Neither grub 0.97 nor 2.0" + # Copy kernel: + cp -L $v "$KERNEL_FILE" "$efi_dst/kernel" || Error "Failed to copy KERNEL_FILE '$KERNEL_FILE' to $efi_dst/kernel" + # Copy initrd: +@@ -101,8 +137,14 @@ EOF + [[ -z "$GRUB2_SEARCH_ROOT_COMMAND" ]] && GRUB2_SEARCH_ROOT_COMMAND="search --no-floppy --set=root --label $efi_label" + # Create config for GRUB 2 + create_grub2_cfg $efi_dir/kernel $efi_dir/$REAR_INITRD_FILENAME > $efi_dst/grub.cfg +- # Create bootloader, this overwrite BOOTX64.efi copied in previous step ... +- build_bootx86_efi $efi_dst/BOOTX64.efi $efi_dst/grub.cfg "/boot" "$UEFI_BOOTLOADER" ++ # Create BOOTX86.efi but only if we are NOT secure booting. ++ # We are not able to create signed boot loader ++ # so we need to reuse existing one. ++ # See issue #1374 ++ # build_bootx86_efi () can be safely used for other scenarios. ++ if ! test -f "$SECURE_BOOT_BOOTLOADER" ; then ++ build_bootx86_efi $efi_dst/BOOTX64.efi $efi_dst/grub.cfg "/boot" "$UEFI_BOOTLOADER" ++ fi + ;; + (*) + Error "GRUB version '$grub_version' is neither '0' (legacy GRUB) nor '2' (GRUB 2)" diff --git a/rear-usb-uefi-part-size-bz2228402.patch b/rear-usb-uefi-part-size-bz2228402.patch deleted file mode 100644 index 68cf13e..0000000 --- a/rear-usb-uefi-part-size-bz2228402.patch +++ /dev/null @@ -1,41 +0,0 @@ -commit 1cd41052f7a7cd42ea14ea53b7280c73624aba3f -Author: Johannes Meixner -Date: Mon Mar 21 12:14:21 2022 +0100 - - Merge pull request #2774 from rear/jsmeix-1024-USB_UEFI_PART_SIZE - - In default.conf increase USB_UEFI_PART_SIZE to 1024 MiB, - cf. https://github.com/rear/rear/pull/1205 - in particular to also make things work by default when additional - third-party kernel modules and firmware (e.g. from Nvidia) are used, - cf. 
https://github.com/rear/rear/issues/2770#issuecomment-1068935688 - -diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index 8faa56aa..17a764cb 100644 ---- a/usr/share/rear/conf/default.conf -+++ b/usr/share/rear/conf/default.conf -@@ -872,13 +872,20 @@ USB_PARTITION_ALIGN_BLOCK_SIZE="8" - # in MiB when formatting a medium by the format workflow. - # If USB_UEFI_PART_SIZE is empty or invalid (i.e. not an unsigned integer larger than 0) - # the user must interactively enter a valid value while running the format workflow. --# The default value of 400 MiB should be sufficiently big and it is in compliance --# with the 8 MiB partition alignment default value ( 400 = 8 * 50 ) --# and even with a 16 MiB partition alignment value ( 400 = 16 * 25 ) -+# The default value of 1024 MiB should be sufficiently big - # cf. https://github.com/rear/rear/pull/1205 -+# in particular when third-party kernel modules and firmware (e.g. from Nvidia) are used -+# cf. https://github.com/rear/rear/issues/2770#issuecomment-1068935688 -+# and 1024 MiB is in compliance with the 8 MiB partition alignment value ( 1024 = 8 * 128 ) -+# and also with higher 2^n MiB partition alignment values. -+# Furthermore the default value of 1024 MiB results that the FAT filesystem of the ESP -+# will be in compliance with that the ESP should officially use a FAT32 filesystem -+# because mkfs.vfat automatically makes a FAT32 filesystem starting at 512 MiB -+# (a FAT16 ESP works in most cases but causes issues with certain UEFI firmware) -+# cf. https://github.com/rear/rear/issues/2575 - # The value of USB_UEFI_PART_SIZE will be rounded to the nearest - # USB_PARTITION_ALIGN_BLOCK_SIZE chunk: --USB_UEFI_PART_SIZE="400" -+USB_UEFI_PART_SIZE="1024" - # - # Default boot option (i.e. what gets booted automatically after some timeout) - # when EXTLINUX boots the USB stick or USB disk or other disk device on BIOS systems. diff --git a/rear-vg-command-not-found-bz2121476.patch b/rear-vg-command-not-found-bz2121476.patch deleted file mode 100644 index 6d6ab1d..0000000 --- a/rear-vg-command-not-found-bz2121476.patch +++ /dev/null @@ -1,21 +0,0 @@ -commit ead05a460d3b219372f47be888ba6011c7fd3318 -Author: Pavel Cahyna -Date: Tue Aug 22 12:32:04 2023 +0200 - - Fix downstream only bug - - \$IsInArray -> IsInArray - it is a shell function, not a variable. - -diff --git a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh -index d34ab335..a65a9c8e 100644 ---- a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh -+++ b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh -@@ -246,7 +246,7 @@ create_lvmvol() { - local warnraidline - - if [ $is_thin -eq 0 ] ; then -- ifline="if IsInArray $vg \"\${create_logical_volumes[@]}\" && ! \$IsInArray $vg \"\${create_thin_volumes_only[@]}\" ; then" -+ ifline="if IsInArray $vg \"\${create_logical_volumes[@]}\" && ! 
IsInArray $vg \"\${create_thin_volumes_only[@]}\" ; then" - else - ifline="if IsInArray $vg \"\${create_logical_volumes[@]}\" ; then" - fi diff --git a/rear.spec b/rear.spec index 2343380..e0e7474 100644 --- a/rear.spec +++ b/rear.spec @@ -2,8 +2,8 @@ %global debug_package %{nil} Name: rear -Version: 2.6 -Release: 14%{?dist} +Version: 2.7 +Release: 1%{?dist} Summary: Relax-and-Recover is a Linux disaster recovery and system migration tool URL: http://relax-and-recover.org/ License: GPLv3 @@ -13,46 +13,62 @@ Source0: https://github.com/rear/rear/archive/%{version}.tar.gz#/rear-%{version} Source1: rear.cron Source2: rear.service Source3: rear.timer -# Skip buildin modules, RHBZ#1831311 -Patch0: 0001-skip-kernel-buildin-modules.patch -Patch4: rear-bz1492177-warning.patch -Patch29: rear-bz1832394.patch -Patch30: rear-sfdc02772301.patch -Patch31: rear-bz1945869.patch -Patch32: rear-bz1958247.patch -Patch33: rear-bz1930662.patch -Patch34: rear-tmpdir.patch -Patch35: rear-bz1983013.patch -Patch36: rear-bz1993296.patch -Patch37: rear-bz1747468.patch -Patch38: rear-bz2049091.patch -Patch39: rear-pr2675.patch -Patch40: rear-bz2048454.patch -Patch41: rear-bz2035939.patch -Patch42: rear-bz2083272.patch -Patch43: rear-bz2111049.patch -Patch44: rear-bz2104005.patch -Patch45: rear-bz2097437.patch -Patch46: rear-bz2096916.patch -Patch47: rear-bz2096900.patch -Patch48: rear-bz2111059.patch -Patch49: rsync-output.patch -Patch50: rear-bz2119501.patch -Patch51: rear-bz2120736.patch -Patch52: rear-bz2117937.patch -Patch53: rear-bz2091163.patch -Patch54: rear-bz2130945.patch -Patch55: rear-bz2131946.patch -Patch56: s390-no-clobber-disks.patch -Patch57: rear-bz2188593-nbu-systemd.patch -Patch58: rear-device-shrinking-bz2223895.patch -Patch59: rear-usb-uefi-part-size-bz2228402.patch -Patch60: rear-luks-key-bz2228779.patch -Patch61: rear-uefi-usb-secureboot-bz2196445.patch -Patch62: rear-vg-command-not-found-bz2121476.patch -Patch63: rear-remove-lvmdevices-bz2145014.patch -Patch64: rear-save-lvm-poolmetadatasize-RHEL-6984.patch -Patch65: rear-skip-useless-xfs-mount-options-RHEL-10478.patch + +###################### +# upstream backports # +###################### +# pass -y to lvcreate instead of piping the output of yes +# https://github.com/rear/rear/commit/bca0e7a92af16cb7fb82ef04401cdb3286068081 +Patch101: rear-bz2104005.patch + +# fix initrd generation on s390x +# https://github.com/rear/rear/commit/6d1e5ab96213a0d79489c4296cd1f5a4be645597 +Patch102: rear-bz2130945.patch + +# do not use ':' as a field separator in pvdisplay output +# https://github.com/rear/rear/commit/29e739ae7c0651f8f77c60846bfbe2b6c91baa29 +Patch103: rear-bz2091163.patch + +# do not autoformat DASDs on s390x +# https://github.com/rear/rear/commit/015c1ffd9fa96b01882b068714d3bc3aae3b5168 +Patch104: s390-no-clobber-disks.patch + +# continue when extracting shrank files with tar +# https://github.com/rear/rear/commit/41c2d9b1fbcece4b0890ab92e9f5817621917ad3 +Patch105: rear-device-shrinking-bz2223895.patch + +# add secure boot support for OUTPUT=USB +# https://github.com/rear/rear/commit/46b29195bff7f93cf5bd4c2dd83d69e5676800cb +Patch106: rear-uefi-usb-secureboot-bz2196445.patch + +# remove the lvmdevices file at the end of recovery +# https://github.com/rear/rear/commit/5a8c5086bf3fc28236436ff3ef27196509f0375d +Patch107: rear-remove-lvmdevices-bz2145014.patch + +# save LVM pool metadata volume size in disk layout +# https://github.com/rear/rear/commit/f6af518baf3b5a4dc06bf8cfea262e627eee3e07 +Patch108: 
rear-save-lvm-poolmetadatasize-RHEL-6984.patch + +# skip useless xfs mount options when mounting during recovery +# https://github.com/rear/rear/commit/ed4c78d5fe493ea368989d0086a733653692f5cb +Patch109: rear-skip-useless-xfs-mount-options-RHEL-10478.patch + +###################### +# downstream patches # +###################### +# suggest to install grub-efi-x64-modules on x86_64 UEFI Fedora/RHEL machines +Patch201: rear-bz1492177-warning.patch + +# avoid vgcfgrestore on unsupported volume types +# https://github.com/pcahyna/rear/commit/5d5d1db3ca621eb80b9481924d1fc470571cfc09 +Patch202: rear-bz1747468.patch + +# skip deliberately broken symlinks in initrd on Fedora/RHEL +Patch203: rear-bz2119501.patch + +# additional fixes for NBU support +Patch204: rear-bz2120736.patch +Patch205: rear-bz2188593-nbu-systemd.patch # rear contains only bash scripts plus documentation so that on first glance it could be "BuildArch: noarch" # but actually it is not "noarch" because it only works on those architectures that are explicitly supported. @@ -178,6 +194,11 @@ install -m 0644 %{SOURCE3} %{buildroot}%{_docdir}/%{name}/ #-- CHANGELOG -----------------------------------------------------------------# %changelog +* Fri Feb 02 2024 Lukáš Zaoral - 2.7-1 +- rebase to version 2.7 (rhbz#2215778) +- drop obsolete patches +- rebase remaining patches + * Fri Feb 2 2024 Lukáš Zaoral - 2.6-14 - Sync with patches in CentOS Stream 9 (kudos to @pcahyna!) chronologically from the latest: diff --git a/rsync-output.patch b/rsync-output.patch deleted file mode 100644 index 42d3ece..0000000 --- a/rsync-output.patch +++ /dev/null @@ -1,864 +0,0 @@ -commit e6a9c973dbb7be6e46ed9a7fe34df0635635fed6 -Author: Johannes Meixner -Date: Tue Jul 12 13:59:28 2022 +0200 - - Merge pull request #2831 from pcahyna/rsync-url-fix-refactor - - Refactor rsync URL support, fixes rsync OUTPUT_URL: - The code to parse rsync:// URLs was BACKUP_URL specific. - If one specified BACKUP=RSYNC and an OUTPUT_URL different from BACKUP_URL, - the OUTPUT_URL was ignored and the output files went to BACKUP_URL. - Fix by introducing generic functions for rsync URL parsing and - use them for both BACKUP_URL and OUTPUT_URL, as appropriate. - Replace all uses of global RSYNC_* variables derived - from BACKUP_URL by those functions. - There also was inconsistent special handling for OUTPUT=PXE which is now removed: - An rsync OUTPUT_URL with OUTPUT=PXE now creates the RSYNC_PREFIX directory - at the destination and the URL is interpreted as in all other cases. 
- See https://github.com/rear/rear/pull/2831 - and https://github.com/rear/rear/issues/2781 - -diff --git a/usr/share/rear/backup/NETFS/default/200_check_rsync_relative_option.sh b/usr/share/rear/backup/NETFS/default/200_check_rsync_relative_option.sh -deleted file mode 120000 -index 336b83f5..00000000 ---- a/usr/share/rear/backup/NETFS/default/200_check_rsync_relative_option.sh -+++ /dev/null -@@ -1 +0,0 @@ --../../RSYNC/default/200_check_rsync_relative_option.sh -\ No newline at end of file -diff --git a/usr/share/rear/backup/NETFS/default/210_check_rsync_relative_option.sh b/usr/share/rear/backup/NETFS/default/210_check_rsync_relative_option.sh -new file mode 120000 -index 00000000..0570eb44 ---- /dev/null -+++ b/usr/share/rear/backup/NETFS/default/210_check_rsync_relative_option.sh -@@ -0,0 +1 @@ -+../../RSYNC/default/210_check_rsync_relative_option.sh -\ No newline at end of file -diff --git a/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh b/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh -index 1692ba4c..dd198ede 100644 ---- a/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh -+++ b/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh -@@ -6,29 +6,29 @@ local backup_prog_rc - touch "${TMP_DIR}/selinux.autorelabel" - cat $TMP_DIR/selinux.mode > $SELINUX_ENFORCE - Log "Restored original SELinux mode" -- case $RSYNC_PROTO in -+ case $(rsync_proto "$BACKUP_URL") in - - (ssh) - # for some reason rsync changes the mode of backup after each run to 666 - # FIXME: Add an explanatory comment why "2>/dev/null" is useful here - # or remove it according to https://github.com/rear/rear/issues/1395 -- ssh $RSYNC_USER@$RSYNC_HOST "chmod $v 755 ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" 2>/dev/null -+ ssh $(rsync_remote_ssh "$BACKUP_URL") "chmod $v 755 $(rsync_path_full "$BACKUP_URL")/backup" 2>/dev/null - $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" \ -- "$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" 2>/dev/null -+ "$(rsync_remote_full "$BACKUP_URL")/backup/.autorelabel" 2>/dev/null - backup_prog_rc=$? - if [ $backup_prog_rc -ne 0 ]; then -- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" -- #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" -+ LogPrint "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup [${rsync_err_msg[$backup_prog_rc]}]" -+ #StopIfError "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup" - fi - ;; - - (rsync) - $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" "${BACKUP_RSYNC_OPTIONS[@]}" \ -- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" -+ "$(rsync_remote_full "$BACKUP_URL")/backup/.autorelabel" - backup_prog_rc=$? 
- if [ $backup_prog_rc -ne 0 ]; then -- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" -- #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" -+ LogPrint "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup [${rsync_err_msg[$backup_prog_rc]}]" -+ #StopIfError "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup" - fi - ;; - -diff --git a/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh b/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh -index 9a17d6bb..de57d571 100644 ---- a/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh -+++ b/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh -@@ -4,29 +4,29 @@ local backup_prog_rc - - > "${TMP_DIR}/selinux.autorelabel" - -- case $RSYNC_PROTO in -+ case $(rsync_proto "$BACKUP_URL") in - - (ssh) - # for some reason rsync changes the mode of backup after each run to 666 - # FIXME: Add an explanatory comment why "2>/dev/null" is useful here - # or remove it according to https://github.com/rear/rear/issues/1395 -- ssh $RSYNC_USER@$RSYNC_HOST "chmod $v 755 ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" 2>/dev/null -+ ssh $(rsync_remote_ssh "$BACKUP_URL") "chmod $v 755 $(rsync_path_full "$BACKUP_URL")/backup" 2>/dev/null - $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" \ -- "$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" 2>/dev/null -+ "$(rsync_remote_full "$BACKUP_URL")/backup/.autorelabel" 2>/dev/null - backup_prog_rc=$? - if [ $backup_prog_rc -ne 0 ]; then -- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" -- #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" -+ LogPrint "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup [${rsync_err_msg[$backup_prog_rc]}]" -+ #StopIfError "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup" - fi - ;; - - (rsync) - $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" "${BACKUP_RSYNC_OPTIONS[@]}" \ -- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" -+ "$(rsync_remote_full "$BACKUP_URL")/backup/.autorelabel" - backup_prog_rc=$? 
- if [ $backup_prog_rc -ne 0 ]; then -- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" -- #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" -+ LogPrint "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup [${rsync_err_msg[$backup_prog_rc]}]" -+ #StopIfError "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup" - fi - ;; - -diff --git a/usr/share/rear/backup/RSYNC/default/200_make_prefix_dir.sh b/usr/share/rear/backup/RSYNC/default/200_make_prefix_dir.sh -new file mode 100644 -index 00000000..81aa6879 ---- /dev/null -+++ b/usr/share/rear/backup/RSYNC/default/200_make_prefix_dir.sh -@@ -0,0 +1,28 @@ -+# Create RSYNC_PREFIX/backup on remote rsync server -+# RSYNC_PREFIX=$HOSTNAME as set in default.conf -+ -+local proto host -+ -+proto="$(rsync_proto "$BACKUP_URL")" -+host="$(rsync_host "$BACKUP_URL")" -+ -+mkdir -p $v -m0750 "${TMP_DIR}/rsync/${RSYNC_PREFIX}" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}'" -+mkdir -p $v -m0755 "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup'" -+ -+case $proto in -+ -+ (ssh) -+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "$(rsync_remote "$BACKUP_URL")" >/dev/null 2>&1 \ -+ || Error "Could not create '$(rsync_path_full "$BACKUP_URL")' on remote ${host}" -+ ;; -+ -+ (rsync) -+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${BACKUP_RSYNC_OPTIONS[@]}" "$(rsync_remote "$BACKUP_URL")/" >/dev/null \ -+ || Error "Could not create '$(rsync_path_full "$BACKUP_URL")' on remote ${host}" -+ ;; -+ -+esac -+ -+# We don't need it anymore, from now we operate on the remote copy -+rmdir $v "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" -+rmdir $v "${TMP_DIR}/rsync/${RSYNC_PREFIX}" -diff --git a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh b/usr/share/rear/backup/RSYNC/default/210_check_rsync_relative_option.sh -similarity index 91% -rename from usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh -rename to usr/share/rear/backup/RSYNC/default/210_check_rsync_relative_option.sh -index cedee9ce..692616b7 100644 ---- a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh -+++ b/usr/share/rear/backup/RSYNC/default/210_check_rsync_relative_option.sh -@@ -1,4 +1,4 @@ --# 200_check_rsync_relative_option.sh -+# 210_check_rsync_relative_option.sh - # See issue #871 for details - - # check for the --relative option in BACKUP_RSYNC_OPTIONS array -diff --git a/usr/share/rear/backup/RSYNC/default/450_calculate_req_space.sh b/usr/share/rear/backup/RSYNC/default/450_calculate_req_space.sh -index eb99dbf6..037e49c0 100644 ---- a/usr/share/rear/backup/RSYNC/default/450_calculate_req_space.sh -+++ b/usr/share/rear/backup/RSYNC/default/450_calculate_req_space.sh -@@ -1,6 +1,12 @@ - # here we will calculate the space required to hold the backup archive on the remote rsync system - # This file is part of Relax-and-Recover, licensed under the GNU General - # Public License. Refer to the included COPYING for full text of license. 
-+local proto host path -+ -+proto="$(rsync_proto "$BACKUP_URL")" -+host="$(rsync_host "$BACKUP_URL")" -+path="$(rsync_path "$BACKUP_URL")" -+ - _local_size=0 - _remote_size=0 - while read -r ; do -@@ -13,17 +19,17 @@ while read -r ; do - done < $TMP_DIR/backup-include.txt - LogPrint "Estimated size of local file systems is $(( _local_size / 1024 )) MB" - --case $RSYNC_PROTO in -+case $proto in - (ssh) -- LogPrint "Calculating size of $RSYNC_HOST:$RSYNC_PATH" -- ssh -l $RSYNC_USER $RSYNC_HOST "df -P $RSYNC_PATH" >$TMP_DIR/rs_size -- StopIfError "Failed to determine size of $RSYNC_PATH" -+ LogPrint "Calculating size of ${host}:${path}" -+ ssh $(rsync_remote_ssh "$BACKUP_URL") "df -P ${path}" >$TMP_DIR/rs_size -+ StopIfError "Failed to determine size of ${path}" - _div=1 # 1024-blocks - grep -q "512-blocks" $TMP_DIR/rs_size && _div=2 # HPUX: divide with 2 to get kB size - _remote_size=$( tail -n 1 $TMP_DIR/rs_size | awk '{print $2}' ) - _remote_size=$(( _remote_size / _div )) - [[ $_remote_size -gt $_local_size ]] -- StopIfError "Not enough disk space available on $RSYNC_HOST:$RSYNC_PATH ($_remote_size < $_local_size)" -+ StopIfError "Not enough disk space available on ${host}:${path} ($_remote_size < $_local_size)" - ;; - (rsync) - # TODO: how can we calculate the free size on remote system via rsync protocol?? -diff --git a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh -index 750a04ca..aa8192c0 100644 ---- a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh -+++ b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh -@@ -5,6 +5,11 @@ - local backup_prog_rc - local backup_log_message - -+local host path -+ -+host="$(rsync_host "$BACKUP_URL")" -+path="$(rsync_path "$BACKUP_URL")" -+ - Log "Include list:" - while read -r ; do - Log " $REPLY" -@@ -14,26 +19,27 @@ while read -r ; do - Log " $REPLY" - done < $TMP_DIR/backup-exclude.txt - --LogPrint "Creating $BACKUP_PROG backup on '${RSYNC_HOST}:${RSYNC_PATH}'" -+LogPrint "Creating $BACKUP_PROG backup on '${host}:${path}'" - - ProgressStart "Running backup operation" - ( - case "$(basename $BACKUP_PROG)" in - - (rsync) -+ # We are in a subshell, so this change will not propagate to later scripts - BACKUP_RSYNC_OPTIONS+=( --one-file-system --delete --exclude-from=$TMP_DIR/backup-exclude.txt --delete-excluded ) - -- case $RSYNC_PROTO in -+ case $(rsync_proto "$BACKUP_URL") in - - (ssh) -- Log $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" $(cat $TMP_DIR/backup-include.txt) "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/backup" -+ Log $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" $(cat $TMP_DIR/backup-include.txt) "$(rsync_remote_full "$BACKUP_URL")/backup" - $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" $(cat $TMP_DIR/backup-include.txt) \ -- "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/backup" -+ "$(rsync_remote_full "$BACKUP_URL")/backup" - ;; - - (rsync) - $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" $(cat $TMP_DIR/backup-include.txt) \ -- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup" -+ "$(rsync_remote_full "$BACKUP_URL")/backup" - ;; - - esac -@@ -57,11 +63,11 @@ get_size() { - } - - check_remote_df() { -- echo $(ssh ${RSYNC_USER}@${RSYNC_HOST} df -P ${RSYNC_PATH} 2>/dev/null | tail -1 | awk '{print $5}' | sed -e 's/%//') -+ echo $(ssh $(rsync_remote_ssh "$BACKUP_URL") df -P ${path} 2>/dev/null | tail -1 | awk '{print $5}' | sed -e 's/%//') - } - - check_remote_du() { -- x=$(ssh 
${RSYNC_USER}@${RSYNC_HOST} du -sb ${RSYNC_PATH}/${RSYNC_PREFIX}/backup 2>/dev/null | awk '{print $1}') -+ x=$(ssh $(rsync_remote_ssh "$BACKUP_URL") du -sb $(rsync_path_full "$BACKUP_URL")/backup 2>/dev/null | awk '{print $1}') - [[ -z "${x}" ]] && x=0 - echo $x - } -@@ -81,7 +87,7 @@ case "$(basename $BACKUP_PROG)" in - case $i in - - 300) -- [[ $(check_remote_df) -eq 100 ]] && Error "Disk is full on system ${RSYNC_HOST}" -+ [[ $(check_remote_df) -eq 100 ]] && Error "Disk is full on system ${host}" - ;; - - 15|30|45|60|75|90|105|120|135|150|165|180|195|210|225|240|255|270|285) -diff --git a/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh b/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh -index b90d459b..76b9f971 100644 ---- a/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh -+++ b/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh -@@ -1,26 +1,27 @@ - - # copy the backup.log & rear.log file to remote destination with timestamp added --local timestamp -+local timestamp proto - - timestamp=$( date +%Y%m%d.%H%M ) -+proto="$(rsync_proto "$BACKUP_URL")" - - # compress the log file first - gzip "$TMP_DIR/$BACKUP_PROG_ARCHIVE.log" || Error "Failed to 'gzip $TMP_DIR/$BACKUP_PROG_ARCHIVE.log'" - --case $RSYNC_PROTO in -+case $proto in - (ssh) - # FIXME: Add an explanatory comment why "2>/dev/null" is useful here - # or remove it according to https://github.com/rear/rear/issues/1395 - $BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" \ -- "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" 2>/dev/null -+ "$(rsync_remote_full "$BACKUP_URL")/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" 2>/dev/null - -- $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/rear-${timestamp}.log" 2>/dev/null -+ $BACKUP_PROG -a "$RUNTIME_LOGFILE" "$(rsync_remote_full "$BACKUP_URL")/rear-${timestamp}.log" 2>/dev/null - ;; - (rsync) - $BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" "${BACKUP_RSYNC_OPTIONS[@]}" \ -- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" -+ "$(rsync_remote_full "$BACKUP_URL")/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" - -- $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}//rear-${timestamp}.log" -+ $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${BACKUP_RSYNC_OPTIONS[@]}" "$(rsync_remote_full "$BACKUP_URL")//rear-${timestamp}.log" - ;; - esac - -diff --git a/usr/share/rear/lib/global-functions.sh b/usr/share/rear/lib/global-functions.sh -index 32aeb8ca..2edb64a6 100644 ---- a/usr/share/rear/lib/global-functions.sh -+++ b/usr/share/rear/lib/global-functions.sh -@@ -259,7 +259,7 @@ function url_scheme() { - # the scheme is the leading part up to '://' - local scheme=${url%%://*} - # rsync scheme does not have to start with rsync:// it can also be scp style -- # see the comments in usr/share/rear/prep/RSYNC/default/100_check_rsync.sh -+ # see the comments in usr/share/rear/lib/rsync-functions.sh - echo $scheme | grep -q ":" && echo rsync || echo $scheme - } - -diff --git a/usr/share/rear/lib/rsync-functions.sh b/usr/share/rear/lib/rsync-functions.sh -new file mode 100644 -index 00000000..443a9625 ---- /dev/null -+++ b/usr/share/rear/lib/rsync-functions.sh -@@ -0,0 +1,178 @@ -+# Functions for manipulation of rsync URLs (both OUTPUT_URL and BACKUP_URL) -+ -+#### OLD STYLE: -+# 
BACKUP_URL=[USER@]HOST:PATH # using ssh (no rsh) -+# -+# with rsync protocol PATH is a MODULE name defined in remote /etc/rsyncd.conf file -+# BACKUP_URL=[USER@]HOST::PATH # using rsync -+# BACKUP_URL=rsync://[USER@]HOST[:PORT]/PATH # using rsync (is not compatible with new style!!!) -+ -+#### NEW STYLE: -+# BACKUP_URL=rsync://[USER@]HOST[:PORT]/PATH # using ssh -+# BACKUP_URL=rsync://[USER@]HOST[:PORT]::/PATH # using rsync -+ -+function rsync_validate () { -+ local url="$1" -+ -+ if [[ "$(url_scheme "$url")" != "rsync" ]]; then # url_scheme still recognizes old style -+ BugError "Non-rsync URL $url !" -+ fi -+} -+ -+# Determine whether the URL specifies the use of the rsync protocol (rsyncd) or ssh -+# Do not call on non-rsync URLs (use url_scheme first) -+function rsync_proto () { -+ local url="$1" -+ -+ rsync_validate "$url" -+ if egrep -q '(::)' <<< $url ; then # new style '::' means rsync protocol -+ echo rsync -+ else -+ echo ssh -+ fi -+} -+ -+# Functions to parse the URL into its components: -+# USER, HOST, PORT, PATH -+ -+function rsync_user () { -+ local url="$1" -+ local host -+ -+ host=$(url_host "$url") -+ -+ if grep -q '@' <<< $host ; then -+ echo "${host%%@*}" # grab user name -+ else -+ echo root -+ fi -+} -+ -+function rsync_host () { -+ local url="$1" -+ local host -+ local path -+ -+ host=$(url_host "$url") -+ path=$(url_path "$url") -+ # remove USER@ if present -+ local tmp2="${host#*@}" -+ -+ case "$(rsync_proto "$url")" in -+ (rsync) -+ # tmp2=witsbebelnx02::backup or tmp2=witsbebelnx02:: -+ echo "${tmp2%%::*}" -+ ;; -+ (ssh) -+ # tmp2=host or tmp2=host: -+ echo "${tmp2%%:*}" -+ ;; -+ esac -+} -+ -+function rsync_path () { -+ local url="$1" -+ local host -+ local path -+ local url_without_scheme -+ local url_without_scheme_user -+ -+ host=$(url_host "$url") -+ path=$(url_path "$url") -+ local tmp2="${host#*@}" -+ -+ url_without_scheme="${url#*//}" -+ url_without_scheme_user="${url_without_scheme#$(rsync_user "$url")@}" -+ -+ case "$(rsync_proto "$url")" in -+ -+ (rsync) -+ if grep -q '::' <<< $url_without_scheme_user ; then -+ # we can not use url_path here, it uses / as separator, not :: -+ local url_after_separator="${url_without_scheme_user##*::}" -+ # remove leading / - this is a module name -+ echo "${url_after_separator#/}" -+ else -+ echo "${path#*/}" -+ fi -+ ;; -+ (ssh) -+ if [ "$url_without_scheme" == "$url" ]; then -+ # no scheme - old-style URL -+ if grep -q ':' <<< $url_without_scheme_user ; then -+ echo "${url_without_scheme_user##*:}" -+ else -+ BugError "Old-style rsync URL $url without : !" -+ fi -+ else -+ echo "$path" -+ fi -+ ;; -+ -+ esac -+} -+ -+function rsync_port () { -+ # XXX changing port not implemented yet -+ echo 873 -+} -+ -+# Full path to the destination directory on the remote server, -+# includes RSYNC_PREFIX. RSYNC_PREFIX is not given by the URL, -+# it is a global parameter (by default derived from hostname). -+function rsync_path_full () { -+ local url="$1" -+ -+ echo "$(rsync_path "$url")/${RSYNC_PREFIX}" -+} -+ -+# Argument for the ssh command to log in to the remote host ("user@host") -+function rsync_remote_ssh () { -+ local url="$1" -+ -+ local user host -+ -+ user="$(rsync_user "$url")" -+ host="$(rsync_host "$url")" -+ -+ echo "${user}@${host}" -+} -+ -+# Argument for the rsync command to reach the remote host, without path. 
-+function rsync_remote_base () { -+ local url="$1" -+ -+ local user host port -+ -+ user="$(rsync_user "$url")" -+ host="$(rsync_host "$url")" -+ port="$(rsync_port "$url")" -+ -+ case "$(rsync_proto "$url")" in -+ -+ (rsync) -+ echo "rsync://${user}@${host}:${port}/" -+ ;; -+ (ssh) -+ echo "$(rsync_remote_ssh "$url"):" -+ ;; -+ -+ esac -+} -+ -+# Complete argument to rsync to reach the remote location identified by URL, -+# but without the added RSYNC_PREFIX. -+# This essentially converts our rsync:// URLs into a form accepted by the rsync command. -+function rsync_remote () { -+ local url="$1" -+ -+ echo "$(rsync_remote_base "$url")$(rsync_path "$url")" -+} -+ -+# Complete argument to rsync including even RSYNC_PREFIX. -+# Determined from the URL and RSYNC_PREFIX. -+function rsync_remote_full () { -+ local url="$1" -+ -+ echo "$(rsync_remote_base "$url")$(rsync_path_full "$url")" -+} -diff --git a/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh b/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh -index 519febf5..d00d15e4 100644 ---- a/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh -+++ b/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh -@@ -1,20 +1,32 @@ - # Create RSYNC_PREFIX under the local TMP_DIR and also on remote rsync server - # RSYNC_PREFIX=$HOSTNAME as set in default.conf - --# create temporary local work-spaces to collect files (we already make the remote backup dir with the correct mode!!) -+local proto host scheme -+ -+scheme="$(url_scheme "$OUTPUT_URL")" -+ -+# we handle only rsync:// output schemes. -+# ToDo: why does handling of the output URL scheme belong under RSYNC (which is a backup method)? -+# OUTPUT_URL is independent on the chosen backup method, so this code should be moved to be backup-independent. -+test "rsync" = "$scheme" || return 0 -+ -+proto="$(rsync_proto "$OUTPUT_URL")" -+host="$(rsync_host "$OUTPUT_URL")" -+ -+# create temporary local work-spaces to collect files - mkdir -p $v -m0750 "${TMP_DIR}/rsync/${RSYNC_PREFIX}" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}'" --mkdir -p $v -m0755 "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup'" - --case $RSYNC_PROTO in -+case $proto in - - (ssh) -- $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}" >/dev/null 2>&1 \ -- || Error "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}" -+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "$(rsync_remote "$OUTPUT_URL")" >/dev/null 2>&1 \ -+ || Error "Could not create '$(rsync_path_full "$OUTPUT_URL")' on remote ${host}" - ;; - - (rsync) -- $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/" >/dev/null \ -- || Error "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}" -+ # This must run before the backup stage. 
Otherwise --relative gets added to BACKUP_RSYNC_OPTIONS -+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${BACKUP_RSYNC_OPTIONS[@]}" "$(rsync_remote "$OUTPUT_URL")/" >/dev/null \ -+ || Error "Could not create '$(rsync_path_full "$OUTPUT_URL")' on remote ${host}" - ;; - - esac -diff --git a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh -index 96b62da1..4ddf3cb4 100644 ---- a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh -+++ b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh -@@ -1,6 +1,17 @@ - # - # copy resulting files to remote network (backup) location - -+local proto scheme -+ -+scheme="$(url_scheme "$OUTPUT_URL")" -+ -+# we handle only rsync:// output schemes. -+# ToDo: why does handling of the output URL scheme belong under RSYNC (which is a backup method)? -+# OUTPUT_URL is independent on the chosen backup method, so this code should be moved to be backup-independent. -+test "rsync" = "$scheme" || return 0 -+ -+proto="$(rsync_proto "$OUTPUT_URL")" -+ - LogPrint "Copying resulting files to $OUTPUT_URL location" - - # if called as mkbackuponly then we just don't have any result files. -@@ -19,21 +30,21 @@ cp $v $(get_template "RESULT_usage_$OUTPUT.txt") "${TMP_DIR}/rsync/${RSYNC_PREFI - cat "$RUNTIME_LOGFILE" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/rear.log" \ - || Error "Could not copy $RUNTIME_LOGFILE to local rsync location" - --case $RSYNC_PROTO in -+case $proto in - - (ssh) -- Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" -+ Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ $(rsync_remote_full "$OUTPUT_URL")/" - # FIXME: Add an explanatory comment why "2>/dev/null" is useful here - # or remove it according to https://github.com/rear/rear/issues/1395 -- $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null \ -+ $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "$(rsync_remote_full "$OUTPUT_URL")/" 2>/dev/null \ - || Error "Could not copy '${RESULT_FILES[*]}' to $OUTPUT_URL location" - ;; - - (rsync) -- Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${BACKUP_RSYNC_OPTIONS[*]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" -+ Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${BACKUP_RSYNC_OPTIONS[*]} $(rsync_remote_full "$OUTPUT_URL")/" - # FIXME: Add an explanatory comment why "2>/dev/null" is useful here - # or remove it according to https://github.com/rear/rear/issues/1395 -- $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null \ -+ $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${BACKUP_RSYNC_OPTIONS[@]}" "$(rsync_remote_full "$OUTPUT_URL")/" 2>/dev/null \ - || Error "Could not copy '${RESULT_FILES[*]}' to $OUTPUT_URL location" - ;; - -diff --git a/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh b/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh -index eb7df29e..84500039 100644 ---- a/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh -+++ b/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh -@@ -25,8 +25,10 @@ case $(basename $BACKUP_PROG) in - - (rsync) - if grep -q "no xattrs" "$TMP_DIR/rsync_protocol"; then -+ local host -+ host="$(rsync_host "$BACKUP_URL")" - # no xattrs compiled in remote 
rsync, so saving SELinux attributes are not possible -- Log "WARNING: --xattrs not possible on system ($RSYNC_HOST) (no xattrs compiled in rsync)" -+ Log "WARNING: --xattrs not possible on system ($host) (no xattrs compiled in rsync)" - # $TMP_DIR/selinux.mode is a trigger during backup to disable SELinux - cat $SELINUX_ENFORCE > $TMP_DIR/selinux.mode - RSYNC_SELINUX= # internal variable used in recover mode (empty means disable SELinux) -diff --git a/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh b/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh -index c964a148..448a1b1a 100644 ---- a/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh -+++ b/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh -@@ -3,97 +3,40 @@ - # This file is part of Relax-and-Recover, licensed under the GNU General - # Public License. Refer to the included COPYING for full text of license. - --#### OLD STYLE: --# BACKUP_URL=[USER@]HOST:PATH # using ssh (no rsh) --# --# with rsync protocol PATH is a MODULE name defined in remote /etc/rsyncd.conf file --# BACKUP_URL=[USER@]HOST::PATH # using rsync --# BACKUP_URL=rsync://[USER@]HOST[:PORT]/PATH # using rsync (is not compatible with new style!!!) -- --#### NEW STYLE: --# BACKUP_URL=rsync://[USER@]HOST[:PORT]/PATH # using ssh --# BACKUP_URL=rsync://[USER@]HOST[:PORT]::/PATH # using rsync -- - if test -z "$BACKUP_URL" ; then - Error "Missing BACKUP_URL=rsync://[USER@]HOST[:PORT][::]/PATH !" - fi - --local host=$(url_host $BACKUP_URL) - local scheme=$(url_scheme $BACKUP_URL) # url_scheme still recognizes old style --local path=$(url_path $BACKUP_URL) - - if [[ "$scheme" != "rsync" ]]; then - Error "Missing BACKUP_URL=rsync://[USER@]HOST[:PORT][::]/PATH !" - fi - --RSYNC_PROTO= # ssh or rsync --RSYNC_USER= --RSYNC_HOST= --RSYNC_PORT=873 # default port (of rsync server) --RSYNC_PATH= -- -- --if egrep -q '(::)' <<< $BACKUP_URL ; then # new style '::' means rsync protocol -- RSYNC_PROTO=rsync --else -- RSYNC_PROTO=ssh --fi -- --if grep -q '@' <<< $host ; then -- RSYNC_USER="${host%%@*}" # grab user name --else -- RSYNC_USER=root --fi -- --# remove USER@ if present (we don't need it anymore) --local tmp2="${host#*@}" -- --case "$RSYNC_PROTO" in -- -- (rsync) -- # tmp2=witsbebelnx02::backup or tmp2=witsbebelnx02:: -- RSYNC_HOST="${tmp2%%::*}" -- # path=/gdhaese1@witsbebelnx02::backup or path=/backup -- if grep -q '::' <<< $path ; then -- RSYNC_PATH="${path##*::}" -- else -- RSYNC_PATH="${path##*/}" -- fi -- ;; -- (ssh) -- # tmp2=host or tmp2=host: -- RSYNC_HOST="${tmp2%%:*}" -- RSYNC_PATH=$path -- ;; -- --esac -- --#echo RSYNC_PROTO=$RSYNC_PROTO --#echo RSYNC_USER=$RSYNC_USER --#echo RSYNC_HOST=$RSYNC_HOST --#echo RSYNC_PORT=$RSYNC_PORT --#echo RSYNC_PATH=$RSYNC_PATH -+local host proto -+host="$(rsync_host "$BACKUP_URL")" -+proto="$(rsync_proto "$BACKUP_URL")" - - # check if host is reachable - if test "$PING" ; then -- ping -c 2 "$RSYNC_HOST" >/dev/null || Error "Backup host [$RSYNC_HOST] not reachable." -+ ping -c 2 "$host" >/dev/null || Error "Backup host [$host] not reachable." 
- else - Log "Skipping ping test" - fi - - # check protocol connectivity --case "$RSYNC_PROTO" in -+case "$proto" in - - (rsync) -- Log "Test: $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[*]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/" -- $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/ >/dev/null \ -- || Error "Rsync daemon not running on $RSYNC_HOST" -+ Log "Test: $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[*]} $(rsync_remote_base "$BACKUP_URL")" -+ $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" $(rsync_remote_base "$BACKUP_URL") >/dev/null \ -+ || Error "Rsync daemon not running on $host" - ;; - - (ssh) -- Log "Test: ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true" -- ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true >/dev/null 2>&1 \ -- || Error "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]" -+ Log "Test: ssh $(rsync_remote_ssh "$BACKUP_URL") /bin/true" -+ ssh $(rsync_remote_ssh "$BACKUP_URL") /bin/true >/dev/null 2>&1 \ -+ || Error "Secure shell connection not setup properly [$(rsync_remote_ssh "$BACKUP_URL")]" - ;; - - esac -diff --git a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh -index e9103531..becf35a0 100644 ---- a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh -+++ b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh -@@ -3,15 +3,18 @@ - # Public License. Refer to the included COPYING for full text of license. - # try to grab the rsync protocol version of rsync on the remote server - --local remote_mountpoint -+local remote_mountpoint host path proto -+host="$(rsync_host "$BACKUP_URL")" -+path="$(rsync_path "$BACKUP_URL")" -+proto="$(rsync_proto "$BACKUP_URL")" - - if [ -z "$RSYNC_PROTOCOL_VERSION" ]; then - -- case $RSYNC_PROTO in -+ case $proto in - - (ssh) -- ssh ${RSYNC_USER}@${RSYNC_HOST} rsync --version >"$TMP_DIR/rsync_protocol" 2>&1 \ -- || Error "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]" -+ ssh $(rsync_remote_ssh "$BACKUP_URL") rsync --version >"$TMP_DIR/rsync_protocol" 2>&1 \ -+ || Error "Secure shell connection not setup properly [$(rsync_remote_ssh "$BACKUP_URL")]" - if grep -q "protocol version" "$TMP_DIR/rsync_protocol" ; then - RSYNC_PROTOCOL_VERSION=$(grep 'protocol version' "$TMP_DIR/rsync_protocol" | awk '{print $6}') - else -@@ -24,29 +27,29 @@ if [ -z "$RSYNC_PROTOCOL_VERSION" ]; then - RSYNC_PROTOCOL_VERSION=29 # being conservative (old rsync) - ;; - esac -- Log "Remote rsync system ($RSYNC_HOST) uses rsync protocol version $RSYNC_PROTOCOL_VERSION" -+ Log "Remote rsync system ($host) uses rsync protocol version $RSYNC_PROTOCOL_VERSION" - - else - -- Log "Remote rsync system ($RSYNC_HOST) uses rsync protocol version $RSYNC_PROTOCOL_VERSION (overruled by user)" -+ Log "Remote rsync system ($host) uses rsync protocol version $RSYNC_PROTOCOL_VERSION (overruled by user)" - - fi - --if [ "${RSYNC_USER}" != "root" -a $RSYNC_PROTO = "ssh" ]; then -+if [ "$(rsync_user "$BACKUP_URL")" != "root" -a $proto = "ssh" ]; then - if [ $RSYNC_PROTOCOL_VERSION -gt 29 ]; then - if grep -q "no xattrs" "$TMP_DIR/rsync_protocol"; then - # no xattrs available in remote rsync, so --fake-super is not possible -- Error "rsync --fake-super not possible on system ($RSYNC_HOST) (no xattrs compiled in rsync)" -+ Error "rsync --fake-super not possible on system ($host) (no xattrs compiled in rsync)" - else - # when using --fake-super we must have user_xattr mount 
options on the remote mntpt -- remote_mountpoint=$(ssh ${RSYNC_USER}@${RSYNC_HOST} 'cd ${RSYNC_PATH}; df -P .' 2>/dev/null | tail -1 | awk '{print $6}') -- ssh ${RSYNC_USER}@${RSYNC_HOST} "cd ${RSYNC_PATH} && touch .is_xattr_supported && setfattr -n user.comment -v 'File created by ReaR to test if this filesystems supports extended attributes.' .is_xattr_supported && getfattr -n user.comment .is_xattr_supported 1>/dev/null; find .is_xattr_supported -empty -delete" \ -+ remote_mountpoint=$(ssh $(rsync_remote_ssh "$BACKUP_URL") 'cd ${path}; df -P .' 2>/dev/null | tail -1 | awk '{print $6}') -+ ssh $(rsync_remote_ssh "$BACKUP_URL") "cd ${path} && touch .is_xattr_supported && setfattr -n user.comment -v 'File created by ReaR to test if this filesystems supports extended attributes.' .is_xattr_supported && getfattr -n user.comment .is_xattr_supported 1>/dev/null; find .is_xattr_supported -empty -delete" \ - || Error "Remote file system $remote_mountpoint does not have user_xattr mount option set!" - #BACKUP_RSYNC_OPTIONS+=( --xattrs --rsync-path="rsync --fake-super" ) - # see issue #366 for explanation of removing --xattrs - BACKUP_RSYNC_OPTIONS+=( --rsync-path="rsync --fake-super" ) - fi - else -- Error "rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)" -+ Error "rsync --fake-super not possible on system ($host) (please upgrade rsync to 3.x)" - fi - fi -diff --git a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh -index 993088be..0fa08587 100644 ---- a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh -+++ b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh -@@ -7,7 +7,11 @@ get_size() { - local backup_prog_rc - local restore_log_message - --LogPrint "Restoring $BACKUP_PROG backup from '${RSYNC_HOST}:${RSYNC_PATH}'" -+local host path -+host="$(rsync_host "$BACKUP_URL")" -+path="$(rsync_path "$BACKUP_URL")" -+ -+LogPrint "Restoring $BACKUP_PROG backup from '${host}:${path}'" - - ProgressStart "Restore operation" - ( -@@ -15,18 +19,18 @@ ProgressStart "Restore operation" - - (rsync) - -- case $RSYNC_PROTO in -+ case $(rsync_proto "$BACKUP_URL") in - - (ssh) -- Log $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/backup"/ $TARGET_FS_ROOT/ -+ Log $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" "$(rsync_remote_full "$BACKUP_URL")/backup"/ $TARGET_FS_ROOT/ - $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" \ -- "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/backup"/ \ -+ "$(rsync_remote_full "$BACKUP_URL")/backup"/ \ - $TARGET_FS_ROOT/ - ;; - - (rsync) - $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" \ -- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup"/ $TARGET_FS_ROOT/ -+ "$(rsync_remote_full "$BACKUP_URL")/backup"/ $TARGET_FS_ROOT/ - ;; - - esac -diff --git a/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh b/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh -index b2fb72f5..76132794 100644 ---- a/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh -+++ b/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh -@@ -1,14 +1,14 @@ - # check the backup archive on remote rsync server - --case $RSYNC_PROTO in -+case $(rsync_proto "$BACKUP_URL") in - - (ssh) -- ssh ${RSYNC_USER}@${RSYNC_HOST} "ls -ld ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 \ -- || Error "Archive not found 
on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]" -+ ssh $(rsync_remote_ssh "$BACKUP_URL") "ls -ld $(rsync_path_full "$BACKUP_URL")/backup" >/dev/null 2>&1 \ -+ || Error "Archive not found on [$(rsync_remote_full "$BACKUP_URL")]" - ;; - - (rsync) -- $BACKUP_PROG "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 \ -- || Error "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]" -+ $BACKUP_PROG "$(rsync_remote_full "$BACKUP_URL")/backup" >/dev/null 2>&1 \ -+ || Error "Archive not found on [$(rsync_remote_full "$BACKUP_URL")]" - ;; - esac diff --git a/s390-no-clobber-disks.patch b/s390-no-clobber-disks.patch index 8d2f81a..7e17fc2 100644 --- a/s390-no-clobber-disks.patch +++ b/s390-no-clobber-disks.patch @@ -1,14 +1,38 @@ +commit 015c1ffd9fa96b01882b068714d3bc3aae3b5168 +Merge: 02dad206 20cc0137 +Author: Schlomo Schapiro +Date: Tue Feb 28 22:46:13 2023 +0100 + + Merge pull request #2943 from pcahyna/s390-layout-format + + s390x (IBM Z) disk formatting fixes + + Cherry-picked-by: Lukáš Zaoral + +diff --git a/packaging/rpm/rear.spec b/packaging/rpm/rear.spec +index eba48198..fb943019 100644 +--- a/packaging/rpm/rear.spec ++++ b/packaging/rpm/rear.spec +@@ -29,8 +29,8 @@ BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) + # Of course the rear bash scripts can be installed on any architecture just as any binaries can be installed on any architecture. + # But the meaning of architecture dependent packages should be on what architectures they will work. + # Therefore only those architectures that are actually supported are explicitly listed. +-# This avoids that rear can be "just installed" on architectures that are actually not supported (e.g. ARM or IBM z Systems): +-ExclusiveArch: %ix86 x86_64 ppc ppc64 ppc64le ia64 ++# This avoids that rear can be "just installed" on architectures that are actually not supported (e.g. ARM): ++ExclusiveArch: %ix86 x86_64 ppc ppc64 ppc64le ia64 s390x + # Furthermore for some architectures it requires architecture dependent packages (like syslinux for x86 and x86_64) + # so that rear must be architecture dependent because ifarch conditions never match in case of "BuildArch: noarch" + # see the GitHub issue https://github.com/rear/rear/issues/629 diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf -index 23a83b71..0d13b487 100644 +index fe34636f..50baaf82 100644 --- a/usr/share/rear/conf/default.conf +++ b/usr/share/rear/conf/default.conf -@@ -416,6 +416,18 @@ test "$RECOVERY_UPDATE_URL" || RECOVERY_UPDATE_URL="" - # export MIGRATION_MODE='true' - # directly before he calls "rear recover": - test "$MIGRATION_MODE" || MIGRATION_MODE='' -+#### -+ -+#### +@@ -486,6 +486,17 @@ test "$MIGRATION_MODE" || MIGRATION_MODE='' + # Currently by default no disk is wiped to avoid issues until this feature was more tested: + DISKS_TO_BE_WIPED='false' + ++## +# Formatting DASDs (S/390 specific) +# DASD (Direct Access Storage Device) denotes a disk drive on the S/390 architecture. +# DASDs need to be formatted before use (even before creating a partition table on them). @@ -17,10 +41,11 @@ index 23a83b71..0d13b487 100644 +# This can be suppressed by setting FORMAT_DASDS="false". It can be useful when one intends +# to use already formatted DASDs as recovery target. 
+FORMAT_DASDS="" -+#### - ++## ++ ## # Resizing partitions in MIGRATION_MODE during "rear recover" + # diff --git a/usr/share/rear/layout/prep-for-mount/Linux-s390/205_s390_enable_disk.sh b/usr/share/rear/layout/prep-for-mount/Linux-s390/205_s390_enable_disk.sh new file mode 120000 index 00000000..5f7a2ac0 @@ -30,7 +55,7 @@ index 00000000..5f7a2ac0 +../../prepare/Linux-s390/205_s390_enable_disk.sh \ No newline at end of file diff --git a/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh -index 13c69ce8..2a2bc33f 100644 +index 84a7cd33..acc65adf 100644 --- a/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh +++ b/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh @@ -24,6 +24,7 @@ fi @@ -40,8 +65,8 @@ index 13c69ce8..2a2bc33f 100644 + local blocksize layout dasdtype dasdcyls junk2 read component disk size label junk < <(grep "^disk $1 " "$LAYOUT_FILE") - ### Disks should be block devices. -@@ -67,7 +68,8 @@ sync + cat >> "$LAYOUT_CODE" <> "$LAYOUT_CODE" < ${devnode}" -+ LogPrint "Device $dev found according to mapping hints (will be used for '$WORKFLOW')" ++ if is_write_protected "/sys/block/$dev"; then ++ LogPrint "Device $dev is designated as write-protected (needs manual configuration)" ++ MIGRATION_MODE='true' ++ else ++ LogPrint "Device $dev found according to mapping hints (will be used for '$WORKFLOW')" ++ fi else LogPrint "Device $dev has size $newsize bytes but $size bytes is expected (needs manual configuration)" MIGRATION_MODE='true' diff --git a/usr/share/rear/layout/prepare/default/300_map_disks.sh b/usr/share/rear/layout/prepare/default/300_map_disks.sh -index 2e90768c..468aa35c 100644 +index 3353daea..d7b45a84 100644 --- a/usr/share/rear/layout/prepare/default/300_map_disks.sh +++ b/usr/share/rear/layout/prepare/default/300_map_disks.sh -@@ -112,7 +112,14 @@ while read keyword orig_device orig_size junk ; do - # Continue with next original device when it is already used as source in the mapping file: - is_mapping_source "$orig_device" && continue +@@ -121,7 +121,14 @@ while read keyword orig_device orig_size junk ; do + # considered again during the subsequent "same size" tests: + excluded_target_device_names=() # First, try to find if there is a current disk with same name and same size as the original: - sysfs_device_name="$( get_sysfs_name "$orig_device" )" + # (possibly influenced by mapping hints if known) @@ -579,30 +611,44 @@ index 2e90768c..468aa35c 100644 current_device="/sys/block/$sysfs_device_name" if test -e $current_device ; then current_size=$( get_disk_size $sysfs_device_name ) -@@ -122,11 +129,16 @@ while read keyword orig_device orig_size junk ; do - # Continue with next one if the current one is already used as target in the mapping file: - is_mapping_target "$preferred_target_device_name" && continue +@@ -129,23 +136,28 @@ while read keyword orig_device orig_size junk ; do + # its matching actual block device (e.g. /dev/sda) must be determined: + preferred_target_device_name="$( get_device_name $current_device )" # Use the current one if it is of same size as the old one: - if test "$orig_size" -eq "$current_size" ; then + if has_mapping_hint "$orig_device" || test "$orig_size" -eq "$current_size" ; then - # Ensure the determined target device is really a block device: + # Ensure the target device is really a block device on the replacement hardware. 
+ # Here the target device has same name as the original device which was a block device on the original hardware + # but it might perhaps happen that this device name is not a block device on the replacement hardware: if test -b "$preferred_target_device_name" ; then + if has_mapping_hint "$orig_device" ; then + mapping_reason="determined by mapping hint" + else + mapping_reason="same name and same size $current_size" + fi - add_mapping "$orig_device" "$preferred_target_device_name" -- LogPrint "Using $preferred_target_device_name (same name and same size) for recreating $orig_device" -+ LogPrint "Using $preferred_target_device_name ($mapping_reason) for recreating $orig_device" - # Continue with next original device in the LAYOUT_FILE: - continue - fi + # Do not map if the current one is already used as target in the mapping file: + if is_mapping_target "$preferred_target_device_name" ; then +- DebugPrint "Cannot use $preferred_target_device_name (same name and same size) for recreating $orig_device ($preferred_target_device_name already exists as target in $MAPPING_FILE)" ++ DebugPrint "Cannot use $preferred_target_device_name ($mapping_reason) for recreating $orig_device ($preferred_target_device_name already exists as target in $MAPPING_FILE)" + excluded_target_device_names+=( "$preferred_target_device_name" ) + else + # Ensure the determined target device is not write-protected: + if is_write_protected "$preferred_target_device_name" ; then +- DebugPrint "Cannot use $preferred_target_device_name (same name and same size) for recreating $orig_device ($preferred_target_device_name is write-protected)" ++ DebugPrint "Cannot use $preferred_target_device_name ($mapping_reason) for recreating $orig_device ($preferred_target_device_name is write-protected)" + excluded_target_device_names+=( "$preferred_target_device_name" ) + else + add_mapping "$orig_device" "$preferred_target_device_name" +- LogPrint "Using $preferred_target_device_name (same name and same size $current_size) for recreating $orig_device" ++ LogPrint "Using $preferred_target_device_name ($mapping_reason) for recreating $orig_device" + # Continue with next original device because the current one is now mapped: + continue + fi diff --git a/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh b/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh -index 3ab7357d..da6ce64c 100644 +index 52a4b142..a3e21c48 100644 --- a/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh +++ b/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh -@@ -362,18 +362,27 @@ Log "Saving disk partitions." +@@ -377,18 +377,27 @@ Log "Saving disks and their partitions" if [[ $blockd == dasd* && "$ARCH" == "Linux-s390" ]] ; then devname=$(get_device_name $disk) @@ -641,10 +687,10 @@ index 3ab7357d..da6ce64c 100644 fi #FIXME: exclude *rpmb (Replay Protected Memory Block) for nvme*, mmcblk* and uas -@@ -387,11 +396,38 @@ Log "Saving disk partitions." 
- devname=$(get_device_name $disk) - devsize=$(get_disk_size ${disk#/sys/block/}) - disktype=$(parted -s $devname print | grep -E "Partition Table|Disk label" | cut -d ":" -f "2" | tr -d " ") +@@ -412,11 +421,38 @@ Log "Saving disks and their partitions" + # which can happen when /dev/sdX is an empty SD card slot without medium, + # see https://github.com/rear/rear/issues/2810 + test $disktype || LogPrintError "No partition label type for 'disk $devname' (may cause 'rear recover' failure)" - - echo "# Disk $devname" - echo "# Format: disk " @@ -686,10 +732,10 @@ index 3ab7357d..da6ce64c 100644 echo "# Format: part /dev/" extract_partitions "$devname" diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh -index 91c5ff73..4f5b8f6f 100644 +index cb33ac28..6dd43313 100644 --- a/usr/share/rear/lib/layout-functions.sh +++ b/usr/share/rear/lib/layout-functions.sh -@@ -93,6 +93,12 @@ abort_recreate() { +@@ -87,6 +87,12 @@ abort_recreate() { restore_original_file "$LAYOUT_FILE" } @@ -702,7 +748,7 @@ index 91c5ff73..4f5b8f6f 100644 # Test and log if a component $1 (type $2) needs to be recreated. create_component() { local device="$1" -@@ -722,6 +728,46 @@ get_block_size() { +@@ -734,6 +740,46 @@ get_block_size() { fi } diff --git a/sources b/sources index 9e2d976..210bb61 100644 --- a/sources +++ b/sources @@ -1 +1 @@ -SHA512 (rear-2.6.tar.gz) = 4abf3ebc405a2058a80326a99d868eb8cad7d955d145c72766834c6446227b6f8bcdeb588f8b7323280e01012aa29c09d74331df6290a3f5f2de9125958776c1 +SHA512 (rear-2.7.tar.gz) = 9cabc6c5ddb01934740b73098a80bf1e0c03140ac1fce7c399752c1145bb743573033f6e34ae59a13e34fa873c8e992ce50316ee0260d23667614d5250daa087
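
For orientation, the sketch below shows how the user-visible settings touched by the hunks above would typically appear in a local ReaR configuration. It is only an illustration under assumed values (host and path names are invented, and the file is a hypothetical site config): FORMAT_DASDS is the s390-specific switch added in s390-no-clobber-disks.patch, and the rsync://... BACKUP_URL forms are the "new style" URLs described in the RSYNC prep scripts, where '::' after the host selects the rsync daemon protocol instead of ssh.

# /etc/rear/local.conf -- illustrative sketch, not shipped documentation
BACKUP=RSYNC
# New-style URL over ssh: rsync://[USER@]HOST[:PORT]/PATH
BACKUP_URL=rsync://root@backup.example.com/export/rear
# New-style URL via rsync daemon ('::' selects the rsync protocol):
#BACKUP_URL=rsync://root@backup.example.com::/rear
# s390 only: reuse already formatted DASDs instead of formatting them
# automatically during "rear recover"
FORMAT_DASDS="false"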