import rear-2.4-8.el8_0

commit 89f1c2e3bd
CentOS Sources, 2019-07-30 05:17:42 -04:00, committed by Andrew Lukoshko
14 changed files with 2585 additions and 0 deletions

.gitignore

@@ -0,0 +1 @@
SOURCES/rear-2.4.tar.gz

.rear.metadata

@@ -0,0 +1 @@
9f6037ea582635ed78f1dffaa8a7cc5cdc7db37a SOURCES/rear-2.4.tar.gz


@@ -0,0 +1,15 @@
diff --git a/usr/share/rear/output/ISO/Linux-i386/249_check_rhel_grub2_efi_package.sh b/usr/share/rear/output/ISO/Linux-i386/249_check_rhel_grub2_efi_package.sh
new file mode 100644
index 00000000..4c4ded08
--- /dev/null
+++ b/usr/share/rear/output/ISO/Linux-i386/249_check_rhel_grub2_efi_package.sh
@@ -0,0 +1,9 @@
+# 249_check_rhel_grub2_efi_package.sh
+
+is_true $USING_UEFI_BOOTLOADER || return # empty or 0 means NO UEFI
+
+(
+ VERBOSE=1
+ test -r /usr/lib/grub/x86_64-efi/moddep.lst
+ PrintIfError "WARNING: /usr/lib/grub/x86_64-efi/moddep.lst not found, grub2-mkimage will likely fail. Please install the grub2-efi-x64-modules package to fix this."
+)
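The added script only warns: the test runs in a subshell with VERBOSE=1 so ReaR's PrintIfError helper reports the problem without aborting the rescue image build. A minimal standalone sketch of the same check, assuming plain bash without ReaR's helper functions:

    #!/bin/bash
    # Warn, but do not abort, when the GRUB2 EFI modules needed by grub2-mkimage
    # are missing (mirrors the intent of the patch above without ReaR helpers).
    moddep=/usr/lib/grub/x86_64-efi/moddep.lst
    if ! test -r "$moddep" ; then
        echo "WARNING: $moddep not found, grub2-mkimage will likely fail." >&2
        echo "Please install the grub2-efi-x64-modules package to fix this." >&2
    fi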


@@ -0,0 +1,275 @@
diff --git a/usr/share/rear/backup/NETFS/default/500_make_backup.sh b/usr/share/rear/backup/NETFS/default/500_make_backup.sh
index 47266910..7170cda6 100644
--- a/usr/share/rear/backup/NETFS/default/500_make_backup.sh
+++ b/usr/share/rear/backup/NETFS/default/500_make_backup.sh
@@ -67,49 +67,99 @@ else
SPLIT_COMMAND="dd of=$backuparchive"
fi
+# Used by "tar" method to record which pipe command failed
+FAILING_BACKUP_PROG_FILE="$TMP_DIR/failing_backup_prog"
+FAILING_BACKUP_PROG_RC_FILE="$TMP_DIR/failing_backup_prog_rc"
+
LogPrint "Creating $BACKUP_PROG archive '$backuparchive'"
ProgressStart "Preparing archive operation"
(
case "$(basename ${BACKUP_PROG})" in
- # tar compatible programs here
- (tar)
- set_tar_features
- Log $BACKUP_PROG $TAR_OPTIONS --sparse --block-number --totals --verbose \
- --no-wildcards-match-slash --one-file-system \
- --ignore-failed-read "${BACKUP_PROG_OPTIONS[@]}" \
- $BACKUP_PROG_CREATE_NEWER_OPTIONS \
- ${BACKUP_PROG_BLOCKS:+-b $BACKUP_PROG_BLOCKS} "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \
- -X $TMP_DIR/backup-exclude.txt -C / -c -f - \
- $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE \| $BACKUP_PROG_CRYPT_OPTIONS BACKUP_PROG_CRYPT_KEY \| $SPLIT_COMMAND
- $BACKUP_PROG $TAR_OPTIONS --sparse --block-number --totals --verbose \
- --no-wildcards-match-slash --one-file-system \
- --ignore-failed-read "${BACKUP_PROG_OPTIONS[@]}" \
- $BACKUP_PROG_CREATE_NEWER_OPTIONS \
- ${BACKUP_PROG_BLOCKS:+-b $BACKUP_PROG_BLOCKS} "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \
- -X $TMP_DIR/backup-exclude.txt -C / -c -f - \
- $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE | $BACKUP_PROG_CRYPT_OPTIONS $BACKUP_PROG_CRYPT_KEY | $SPLIT_COMMAND
- ;;
- (rsync)
- # make sure that the target is a directory
- mkdir -p $v "$backuparchive" >&2
- Log $BACKUP_PROG --verbose "${BACKUP_RSYNC_OPTIONS[@]}" --one-file-system --delete \
- --exclude-from=$TMP_DIR/backup-exclude.txt --delete-excluded \
- $(cat $TMP_DIR/backup-include.txt) "$backuparchive"
- $BACKUP_PROG --verbose "${BACKUP_RSYNC_OPTIONS[@]}" --one-file-system --delete \
- --exclude-from=$TMP_DIR/backup-exclude.txt --delete-excluded \
- $(cat $TMP_DIR/backup-include.txt) "$backuparchive" >&2
- ;;
- (*)
- Log "Using unsupported backup program '$BACKUP_PROG'"
- Log $BACKUP_PROG "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \
- $BACKUP_PROG_OPTIONS_CREATE_ARCHIVE $TMP_DIR/backup-exclude.txt \
- "${BACKUP_PROG_OPTIONS[@]}" $backuparchive \
- $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE > $backuparchive
- $BACKUP_PROG "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \
- $BACKUP_PROG_OPTIONS_CREATE_ARCHIVE $TMP_DIR/backup-exclude.txt \
- "${BACKUP_PROG_OPTIONS[@]}" $backuparchive \
- $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE > $backuparchive
- ;;
+ # tar compatible programs here
+ (tar)
+ set_tar_features
+ Log $BACKUP_PROG $TAR_OPTIONS --sparse --block-number --totals --verbose \
+ --no-wildcards-match-slash --one-file-system \
+ --ignore-failed-read "${BACKUP_PROG_OPTIONS[@]}" \
+ $BACKUP_PROG_CREATE_NEWER_OPTIONS \
+ ${BACKUP_PROG_BLOCKS:+-b $BACKUP_PROG_BLOCKS} "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \
+ -X $TMP_DIR/backup-exclude.txt -C / -c -f - \
+ $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE \| $BACKUP_PROG_CRYPT_OPTIONS BACKUP_PROG_CRYPT_KEY \| $SPLIT_COMMAND
+
+ # Variable used to record the short name of piped commands in case of
+ # error, e.g. ( "tar" "cat" "dd" ) in case of unencrypted and unsplit backup.
+ backup_prog_shortnames=(
+ "$(basename $(echo "$BACKUP_PROG" | awk '{ print $1 }'))"
+ "$(basename $(echo "$BACKUP_PROG_CRYPT_OPTIONS" | awk '{ print $1 }'))"
+ "$(basename $(echo "$SPLIT_COMMAND" | awk '{ print $1 }'))"
+ )
+ for index in ${!backup_prog_shortnames[@]} ; do
+ [ -n "${backup_prog_shortnames[$index]}" ] || BugError "No computed shortname for pipe component $index"
+ done
+
+ $BACKUP_PROG $TAR_OPTIONS --sparse --block-number --totals --verbose \
+ --no-wildcards-match-slash --one-file-system \
+ --ignore-failed-read "${BACKUP_PROG_OPTIONS[@]}" \
+ $BACKUP_PROG_CREATE_NEWER_OPTIONS \
+ ${BACKUP_PROG_BLOCKS:+-b $BACKUP_PROG_BLOCKS} \
+ "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \
+ -X $TMP_DIR/backup-exclude.txt -C / -c -f - \
+ $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE | \
+ \
+ $BACKUP_PROG_CRYPT_OPTIONS $BACKUP_PROG_CRYPT_KEY | \
+ \
+ $SPLIT_COMMAND
+ pipes_rc=( ${PIPESTATUS[@]} )
+
+ # Exit code logic:
+ # - never return rc=1 (this is reserved for "tar" warning about modified files)
+ # - process exit code in pipe's reverse order
+ # - if last command failed (e.g. "dd"), return an error
+ # - otherwise if previous command failed (e.g. "encrypt"), return an error
+ # ...
+ # - otherwise return "tar" exit code
+ #
+ # When an error occurs, record the program name in $FAILING_BACKUP_PROG_FILE
+ # and real exit code in $FAILING_BACKUP_PROG_RC_FILE.
+
+ let index=${#pipes_rc[@]}-1
+ while [ $index -ge 0 ] ; do
+ rc=${pipes_rc[$index]}
+ if [ $rc -ne 0 ] ; then
+ echo "${backup_prog_shortnames[$index]}" > $FAILING_BACKUP_PROG_FILE
+ echo "$rc" > $FAILING_BACKUP_PROG_RC_FILE
+ if [ $rc -eq 1 ] && [ "${backup_prog_shortnames[$index]}" != "tar" ] ; then
+ rc=2
+ fi
+ exit $rc
+ fi
+ # This pipe command succeeded, check the previous one
+ let index--
+ done
+ # This was a success
+ exit 0
+ ;;
+ (rsync)
+ # make sure that the target is a directory
+ mkdir -p $v "$backuparchive" >&2
+ Log $BACKUP_PROG --verbose "${BACKUP_RSYNC_OPTIONS[@]}" --one-file-system --delete \
+ --exclude-from=$TMP_DIR/backup-exclude.txt --delete-excluded \
+ $(cat $TMP_DIR/backup-include.txt) "$backuparchive"
+ $BACKUP_PROG --verbose "${BACKUP_RSYNC_OPTIONS[@]}" --one-file-system --delete \
+ --exclude-from=$TMP_DIR/backup-exclude.txt --delete-excluded \
+ $(cat $TMP_DIR/backup-include.txt) "$backuparchive" >&2
+ ;;
+ (*)
+ Log "Using unsupported backup program '$BACKUP_PROG'"
+ Log $BACKUP_PROG "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \
+ $BACKUP_PROG_OPTIONS_CREATE_ARCHIVE $TMP_DIR/backup-exclude.txt \
+ "${BACKUP_PROG_OPTIONS[@]}" $backuparchive \
+ $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE > $backuparchive
+ $BACKUP_PROG "${BACKUP_PROG_COMPRESS_OPTIONS[@]}" \
+ $BACKUP_PROG_OPTIONS_CREATE_ARCHIVE $TMP_DIR/backup-exclude.txt \
+ "${BACKUP_PROG_OPTIONS[@]}" $backuparchive \
+ $(cat $TMP_DIR/backup-include.txt) $RUNTIME_LOGFILE > $backuparchive
+ ;;
esac 2> "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log"
# important trick: the backup prog is the last in each case entry and the case .. esac is the last command
# in the (..) subshell. As a result the return code of the subshell is the return code of the backup prog!
@@ -121,44 +171,44 @@ sleep 1 # Give the backup software a good chance to start working
# return disk usage in bytes
function get_disk_used() {
- let "$(stat -f -c 'used=(%b-%f)*%S' $1)"
- echo $used
+ let "$(stat -f -c 'used=(%b-%f)*%S' $1)"
+ echo $used
}
# While the backup runs in a sub-process, display some progress information to the user.
# ProgressInfo texts have a space at the end to get the 'OK' from ProgressStop shown separated.
test "$PROGRESS_WAIT_SECONDS" || PROGRESS_WAIT_SECONDS=1
case "$( basename $BACKUP_PROG )" in
- (tar)
- while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do
- #blocks="$(stat -c %b ${backuparchive})"
- #size="$((blocks*512))"
- size="$(stat -c %s ${backuparchive}* | awk '{s+=$1} END {print s}')"
- ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] "
- done
- ;;
- (rsync)
- # since we do not want to do a $(du -s) run every second we count disk usage instead
- # this obviously leads to wrong results in case something else is writing to the same
- # disk at the same time as is very likely with a networked file system. For local disks
- # this should be good enough and in any case this is only some eye candy.
- # TODO: Find a fast way to count the actual transfer data, preferrable getting the info from rsync.
- let old_disk_used="$(get_disk_used "$backuparchive")"
- while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do
- let disk_used="$(get_disk_used "$backuparchive")" size=disk_used-old_disk_used
- ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] "
- done
- ;;
- (*)
- while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do
- size="$(stat -c "%s" "$backuparchive")" || {
- kill -9 $BackupPID
- ProgressError
- Error "$(basename $BACKUP_PROG) failed to create the archive file"
- }
- ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] "
- done
- ;;
+ (tar)
+ while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do
+ #blocks="$(stat -c %b ${backuparchive})"
+ #size="$((blocks*512))"
+ size="$(stat -c %s ${backuparchive}* | awk '{s+=$1} END {print s}')"
+ ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] "
+ done
+ ;;
+ (rsync)
+ # since we do not want to do a $(du -s) run every second we count disk usage instead
+ # this obviously leads to wrong results in case something else is writing to the same
+ # disk at the same time as is very likely with a networked file system. For local disks
+ # this should be good enough and in any case this is only some eye candy.
+ # TODO: Find a fast way to count the actual transfer data, preferrable getting the info from rsync.
+ let old_disk_used="$(get_disk_used "$backuparchive")"
+ while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do
+ let disk_used="$(get_disk_used "$backuparchive")" size=disk_used-old_disk_used
+ ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] "
+ done
+ ;;
+ (*)
+ while sleep $PROGRESS_WAIT_SECONDS ; kill -0 $BackupPID 2>/dev/null; do
+ size="$(stat -c "%s" "$backuparchive")" || {
+ kill -9 $BackupPID
+ ProgressError
+ Error "$(basename $BACKUP_PROG) failed to create the archive file"
+ }
+ ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec] "
+ done
+ ;;
esac
ProgressStop
transfertime="$((SECONDS-starttime))"
@@ -177,10 +227,12 @@ sleep 1
# everyone should see this warning, even if not verbose
case "$(basename $BACKUP_PROG)" in
(tar)
- if (( $backup_prog_rc == 1 )); then
- LogPrint "WARNING: $(basename $BACKUP_PROG) ended with return code $backup_prog_rc and below output:
+ if (( $backup_prog_rc != 0 )); then
+ prog="$(cat $FAILING_BACKUP_PROG_FILE)"
+ if (( $backup_prog_rc == 1 )); then
+ LogUserOutput "WARNING: $prog ended with return code 1 and below output:
---snip---
-$(grep '^tar: ' $RUNTIME_LOGFILE | sed -e 's/^/ /' | tail -n3)
+$(grep '^tar: ' "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log" | sed -e 's/^/ /' | tail -n3)
----------
This means that files have been modified during the archiving
process. As a result the backup may not be completely consistent
@@ -188,16 +240,19 @@ or may not be a perfect copy of the system. Relax-and-Recover
will continue, however it is highly advisable to verify the
backup in order to be sure to safely recover this system.
"
- elif (( $backup_prog_rc > 1 )); then
- Error "$(basename $BACKUP_PROG) failed with return code $backup_prog_rc and below output:
+ else
+ rc=$(cat $FAILING_BACKUP_PROG_RC_FILE)
+ Error "$prog failed with return code $rc and below output:
---snip---
-$(grep '^tar: ' $RUNTIME_LOGFILE | sed -e 's/^/ /' | tail -n3)
+$(grep "^$prog: " "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log" | sed -e 's/^/ /' | tail -n3)
----------
This means that the archiving process ended prematurely, or did
not even start. As a result it is unlikely you can recover this
system properly. Relax-and-Recover is therefore aborting execution.
"
- fi;;
+ fi
+ fi
+ ;;
(*)
if (( $backup_prog_rc > 0 )) ; then
Error "$(basename $BACKUP_PROG) failed with return code $backup_prog_rc
@@ -212,10 +267,12 @@ esac
tar_message="$(tac $RUNTIME_LOGFILE | grep -m1 '^Total bytes written: ')"
if [ $backup_prog_rc -eq 0 -a "$tar_message" ] ; then
- LogPrint "$tar_message in $transfertime seconds."
+ LogPrint "$tar_message in $transfertime seconds."
elif [ "$size" ]; then
- LogPrint "Archived $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]"
+ LogPrint "Archived $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]"
fi
### Copy progress log to backup media
cp $v "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log" "${opath}/${BACKUP_PROG_ARCHIVE}.log" >&2
+
+# vim: set et ts=4 sw=4:
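The heart of this hunk is bash's PIPESTATUS array: the exit codes of the tar | encrypt | split stages are copied right after the pipeline and walked from the last stage backwards, so the right-most failure wins and rc=1 stays reserved for tar's "files changed while reading" warning. A reduced sketch of the technique, with hypothetical stage names and a trivial pipeline:

    #!/bin/bash
    # Report which stage of a pipeline failed, checking stages right to left.
    stage_names=( "printf" "gzip" "dd" )            # short names for reporting only
    printf 'example data' | gzip -c | dd of=/dev/null 2>/dev/null
    stage_rcs=( "${PIPESTATUS[@]}" )                # copy before anything overwrites it

    let index=${#stage_rcs[@]}-1
    while [ $index -ge 0 ] ; do
        rc=${stage_rcs[$index]}
        if [ $rc -ne 0 ] ; then
            echo "stage '${stage_names[$index]}' failed with exit code $rc" >&2
            exit $rc
        fi
        let index--
    done
    echo "pipeline succeeded"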

SOURCES/rear-bz1639705.patch (1487 lines)
File diff suppressed because it is too large.


@@ -0,0 +1,156 @@
diff --git a/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh b/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh
index 9c4212ae..873e244e 100644
--- a/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh
+++ b/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh
@@ -54,9 +54,13 @@ Log "copy_as_is_executables = ${copy_as_is_executables[@]}"
# add them to the LIBS list if they are not yet included in the copied files:
Log "Adding required libraries of executables in all the copied files to LIBS"
local required_library=""
-for required_library in $( RequiredSharedOjects "${copy_as_is_executables[@]}" ) ; do
- # Skip when the required library was already actually copied by 'tar' above:
- grep -q "$required_library" $copy_as_is_filelist_file && continue
+for required_library in $( RequiredSharedObjects "${copy_as_is_executables[@]}" ) ; do
+ # Skip when the required library was already actually copied by 'tar' above.
+ # grep for a full line (copy_as_is_filelist_file contains 1 file name per line)
+ # to avoid that libraries get skipped when their library path is a substring
+ # of another already copied library, e.g. do not skip /path/to/lib when
+ # /other/path/to/lib was already copied, cf. https://github.com/rear/rear/pull/1976
+ grep -q "^${required_library}\$" $copy_as_is_filelist_file && continue
# Skip when the required library is already in LIBS:
IsInArray "$required_library" "${LIBS[@]}" && continue
Log "Adding required library '$required_library' to LIBS"
diff --git a/usr/share/rear/build/GNU/Linux/390_copy_binaries_libraries.sh b/usr/share/rear/build/GNU/Linux/390_copy_binaries_libraries.sh
index f7809bc7..ed02dea9 100644
--- a/usr/share/rear/build/GNU/Linux/390_copy_binaries_libraries.sh
+++ b/usr/share/rear/build/GNU/Linux/390_copy_binaries_libraries.sh
@@ -62,16 +62,16 @@ Log "Binaries being copied: ${all_binaries[@]}"
copy_binaries "$ROOTFS_DIR/bin" "${all_binaries[@]}"
# Copy libraries:
-# It is crucial to also have all LIBS itself in all_libs because RequiredSharedOjects()
+# It is crucial to also have all LIBS itself in all_libs because RequiredSharedObjects()
# outputs only those libraries that are required by a library but not the library itself
# so that without all LIBS itself in all_libs those libraries in LIBS are missing that
# are not needed by a binary in all_binaries (all_binaries were already copied above).
-# RequiredSharedOjects outputs the required shared objects on STDOUT.
+# RequiredSharedObjects outputs the required shared objects on STDOUT.
# The output are absolute paths to the required shared objects.
# The output can also be symbolic links (also as absolute paths).
# In case of symbolic links only the link but not the link target is output.
# Therefore for symbolic links also the link target gets copied below.
-local all_libs=( "${LIBS[@]}" $( RequiredSharedOjects "${all_binaries[@]}" "${LIBS[@]}" ) )
+local all_libs=( "${LIBS[@]}" $( RequiredSharedObjects "${all_binaries[@]}" "${LIBS[@]}" ) )
Log "Libraries being copied: ${all_libs[@]}"
local lib=""
diff --git a/usr/share/rear/build/OPALPBA/Linux-i386/391_list_executable_dependencies.sh b/usr/share/rear/build/OPALPBA/Linux-i386/391_list_executable_dependencies.sh
index 9803200d..8cb27d78 100644
--- a/usr/share/rear/build/OPALPBA/Linux-i386/391_list_executable_dependencies.sh
+++ b/usr/share/rear/build/OPALPBA/Linux-i386/391_list_executable_dependencies.sh
@@ -8,7 +8,7 @@ if is_true $KEEP_BUILD_DIR; then
executable_dependencies_list="$TMP_DIR/executable-dependencies"
for executable in "${executables[@]}"; do
- dependents=( $(RequiredSharedOjects "$ROOTFS_DIR/$executable") )
+ dependents=( $(RequiredSharedObjects "$ROOTFS_DIR/$executable") )
echo "$executable: ${dependents[*]}"
done > "$executable_dependencies_list"
diff --git a/usr/share/rear/build/default/980_verify_rootfs.sh b/usr/share/rear/build/default/980_verify_rootfs.sh
index f8b3e8e9..d03e5f34 100644
--- a/usr/share/rear/build/default/980_verify_rootfs.sh
+++ b/usr/share/rear/build/default/980_verify_rootfs.sh
@@ -51,6 +51,11 @@ if test "$BACKUP" = "SESAM" ; then
# related libraries
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SESAM_LD_LIBRARY_PATH
fi
+if test "$BACKUP" = "NBU" ; then
+ # Use a NBU-specific LD_LIBRARY_PATH to find NBU libraries
+ # see https://github.com/rear/rear/issues/1974
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$NBU_LD_LIBRARY_PATH
+fi
# Actually test all binaries for 'not found' libraries.
# Find all binaries and libraries also e.g. those that are copied via COPY_AS_IS into other paths:
for binary in $( find $ROOTFS_DIR -type f -executable -printf '/%P\n' ); do
diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
index 0f0d0675..796f228a 100644
--- a/usr/share/rear/conf/default.conf
+++ b/usr/share/rear/conf/default.conf
@@ -1467,7 +1467,8 @@ OBDR_BLOCKSIZE=2048
##
#
COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt )
-COPY_AS_IS_EXCLUDE_NBU=( "/usr/openv/netbackup/logs/*" "/usr/openv/netbackup/bin/bpjava*" "/usr/openv/netbackup/bin/xbp" )
+COPY_AS_IS_EXCLUDE_NBU=( "/usr/openv/netbackup/logs/*" "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal )
+NBU_LD_LIBRARY_PATH="/usr/openv/lib"
PROGS_NBU=( )
##
diff --git a/usr/share/rear/lib/_input-output-functions.sh b/usr/share/rear/lib/_input-output-functions.sh
index d5eed43e..bdbf593d 100644
--- a/usr/share/rear/lib/_input-output-functions.sh
+++ b/usr/share/rear/lib/_input-output-functions.sh
@@ -324,10 +324,11 @@ function LogToSyslog () {
# see https://github.com/rear/rear/issues/729
function has_binary () {
for bin in $@ ; do
- # Suppress success output via stdout (but keep failure output via stderr):
- if type $bin 1>/dev/null ; then
- return 0
- fi
+ # Suppress success output via stdout which is crucial when has_binary is called
+ # in other functions that provide their intended function results via stdout
+ # to not pollute intended function results with intermixed has_binary stdout
+ # (e.g. the RequiredSharedObjects function) but keep failure output via stderr:
+ type $bin 1>/dev/null && return 0
done
return 1
}
diff --git a/usr/share/rear/lib/linux-functions.sh b/usr/share/rear/lib/linux-functions.sh
index 6a767367..3fb44e6d 100644
--- a/usr/share/rear/lib/linux-functions.sh
+++ b/usr/share/rear/lib/linux-functions.sh
@@ -100,13 +100,13 @@ function FindStorageDrivers () {
# Determine all required shared objects (shared/dynamic libraries)
# for programs and/or shared objects (binaries) specified in $@.
-# RequiredSharedOjects outputs the required shared objects on STDOUT.
+# RequiredSharedObjects outputs the required shared objects on STDOUT.
# The output are absolute paths to the required shared objects.
# The output can also be symbolic links (also as absolute paths).
# In case of symbolic links only the link but not the link target is output.
-function RequiredSharedOjects () {
- has_binary ldd || Error "Cannot run RequiredSharedOjects() because there is no ldd binary"
- Log "RequiredSharedOjects: Determining required shared objects"
+function RequiredSharedObjects () {
+ has_binary ldd || Error "Cannot run RequiredSharedObjects() because there is no ldd binary"
+ Log "RequiredSharedObjects: Determining required shared objects"
# It uses 'ldd' to determine all required shared objects because 'ldd' outputs
# also transitively required shared objects i.e. libraries needed by libraries,
# e.g. for /usr/sbin/parted also the libraries needed by the libparted library:
@@ -164,10 +164,11 @@ function RequiredSharedOjects () {
# 2. Line: " lib (mem-addr)" -> virtual library
# 3. Line: " lib => not found" -> print error to stderr
# 4. Line: " lib => /path/to/lib (mem-addr)" -> print $3 '/path/to/lib'
- # 5. Line: " /path/to/lib (mem-addr)" -> print $1 '/path/to/lib'
+ # 5. Line: " /path/to/lib => /path/to/lib2 (mem-addr)" -> print $3 '/path/to/lib2'
+ # 6. Line: " /path/to/lib (mem-addr)" -> print $1 '/path/to/lib'
ldd "$@" | awk ' /^\t.+ => not found/ { print "Shared object " $1 " not found" > "/dev/stderr" }
/^\t.+ => \// { print $3 }
- /^\t\// { print $1 } ' | sort -u
+ /^\t\// && !/ => / { print $1 } ' | sort -u
}
# Provide a shell, with custom exit-prompt and history
diff --git a/usr/share/rear/prep/NBU/default/450_check_nbu_client_configured.sh b/usr/share/rear/prep/NBU/default/450_check_nbu_client_configured.sh
index e01dcdbd..3cc29777 100644
--- a/usr/share/rear/prep/NBU/default/450_check_nbu_client_configured.sh
+++ b/usr/share/rear/prep/NBU/default/450_check_nbu_client_configured.sh
@@ -5,5 +5,6 @@
Log "Running: /usr/openv/netbackup/bin/bplist command"
LANG=C /usr/openv/netbackup/bin/bplist -l -s `date -d "-5 days" \
"+%m/%d/%Y"` / >/dev/null
-[ $? -gt 0 ] && LogPrint "WARNING: Netbackup bplist check failed with error code $?.
+rc=$?
+[ $rc -gt 0 ] && LogPrint "WARNING: Netbackup bplist check failed with error code ${rc}.
See $RUNTIME_LOGFILE for more details."
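Besides renaming RequiredSharedOjects to RequiredSharedObjects, this patch tightens two details that are easy to miss: the library-skip test now greps for a whole line (so /path/to/lib is no longer skipped just because /other/path/to/lib was already copied), and the ldd output parsing now resolves " /path/to/lib => /path/to/lib2 (addr)" lines to the target path. A trimmed, standalone sketch of that ldd parsing (not ReaR's exact function):

    #!/bin/bash
    # Print the shared objects required by the given binaries, one absolute
    # path per line; complain on stderr about objects that cannot be found.
    required_shared_objects() {
        ldd "$@" | awk ' /^\t.+ => not found/ { print "Shared object " $1 " not found" > "/dev/stderr" }
                         /^\t.+ => \//        { print $3 }
                         /^\t\// && !/ => /   { print $1 } ' | sort -u
    }

    required_shared_objects /bin/ls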


@@ -0,0 +1,17 @@
diff --git a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh
index 4698618c..6b3194bd 100644
--- a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh
+++ b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh
@@ -854,7 +854,11 @@ function handle_physdev () {
DebugPrint "$network_interface is a physical device"
- mac="$( cat $sysfspath/address )" || BugError "Could not read a MAC address for '$network_interface'."
+ if [ -e $sysfspath/bonding_slave/perm_hwaddr ] ; then
+ mac="$( cat $sysfspath/bonding_slave/perm_hwaddr )"
+ else
+ mac="$( cat $sysfspath/address )" || BugError "Could not read a MAC address for '$network_interface'."
+ fi
# Skip fake interfaces without MAC address
[ "$mac" != "00:00:00:00:00:00" ] || return $rc_error


@@ -0,0 +1,91 @@
diff --git a/usr/share/rear/skel/default/etc/scripts/system-setup.d/55-migrate-network-devices.sh b/usr/share/rear/skel/default/etc/scripts/system-setup.d/55-migrate-network-devices.sh
index e3ebacce..17cd1dd6 100644
--- a/usr/share/rear/skel/default/etc/scripts/system-setup.d/55-migrate-network-devices.sh
+++ b/usr/share/rear/skel/default/etc/scripts/system-setup.d/55-migrate-network-devices.sh
@@ -26,18 +26,47 @@ ORIGINAL_MACS=()
# The ORIGINAL_DEVICES collects the original device names:
ORIGINAL_DEVICES=()
# The ORIG_MACS_FILE contains lines of the form: network_interface mac_address
+
+# Temporary rear_mappings_mac used when interfaces have been renamed
+tmp_mac_mapping_file=$(mktemp)
+
# TODO: What should happen if there is no ORIG_MACS_FILE or when it is empty?
while read orig_dev orig_mac junk ; do
ORIGINAL_DEVICES=( "${ORIGINAL_DEVICES[@]}" "$orig_dev")
ORIGINAL_MACS=( "${ORIGINAL_MACS[@]}" "$orig_mac" )
- # Continue with the next original MAC address if it is found on the current system:
- ip link show | grep -q "$orig_mac" && continue
- MIGRATE_DEVNAMES=( "${MIGRATE_DEVNAMES[@]}" "$orig_dev" )
- MIGRATE_MACS=( "${MIGRATE_MACS[@]}" "$orig_mac" )
+ # Continue with the next original MAC address if it is found on the current
+ # system, otherwise we consider it needs migration:
+ new_dev=$( get_device_by_hwaddr "$orig_mac" )
+ if [ -n "$new_dev" ] ; then
+ [ "$new_dev" = "$orig_dev" ] && continue
+ # The device was found but has been renamed (it was customized in
+ # source system).
+ # Create a temporary mac mapping, we don't want finalize() to update
+ # the ifcfg-* files!
+ echo "$orig_mac $orig_mac $orig_dev" >> $tmp_mac_mapping_file
+ else
+ MIGRATE_MACS+=( "$orig_mac" )
+ fi
done < $ORIG_MACS_FILE
-# Skip this process if all MACs and network interfacs (devices) are accounted for:
-test ${#MIGRATE_MACS[@]} -eq 0 && test ${#MIGRATE_DEVNAMES[@]} -eq 0 && return 0
+
+if [ ${#MIGRATE_MACS[@]} -ne 0 ] ; then
+ # If some MACs were not found (MIGRATE_MACS not empty) then, we need a migration
+ :
+elif [ -s $tmp_mac_mapping_file ] ; then
+ # Else, if some devices were renamed, we also need a migration, but it will
+ # be automatic thanks to the $tmp_mac_mapping_file mapping file
+
+ # We do not need the $MAC_MAPPING_FILE file from the user, just overwrite it
+ # Later, we will remove that file to not have finalize() modify the ifcfg-*
+ # files.
+ mkdir -p $(dirname $MAC_MAPPING_FILE)
+ cp $tmp_mac_mapping_file $MAC_MAPPING_FILE
+else
+ # Skip this process if all MACs and network interfaces (devices) are accounted for
+ unset tmp_mac_mapping_file
+ return 0
+fi
# Find the MAC addresses that are now available.
# This is an array with values of the form "$dev $mac $driver"
@@ -74,7 +103,7 @@ done
# so that it is shown to the user what MAC address mappings will be done:
if read_and_strip_file $MAC_MAPPING_FILE ; then
while read orig_dev orig_mac junk ; do
- read_and_strip_file $MAC_MAPPING_FILE | grep -q "$orig_mac" && MANUAL_MAC_MAPPING=true
+ read_and_strip_file $MAC_MAPPING_FILE | grep -qw "^$orig_mac" && MANUAL_MAC_MAPPING=true
done < $ORIG_MACS_FILE
fi
@@ -237,7 +266,7 @@ if is_true $reload_udev ; then
echo -n "Reloading udev ... "
# Force udev to reload rules (as they were just changed)
# Failback to "udevadm control --reload" in case of problem (as specify in udevadm manpage in SLES12)
- # If nothing work, then wait 1 seconf delay. It should let the time for udev to detect changes in the rules files.
+ # If nothing work, then wait 1 second delay. It should let the time for udev to detect changes in the rules files.
udevadm control --reload-rules || udevadm control --reload || sleep 1
my_udevtrigger
sleep 1
@@ -252,5 +281,13 @@ if is_true $reload_udev ; then
fi
# A later script in finalize/* will also go over the MAC mappings file and
-# apply them to the files in the recovered system.
+# apply them to the files in the recovered system, unless we did the mapping
+# automatically, which means some device has been renamed and will probably
+# gets its name back upon reboot.
+if [ -s $tmp_mac_mapping_file ] ; then
+ rm $MAC_MAPPING_FILE $tmp_mac_mapping_file
+fi
+
+unset tmp_mac_mapping_file
+# vim: set et ts=4 sw=4:
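get_device_by_hwaddr is a ReaR helper that maps a recorded MAC address back to the interface that currently owns it; migration handling is only needed when that lookup fails or returns a name different from the recorded one. A rough standalone equivalent of the lookup, assuming ip(8) is available (the MAC below is made up):

    #!/bin/bash
    # Print the name of the interface that currently has the given MAC address,
    # or nothing when no interface matches (rough equivalent of ReaR's helper).
    get_device_by_hwaddr() {
        local mac="$1"
        ip -o link | awk -v mac="$mac" \
            'tolower($0) ~ tolower(mac) { sub(/:$/, "", $2); print $2; exit }'
    }

    get_device_by_hwaddr "52:54:00:12:34:56"        # example MAC address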


@@ -0,0 +1,17 @@
diff --git a/usr/share/rear/lib/uefi-functions.sh b/usr/share/rear/lib/uefi-functions.sh
index e40f2ab0..95e6292d 100644
--- a/usr/share/rear/lib/uefi-functions.sh
+++ b/usr/share/rear/lib/uefi-functions.sh
@@ -46,6 +46,11 @@ function build_bootx86_efi {
Log "Did not find grub-mkimage (cannot build bootx86.efi)"
return
fi
- $gmkimage $v -O x86_64-efi -c $TMP_DIR/mnt/EFI/BOOT/embedded_grub.cfg -o $TMP_DIR/mnt/EFI/BOOT/BOOTX64.efi -p "/EFI/BOOT" part_gpt part_msdos fat ext2 normal chain boot configfile linux linuxefi multiboot jfs iso9660 usb usbms usb_keyboard video udf ntfs all_video gzio efi_gop reboot search test echo btrfs
+ # as not all Linux distro's have the same grub modules present we verify what we have (see also https://github.com/rear/rear/pull/2001)
+ grub_modules=""
+ for grub_module in part_gpt part_msdos fat ext2 normal chain boot configfile linux linuxefi multiboot jfs iso9660 usb usbms usb_keyboard video udf ntfs all_video gzio efi_gop reboot search test echo btrfs ; do
+ test "$( find /boot -type f -name "$grub_module.mod" 2>/dev/null )" && grub_modules="$grub_modules $grub_module"
+ done
+ $gmkimage $v -O x86_64-efi -c $TMP_DIR/mnt/EFI/BOOT/embedded_grub.cfg -o $TMP_DIR/mnt/EFI/BOOT/BOOTX64.efi -p "/EFI/BOOT" $grub_modules
StopIfError "Error occurred during $gmkimage of BOOTX64.efi"
}
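grub2-mkimage fails outright when asked for a module whose <module>.mod file is not installed, so the patch probes /boot for each candidate before adding it to the list. The probe itself, as a small hedged sketch (module list shortened):

    #!/bin/bash
    # Keep only the GRUB2 modules that are actually present as <name>.mod
    # somewhere under /boot, and hand the survivors to grub2-mkimage.
    wanted="part_gpt part_msdos fat ext2 normal chain boot configfile linux"
    grub_modules=""
    for grub_module in $wanted ; do
        test "$( find /boot -type f -name "$grub_module.mod" 2>/dev/null )" \
            && grub_modules="$grub_modules $grub_module"
    done
    echo "modules that would be passed to grub2-mkimage:$grub_modules"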


@@ -0,0 +1,94 @@
diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh
index aa6fc938..4bc59131 100644
--- a/usr/share/rear/lib/layout-functions.sh
+++ b/usr/share/rear/lib/layout-functions.sh
@@ -478,6 +478,9 @@ get_sysfs_name() {
### /dev/dm-3 -> /dev/mapper/system-tmp
### /dev/dm-4 -> /dev/mapper/oralun
### /dev/dm-5 -> /dev/mapper/oralunp1
+### /dev/sda -> /dev/sda
+###
+### Returns 0 on success, 1 if device is not existing
get_device_name() {
### strip common prefixes
local name=${1#/dev/}
@@ -485,47 +488,39 @@ get_device_name() {
contains_visible_char "$name" || BugError "Empty string passed to get_device_name"
- ### Translate dm-8 -> mapper/test
- local device dev_number mapper_number
- if [[ -d /sys/block/$name ]] ; then
+ if [[ "$name" =~ ^mapper/ ]]; then
+ echo "/dev/$name"
+ return 0
+ fi
+
+ if [[ -L "/dev/$name" ]] ; then
+ # Map vg/lv into dm-X, which will then be resolved later
+ name="$( basename $(readlink -f /dev/$name) )"
+ fi
+
+ if [[ "$name" =~ ^dm- ]] ; then
+ local device
if [[ -r /sys/block/$name/dm/name ]] ; then
### recent kernels have a dm subfolder
- echo "/dev/mapper/$( < /sys/block/$name/dm/name)";
- return 0
+ device="$( < /sys/block/$name/dm/name )"
else
- ### loop over all block devices
- dev_number=$( < /sys/block/$name/dev)
- for device in /dev/mapper/* ; do
- mapper_number=$(dmsetup info -c --noheadings -o major,minor ${device#/dev/mapper/} 2>/dev/null )
- if [ "$dev_number" = "$mapper_number" ] ; then
- echo "$device"
- return 0
- fi
- done
+ local dev_number=$( < /sys/block/$name/dev)
+ if [[ ! -r "$TMP_DIR/dmsetup_info.txt" ]] ; then
+ dmsetup info --noheadings -c -o name,major,minor > "$TMP_DIR/dmsetup_info.txt"
+ fi
+ device="$( awk -F ':' "/$dev_number\$/ { print \$1 }" < "$TMP_DIR/dmsetup_info.txt" )"
+ [[ -n "$device" ]] || BugError "No device returned for major/minor $dev_number"
fi
- fi
-
- ### Translate device name to mapper name. ex: vg/lv -> mapper/vg-lv
- if [[ "$name" =~ ^mapper/ ]]; then
- echo "/dev/$name"
+ echo "/dev/mapper/$device"
return 0
fi
- if my_dm=`readlink /dev/$name`; then
- for mapper_dev in /dev/mapper/*; do
- if mapper_dm=`readlink $mapper_dev`; then
- if [ "$my_dm" = "$mapper_dm" ]; then
- echo $mapper_dev
- return 0
- fi
- fi
- done
- fi
### handle cciss sysfs naming
name=${name//!//}
### just return the possibly nonexisting name
echo "/dev/$name"
+ [[ -r "/dev/$name" ]] && return 0
return 1
}
diff --git a/usr/share/rear/rescue/GNU/Linux/270_fc_transport_info.sh b/usr/share/rear/rescue/GNU/Linux/270_fc_transport_info.sh
index 0de4f60b..64276dfe 100644
--- a/usr/share/rear/rescue/GNU/Linux/270_fc_transport_info.sh
+++ b/usr/share/rear/rescue/GNU/Linux/270_fc_transport_info.sh
@@ -1,3 +1,6 @@
+# don't collect this anymore, this can be very slow
+return 0
+
# collect output from production SAN disks
find /sys/class/fc_transport -follow -maxdepth 6 \( -name model -o -name vendor -o -name rev -name state -o -name model_name -o -name size -o -name node_name \) 2>/dev/null| egrep -v 'driver|rport|power|drivers|devices' | xargs grep '.' > $VAR_DIR/recovery/fc_transport.info >&2
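Besides disabling the slow fc_transport scan, the rewritten get_device_name() resolves a dm-N node through /sys/block/<dm-N>/dm/name on recent kernels and only falls back to a single cached "dmsetup info -c" listing otherwise, instead of looping over /dev/mapper/* and running dmsetup per device. A trimmed sketch of the sysfs branch (the device node below is an example):

    #!/bin/bash
    # Resolve a device-mapper node such as dm-3 to its /dev/mapper/<name> path
    # via the 'dm' sysfs subdirectory that recent kernels provide.
    name=dm-3                                       # example device node
    if [[ -r /sys/block/$name/dm/name ]] ; then
        echo "/dev/mapper/$( < /sys/block/$name/dm/name )"
    else
        echo "no $name sysfs entry; would fall back to 'dmsetup info -c'" >&2
    fi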


@@ -0,0 +1,40 @@
diff --git a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh
index 6b3194bd..57e8777a 100644
--- a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh
+++ b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh
@@ -562,7 +562,12 @@ function handle_team () {
local network_interface=$1
local sysfspath=/sys/class/net/$network_interface
- if [ "$( ethtool -i $network_interface | awk '$1 == "driver:" { print $2 }' )" != "team" ] ; then
+ if has_binary ethtool ; then
+ if [ "$( ethtool -i $network_interface | awk '$1 == "driver:" { print $2 }' )" != "team" ] ; then
+ return $rc_error
+ fi
+ else
+ LogPrintError "Couldn't determine if network interface '$network_interface' is a Team, skipping."
return $rc_error
fi
@@ -854,10 +859,17 @@ function handle_physdev () {
DebugPrint "$network_interface is a physical device"
- if [ -e $sysfspath/bonding_slave/perm_hwaddr ] ; then
- mac="$( cat $sysfspath/bonding_slave/perm_hwaddr )"
- else
- mac="$( cat $sysfspath/address )" || BugError "Could not read a MAC address for '$network_interface'."
+ local mac=""
+
+ if has_binary ethtool ; then
+ mac="$( ethtool -P $network_interface 2>/dev/null | awk '{ print $NF }' )"
+ fi
+ if [ -z "$mac" ] ; then
+ if [ -e $sysfspath/bonding_slave/perm_hwaddr ] ; then
+ mac="$( cat $sysfspath/bonding_slave/perm_hwaddr )"
+ else
+ mac="$( cat $sysfspath/address )" || BugError "Could not read a MAC address for '$network_interface'."
+ fi
fi
# Skip fake interfaces without MAC address
[ "$mac" != "00:00:00:00:00:00" ] || return $rc_error


@@ -0,0 +1,85 @@
diff --git a/usr/share/rear/conf/GNU/Linux.conf b/usr/share/rear/conf/GNU/Linux.conf
index 5b9343b9..f0c44381 100644
--- a/usr/share/rear/conf/GNU/Linux.conf
+++ b/usr/share/rear/conf/GNU/Linux.conf
@@ -184,6 +184,8 @@ env
w
dosfslabel
sysctl
+blockdev
+lsblk
)
# the lib* serves to cover both 32bit and 64bit libraries!
diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh
index eb45115b..af1069ea 100644
--- a/usr/share/rear/lib/layout-functions.sh
+++ b/usr/share/rear/lib/layout-functions.sh
@@ -365,7 +365,9 @@ get_partition_start() {
local disk_name=$1
local start_block start
- local block_size=$(get_block_size ${disk_name%/*})
+ # When reading /sys/block/.../start or "dmsetup table", output is always in
+ # 512 bytes blocks
+ local block_size=512
if [[ -r /sys/block/$disk_name/start ]] ; then
start_block=$(< $path/start)
@@ -548,11 +550,32 @@ get_device_mapping() {
}
# Get the size in bytes of a disk/partition.
+# For disks, use "sda" as argument.
# For partitions, use "sda/sda1" as argument.
get_disk_size() {
local disk_name=$1
+ # When a partition is specified (e.g. sda/sda1)
+ # then it has to read /sys/block/sda/sda1/size in the old code below.
+ # In contrast the get_block_size() function below is different
+ # because it is non-sense asking for block size of a partition,
+ # so that the get_block_size() function below is stripping everything
+ # in front of the blockdev basename (e.g. /some/path/sda -> sda)
+ # cf. https://github.com/rear/rear/pull/1885#discussion_r207900308
+
+ # Preferably use blockdev, see https://github.com/rear/rear/issues/1884
+ if has_binary blockdev; then
+ # ${disk_name##*/} translates 'sda/sda1' into 'sda1' and 'sda' into 'sda'
+ blockdev --getsize64 /dev/${disk_name##*/} && return
+ # If blockdev fails do not error out but fall through to the old code below
+ # because blockdev fails e.g. for a CDROM device when no DVD or ISO is attached to
+ # cf. https://github.com/rear/rear/pull/1885#issuecomment-410676283
+ # and https://github.com/rear/rear/pull/1885#issuecomment-410697398
+ fi
- local block_size=$(get_block_size ${disk_name%/*})
+ # Linux always considers sectors to be 512 bytes long. See the note in the
+ # kernel source, specifically, include/linux/types.h regarding the sector_t
+ # type for details.
+ local block_size=512
retry_command test -r /sys/block/$disk_name/size || Error "Could not determine size of disk $disk_name"
@@ -565,9 +588,20 @@ get_disk_size() {
# Get the block size of a disk.
get_block_size() {
+ local disk_name="${1##*/}" # /some/path/sda -> sda
+
+ # Preferably use blockdev, see https://github.com/rear/rear/issues/1884
+ if has_binary blockdev; then
+ blockdev --getss /dev/$disk_name && return
+ # If blockdev fails do not error out but fall through to the old code below
+ # because blockdev fails e.g. for a CDROM device when no DVD or ISO is attached to
+ # cf. https://github.com/rear/rear/pull/1885#issuecomment-410676283
+ # and https://github.com/rear/rear/pull/1885#issuecomment-410697398
+ fi
+
# Only newer kernels have an interface to get the block size
- if [ -r /sys/block/$1/queue/logical_block_size ] ; then
- echo $( < /sys/block/$1/queue/logical_block_size)
+ if [ -r /sys/block/$disk_name/queue/logical_block_size ] ; then
+ echo $( < /sys/block/$disk_name/queue/logical_block_size)
else
echo "512"
fi
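The preference for blockdev(8) matters because sysfs sizes are always counted in 512-byte sectors regardless of the device's logical block size, and blockdev can still fail (for example on a CD-ROM drive with no medium), in which case the old sysfs path is kept as a fallback. A hedged sketch of the same size query for whole disks (partitions such as sda/sda1 are handled slightly differently in ReaR):

    #!/bin/bash
    # Print the size of a whole disk in bytes, preferring blockdev(8) and
    # falling back to /sys/block/<dev>/size, which counts 512-byte sectors.
    disk_size() {
        local disk_name="${1##*/}"                  # /some/path/sda -> sda
        if command -v blockdev >/dev/null 2>&1 ; then
            blockdev --getsize64 "/dev/$disk_name" 2>/dev/null && return
        fi
        echo $(( $( < /sys/block/$disk_name/size ) * 512 ))
    }

    disk_size sda                                   # example device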


@@ -0,0 +1,50 @@
diff --git a/usr/share/rear/finalize/Linux-ppc64le/680_install_PPC_bootlist.sh b/usr/share/rear/finalize/Linux-ppc64le/680_install_PPC_bootlist.sh
index bf1db404..49480f38 100644
--- a/usr/share/rear/finalize/Linux-ppc64le/680_install_PPC_bootlist.sh
+++ b/usr/share/rear/finalize/Linux-ppc64le/680_install_PPC_bootlist.sh
@@ -9,9 +9,12 @@ if grep -q "PowerNV" /proc/cpuinfo || grep -q "emulated by qemu" /proc/cpuinfo ;
fi
# Look for the PPC PReP Boot Partition.
-part=$( awk -F ' ' '/^part / {if ($6 ~ /prep/) {print $7}}' $LAYOUT_FILE )
+part_list=$( awk -F ' ' '/^part / {if ($6 ~ /prep/) {print $7}}' $LAYOUT_FILE )
-if [ -n "$part" ]; then
+# All the possible boot devices
+boot_list=()
+
+for part in $part_list ; do
LogPrint "PPC PReP Boot partition found: $part"
# Using $LAYOUT_DEPS file to find the disk device containing the partition.
@@ -29,14 +32,23 @@ if [ -n "$part" ]; then
# If yes, get the list of path which are part of the multipath device.
# Limit to the first 5 PATH (see #876)
if dmsetup ls --target multipath | grep -w ${bootdev#/dev/mapper/} >/dev/null 2>&1; then
- LogPrint "Limiting bootlist to 5 entries..."
- bootlist_path=$(dmsetup deps $bootdev -o devname | awk -F: '{gsub (" ",""); gsub("\\(","/dev/",$2) ; gsub("\\)"," ",$2) ; print $2}' | cut -d" " -f-5)
- LogPrint "Set LPAR bootlist to $bootlist_path"
- bootlist -m normal $bootlist_path
+ LogPrint "Limiting bootlist to 5 entries as a maximum..."
+ boot_list+=( $(dmsetup deps $bootdev -o devname | awk -F: '{gsub (" ",""); gsub("\\(","/dev/",$2) ; gsub("\\)"," ",$2) ; print $2}' | cut -d" " -f-5) )
else
# Single Path device found
- LogPrint "Set LPAR bootlist to $bootdev"
- bootlist -m normal $bootdev
+ boot_list+=( $bootdev )
fi
- LogIfError "Unable to set bootlist. You will have to start in SMS to set it up manually."
+done
+
+if [[ ${#boot_list[@]} -gt 5 ]]; then
+ LogPrint "Too many entries for bootlist command, limiting to first 5 entries..."
+ boot_list=( ${boot_list[@]:0:5} )
fi
+
+if [[ ${#boot_list[@]} -gt 0 ]]; then
+ LogPrint "Set LPAR bootlist to '${boot_list[@]}'"
+ bootlist -m normal $boot_list
+ LogPrintIfError "Unable to set bootlist. You will have to start in SMS to set it up manually."
+fi
+
+# vim: set et ts=4 sw=4:
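The patch collects every PReP boot candidate into an array and caps it at five entries (the limit referenced as #876 in the comments) before making a single bootlist call. The truncation is a plain bash array slice:

    #!/bin/bash
    # Keep at most the first five boot device candidates for 'bootlist -m normal'.
    boot_list=( /dev/sda1 /dev/sdb1 /dev/sdc1 /dev/sdd1 /dev/sde1 /dev/sdf1 )   # example values
    if [[ ${#boot_list[@]} -gt 5 ]] ; then
        boot_list=( "${boot_list[@]:0:5}" )
    fi
    echo "would run: bootlist -m normal ${boot_list[@]}"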

SPECS/rear.spec

@@ -0,0 +1,256 @@
%define debug_package %{nil}
Summary: Relax-and-Recover is a Linux disaster recovery and system migration tool
Name: rear
Version: 2.4
Release: 8%{?dist}
License: GPLv3
Group: Applications/File
URL: http://relax-and-recover.org/
Source0: https://github.com/rear/rear/archive/%{version}.tar.gz#/rear-%{version}.tar.gz
Patch4: rear-bz1492177-warning.patch
Patch6: rear-rhbz1610638.patch
Patch7: rear-rhbz1610647.patch
Patch8: rear-bz1652828-bz1652853.patch
Patch9: rear-bz1631183.patch
Patch10: rear-bz1639705.patch
Patch11: rear-bz1653214.patch
Patch12: rear-bz1659137.patch
Patch13: rear-bz1663515.patch
Patch14: rear-bz1672938.patch
Patch15: rear-bz1685166.patch
ExcludeArch: s390x
ExcludeArch: s390
### Dependencies on all distributions
BuildRequires: asciidoc
Requires: binutils
Requires: ethtool
Requires: gzip
Requires: iputils
Requires: parted
Requires: tar
Requires: openssl
Requires: gawk
Requires: attr
Requires: bc
### If you require NFS, you may need the below packages
#Requires: nfsclient portmap rpcbind
### We drop LSB requirements because it pulls in too many dependencies
### The OS is hardcoded in /etc/rear/os.conf instead
#Requires: redhat-lsb
### Required for Bacula/MySQL support
#Requires: bacula-mysql
### Required for OBDR
#Requires: lsscsi sg3_utils
### Optional requirement
#Requires: cfg2html
%ifarch x86_64 i686
Requires: syslinux
%endif
%ifarch ppc ppc64
Requires: yaboot
%endif
Requires: crontabs
Requires: iproute
#Requires: mkisofs
Requires: xorriso
# mingetty is not available anymore with RHEL 7 (use agetty instead via systemd)
# Note that CentOS also has rhel defined so there is no need to use centos
%if 0%{?rhel} && 0%{?rhel} > 6
Requires: util-linux
%else
Requires: mingetty
Requires: util-linux
%endif
### The rear-snapshot package is no more
#Obsoletes: rear-snapshot
%description
Relax-and-Recover is the leading Open Source disaster recovery and system
migration solution. It comprises a modular framework and ready-to-go
workflows for many common situations to produce a bootable image and
restore from backup using this image. As a benefit, it allows restoring
to different hardware and can therefore be used as a migration tool as
well.
Currently Relax-and-Recover supports various boot media (incl. ISO, PXE,
OBDR tape, USB or eSATA storage), a variety of network protocols (incl.
sftp, ftp, http, nfs, cifs) as well as a multitude of backup strategies
(incl. IBM TSM, HP DataProtector, Symantec NetBackup, EMC NetWorker,
Bacula, Bareos, BORG, Duplicity, rsync).
Relax-and-Recover was designed to be easy to set up, requires no maintenance
and is there to assist when disaster strikes. Its setup-and-forget nature
removes any excuse for not having a disaster recovery solution implemented.
Professional services and support are available.
%pre
if [ $1 -gt 1 ] ; then
# during upgrade remove obsolete directories
%{__rm} -rf %{_datadir}/rear/output/NETFS
fi
%prep
%setup
%patch4 -p1
%patch6 -p1
%patch7 -p1
%patch8 -p1
%patch9 -p1
%patch10 -p1
%patch11 -p1
%patch12 -p1
%patch13 -p1
%patch14 -p1
%patch15 -p1
echo "30 1 * * * root /usr/sbin/rear checklayout || /usr/sbin/rear mkrescue" >rear.cron
### Add a specific os.conf so we do not depend on LSB dependencies
%{?fedora:echo -e "OS_VENDOR=Fedora\nOS_VERSION=%{?fedora}" >etc/rear/os.conf}
%{?rhel:echo -e "OS_VENDOR=RedHatEnterpriseServer\nOS_VERSION=%{?rhel}" >etc/rear/os.conf}
%build
# asciidoc writes a timestamp to files it produces, based on the last
# modified date of the source file, but is sensible to the timezone.
# This makes the results differ according to the timezone of the build machine
# and spurious changes will be seen.
# Set the timezone to UTC as a workaround.
# https://wiki.debian.org/ReproducibleBuilds/TimestampsInDocumentationGeneratedByAsciidoc
TZ=UTC %{__make} -C doc
%install
%{__rm} -rf %{buildroot}
%{__make} install DESTDIR="%{buildroot}"
%{__install} -Dp -m0644 rear.cron %{buildroot}%{_sysconfdir}/cron.d/rear
%files
%defattr(-, root, root, 0755)
%doc MAINTAINERS COPYING README.adoc doc/*.txt doc/user-guide/relax-and-recover-user-guide.html
%doc %{_mandir}/man8/rear.8*
%config(noreplace) %{_sysconfdir}/cron.d/rear
%config(noreplace) %{_sysconfdir}/rear/
%config(noreplace) %{_sysconfdir}/rear/cert/
%{_datadir}/rear/
%{_localstatedir}/lib/rear/
%{_sbindir}/rear
%changelog
* Tue May 28 2019 Pavel Cahyna <pcahyna@redhat.com> - 2.4-8
- Apply upstream PR2065 (record permanent MAC address for team members)
Resolves: rhbz1685178
* Tue May 28 2019 Pavel Cahyna <pcahyna@redhat.com> - 2.4-7
- Apply upstream PR2034 (multipath optimizations for lots of devices)
* Mon Jan 14 2019 Pavel Cahyna <pcahyna@redhat.com> - 2.4-6
- Require xorriso instead of genisoimage, it is now the preferred method
and supports files over 4GB in size.
- Apply upstream PR2004 (support for custom network interface naming)
- Backport upstream PR2001 (UEFI support broken on Fedora 29 and RHEL 8)
* Thu Dec 13 2018 Pavel Cahyna <pcahyna@redhat.com> - 2.4-4
- Backport fixes for upstream bugs 1974 and 1975
- Backport fix for upstream bug 1913 (backup succeeds in case of tar error)
- Backport fix for upstream bug 1926 (support for LACP bonding and teaming)
- Apply upstream PR1954 (record permanent MAC address for bond members)
* Thu Aug 09 2018 Pavel Cahyna <pcahyna@redhat.com> - 2.4-3
- Merge some spec changes from Fedora.
- Apply upstream patch PR1887
LPAR/PPC64 bootlist is incorrectly set when having multiple 'prep' partitions
- Apply upstream patch PR1885
Partition information recorded is unexpected when disk has 4K block size
* Wed Jul 18 2018 Pavel Cahyna <pcahyna@redhat.com> - 2.4-2
- Build and install the HTML user guide. #1418459
* Wed Jun 27 2018 Pavel Cahyna <pcahyna@redhat.com> - 2.4-1
- Rebase to version 2.4, drop patches integrated upstream
Resolves #1534646 #1484051 #1498828 #1571266 #1496518
* Wed Feb 14 2018 Pavel Cahyna <pcahyna@redhat.com> - 2.00-6
- Ensure that NetBackup is started automatically upon recovery (PR#1544)
Also do not kill daemons spawned by sysinit.service at the service's end
(PR#1610, applies to NetBackup and also to dhclient)
Resolves #1506231
- Print a warning if grub2-mkimage is about to fail and suggest what to do.
bz#1492177
- Update the patch for #1388653 to the one actually merged upstream (PR1418)
* Fri Jan 12 2018 Pavel Cahyna <pcahyna@redhat.com> - 2.00-5
- cd to the correct directory before md5sum to fix BACKUP_INTEGRITY_CHECK.
Upstream PR#1685, bz1532676
* Mon Oct 23 2017 Pavel Cahyna <pcahyna@redhat.com> - 2.00-4
- Retry get_disk_size to fix upstream #1370, bz1388653
* Wed Sep 13 2017 Pavel Cahyna <pcahyna@redhat.com> - 2.00-3
- Fix rear mkrescue on systems w/o UEFI. Upstream PR#1481 issue#1478
- Resolves: #1479002
* Wed May 17 2017 Jakub Mazanek <jmazanek@redhat.com> - 2.00-2
- Excluding Archs s390 and s390x
- Related #1355667
* Mon Feb 20 2017 Jakub Mazanek <jmazanek@redhat.com> - 2.00-1
- Rebase to version 2.00
- Resolves #1355667
* Tue Jul 19 2016 Petr Hracek <phracek@redhat.com> - 1.17.2-6
- Replace experimental grep -P with grep -E
Resolves: #1290205
* Wed Mar 23 2016 Petr Hracek <phracek@redhat.com> - 1.17.2-5
- Remove backuped patched files
Related: #1283930
* Wed Mar 23 2016 Petr Hracek <phracek@redhat.com> - 1.17.2-4
- Rear recovery over teaming interface will not work
Resolves: #1283930
* Tue Mar 08 2016 Petr Hracek <phracek@redhat.com> - 1.17.2-3
- Replace experimental grep -P with grep -E
Resolves: #1290205
* Tue Feb 23 2016 Petr Hracek <phracek@redhat.com> - 1.17.2-2
- rear does not require syslinux
- changing to arch package so that syslinux is installed
- Resolves: #1283927
* Mon Sep 14 2015 Petr Hracek <phracek@redhat.com> - 1.17.2-1
- New upstream release 1.17.2
Related: #1059196
* Wed May 13 2015 Petr Hracek <phracek@redhat.com> 1.17.0-2
- Fix Source tag
Related: #1059196
* Mon May 04 2015 Petr Hracek <phracek@redhat.com> 1.17.0-1
- Initial package for RHEL 7
Resolves: #1059196
* Fri Oct 17 2014 Gratien D'haese <gratien.dhaese@gmail.com>
- added the suse_version lines to identify the corresponding OS_VERSION
* Fri Jun 20 2014 Gratien D'haese <gratien.dhaese@gmail.com>
- add %%pre section
* Thu Apr 11 2013 Gratien D'haese <gratien.dhaese@gmail.com>
- changes Source
* Thu Jun 03 2010 Dag Wieers <dag@wieers.com>
- Initial package. (using DAR)