import mdadm-4.2-rc2_1.el9.2
commit 1ba7e7e521
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
SOURCES/mdadm-4.2-rc2.tar.xz
|

.mdadm.metadata (new file, +1)
@@ -0,0 +1 @@
b6e99ec4b0c3953505dc2e6ef6e8f2a71a26207d SOURCES/mdadm-4.2-rc2.tar.xz
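
The metadata entry is the dist-git lookaside pointer: the hex string is the SHA-1 of the tarball fetched into SOURCES/. A quick integrity check after fetching the source (sha1sum is standard coreutils; the expected digest is the one recorded above):

    sha1sum SOURCES/mdadm-4.2-rc2.tar.xz
    # expect: b6e99ec4b0c3953505dc2e6ef6e8f2a71a26207d  SOURCES/mdadm-4.2-rc2.tar.xz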

SOURCES/disable-Werror.patch (new file, +11)
@@ -0,0 +1,11 @@
--- mdadm/Makefile.orig	2021-07-28 21:39:23.887433859 +0800
+++ mdadm/Makefile	2021-07-28 21:39:37.989432841 +0800
@@ -50,7 +50,7 @@
 CC := $(CROSS_COMPILE)gcc
 endif
 CXFLAGS ?= -ggdb
-CWFLAGS = -Wall -Werror -Wstrict-prototypes -Wextra -Wno-unused-parameter
+CWFLAGS = -Wall -Wstrict-prototypes -Wextra -Wno-unused-parameter
 ifdef WARN_UNUSED
 CWFLAGS += -Wp,-D_FORTIFY_SOURCE=2 -O3
 endif
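
Dropping -Werror keeps new compiler warnings from failing the package build. A sketch of applying the patch by hand before building (the real wiring lives in SPECS/mdadm.spec, suppressed below; the extracted directory name is an assumption):

    tar xf SOURCES/mdadm-4.2-rc2.tar.xz && cd mdadm-4.2-rc2
    patch -p1 < ../SOURCES/disable-Werror.patch   # -p1 strips the leading mdadm/ path component
    make                                          # -Wall still prints warnings; they no longer abort the build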

SOURCES/mdadm-2.5.2-static.patch (new file, +23)
@@ -0,0 +1,23 @@
--- mdadm-3.2.1/Makefile.static	2021-01-11 15:46:47.292126848 +0800
+++ mdadm-3.2.1/Makefile	2021-01-11 15:46:10.720192519 +0800
@@ -248,16 +248,16 @@
 install : install-bin install-man install-udev
 
 install-static : mdadm.static install-man
-	$(INSTALL) -D $(STRIP) -m 755 mdadm.static $(DESTDIR)$(BINDIR)/mdadm
+	$(INSTALL) -D $(STRIP) -m 755 mdadm.static $(DESTDIR)$(BINDIR)/mdadm.static
 
 install-tcc : mdadm.tcc install-man
-	$(INSTALL) -D $(STRIP) -m 755 mdadm.tcc $(DESTDIR)$(BINDIR)/mdadm
+	$(INSTALL) -D $(STRIP) -m 755 mdadm.tcc $(DESTDIR)$(BINDIR)/mdadm.tcc
 
 install-uclibc : mdadm.uclibc install-man
-	$(INSTALL) -D $(STRIP) -m 755 mdadm.uclibc $(DESTDIR)$(BINDIR)/mdadm
+	$(INSTALL) -D $(STRIP) -m 755 mdadm.uclibc $(DESTDIR)$(BINDIR)/mdadm.uclibc
 
 install-klibc : mdadm.klibc install-man
-	$(INSTALL) -D $(STRIP) -m 755 mdadm.klibc $(DESTDIR)$(BINDIR)/mdadm
+	$(INSTALL) -D $(STRIP) -m 755 mdadm.klibc $(DESTDIR)$(BINDIR)/mdadm.klibc
 
 install-man: mdadm.8 md.4 mdadm.conf.5 mdmon.8
 	$(INSTALL) -D -m 644 mdadm.8 $(DESTDIR)$(MAN8DIR)/mdadm.8
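
The hunk renames the install destinations so the alternative builds (static, tcc, uclibc, klibc) no longer clobber the regular mdadm binary. A sketch of the static flow using the Makefile targets visible above (the staging path is a placeholder; BINDIR defaults to /sbin upstream):

    make mdadm.static                       # build the statically linked binary
    make install-static DESTDIR=/tmp/stage  # with the patch applied, installs as .../mdadm.static
    ls /tmp/stage/sbin                      # mdadm.static sits alongside, not in place of, mdadm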

SOURCES/mdadm-3.3-udev.patch (new file, +13)
@@ -0,0 +1,13 @@
--- mdadm/udev-md-raid-assembly.rules~	2018-07-09 18:24:27.450774446 +0800
+++ mdadm/udev-md-raid-assembly.rules	2018-07-09 18:25:12.630735637 +0800
@@ -5,6 +5,10 @@
 ENV{ANACONDA}=="?*", GOTO="md_inc_end"
 # assemble md arrays
 
+# In Fedora we handle the raid components in 65-md-incremental.rules so that
+# we can do things like honor anaconda command line options and such
+GOTO="md_inc_end"
+
 SUBSYSTEM!="block", GOTO="md_inc_end"
 
 # skip non-initialized devices
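
The inserted GOTO turns the upstream assembly rules into a no-op so that the distribution's own rules (SOURCES/mdadm.rules below, shipped as 65-md-incremental.rules per the patch comment) drive assembly instead. One way to watch which rules fire for a member device (udevadm test is standard udev tooling; sdb1 is a placeholder):

    udevadm test /sys/class/block/sdb1 2>&1 | grep -i -e md_inc -e mdadm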

SOURCES/mdadm-raid-check-sysconfig (new file, +60)
@@ -0,0 +1,60 @@
#!/bin/bash
#
# Configuration file for /etc/cron.weekly/raid-check
#
# options:
# ENABLED - must be yes in order for the raid check to proceed
# CHECK - can be either check or repair depending on the type of
#         operation the user desires.  A check operation will scan
#         the drives looking for bad sectors and automatically
#         repairing only bad sectors.  If it finds good sectors that
#         contain bad data (meaning that the data in a sector does
#         not agree with what the data from another disk indicates
#         the data should be, for example the parity block + the other
#         data blocks would cause us to think that this data block
#         is incorrect), then it does nothing but increment the
#         counter in the file /sys/block/$dev/md/mismatch_cnt.
#         This allows the sysadmin to inspect the data in the sector
#         and the data that would be produced by rebuilding the
#         sector from redundant information and pick the correct
#         data to keep.  The repair option does the same thing, but
#         when it encounters a mismatch in the data, it automatically
#         updates the data to be consistent.  However, since we really
#         don't know whether it's the parity or the data block that's
#         correct (or which data block in the case of raid1), it's
#         luck of the draw whether or not the user gets the right
#         data instead of the bad data.  This option is the default
#         option for devices not listed in either CHECK_DEVS or
#         REPAIR_DEVS.
# CHECK_DEVS - a space delimited list of devs that the user specifically
#              wants to run a check operation on.
# REPAIR_DEVS - a space delimited list of devs that the user
#               specifically wants to run a repair on.
# SKIP_DEVS - a space delimited list of devs that should be skipped
# NICE - Change the raid check CPU and IO priority in order to make
#        the system more responsive during lengthy checks.  Valid
#        values are high, normal, low, idle.
# MAXCONCURRENT - Limit the number of devices to be checked at a time.
#                 By default all devices will be checked at the same time.
#
# Note: the raid-check script intentionally runs last in the cron.weekly
# sequence.  This is so we can wait for all the resync operations to complete
# and then check the mismatch_cnt on each array without unduly delaying
# other weekly cron jobs.  If any arrays have a non-0 mismatch_cnt after
# the check completes, we echo a warning to stdout which will then be emailed
# to the admin as long as mails from cron jobs have not been redirected to
# /dev/null.  We do not wait for repair operations to complete as the
# md stack will correct any mismatch_cnts automatically.
#
# Note2: you can not use symbolic names for the raid devices, such as
# /dev/md/root.  The names used in this file must match the names seen in
# /proc/mdstat and in /sys/block.

ENABLED=yes
CHECK=check
NICE=low
# To check devs /dev/md0 and /dev/md3, use "md0 md3"
CHECK_DEVS=""
REPAIR_DEVS=""
SKIP_DEVS=""
MAXCONCURRENT=
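
The check/repair operations described above are driven through the kernel's md sysfs interface; a minimal manual equivalent of what the cron job does for one array (md0 is a placeholder name matching /proc/mdstat):

    echo check > /sys/block/md0/md/sync_action   # start a scrub
    cat /sys/block/md0/md/sync_action            # "check" while running, "idle" when done
    cat /sys/block/md0/md/mismatch_cnt           # non-0 after a check may indicate inconsistent stripes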

SOURCES/mdadm.conf (new file, +1)
@@ -0,0 +1 @@
d /run/mdadm 0710 root root -
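
Despite its name, this source is a tmpfiles.d(5) snippet: it pre-creates /run/mdadm (mode 0710) so mdmonitor.service below can write its pid file there. A hypothetical manual run, assuming the spec installs it under /usr/lib/tmpfiles.d/:

    systemd-tmpfiles --create /usr/lib/tmpfiles.d/mdadm.conf
    ls -ld /run/mdadm    # drwx--x--- root root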

SOURCES/mdadm.rules (new file, +67)
@@ -0,0 +1,67 @@
# This file causes block devices with Linux RAID (mdadm) signatures to
# automatically cause mdadm to be run.
# See udev(8) for syntax

# Don't process any events if anaconda is running as anaconda brings up
# raid devices manually
ENV{ANACONDA}=="?*", GOTO="md_end"

# Also don't process disks that are slated to be a multipath device
ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="md_end"

# We process add events on block devices (since they are ready as soon as
# they are added to the system), but we must process change events as well
# on any dm devices (like LUKS partitions or LVM logical volumes) and on
# md devices because both of these first get added, then get brought live
# and trigger a change event.  The reason we don't process change events
# on bare hard disks is because if you stop all arrays on a disk, then
# run fdisk on the disk to change the partitions, when fdisk exits it
# triggers a change event, and we want to wait until all the fdisks on
# all member disks are done before we do anything.  Unfortunately, we have
# no way of knowing that, so we just have to let those arrays be brought
# up manually after fdisk has been run on all of the disks.

# First, process all add events (md and dm devices will not really do
# anything here, just regular disks, and this also won't get any imsm
# array members either)
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
	IMPORT{program}="/sbin/mdadm -I $env{DEVNAME} --export $devnode --offroot $${DEVLINKS}"
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
	ENV{MD_STARTED}=="*unsafe*", ENV{MD_FOREIGN}=="no", ENV{SYSTEMD_WANTS}+="mdadm-last-resort@$env{MD_DEVICE}.timer"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}=="?*", \
	ENV{ID_FS_TYPE}=="linux_raid_member", \
	RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}!="?*", \
	ENV{ID_FS_TYPE}=="linux_raid_member", \
	RUN+="/sbin/mdadm -If $name"

# Next, check to make sure the BIOS raid stuff wasn't turned off via cmdline
IMPORT{cmdline}="noiswmd"
IMPORT{cmdline}="nodmraid"
ENV{noiswmd}=="?*", GOTO="md_imsm_inc_end"
ENV{nodmraid}=="?*", GOTO="md_imsm_inc_end"
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="isw_raid_member", \
	RUN+="/sbin/mdadm -I $env{DEVNAME}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}=="?*", \
	ENV{ID_FS_TYPE}=="isw_raid_member", \
	RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}!="?*", \
	ENV{ID_FS_TYPE}=="isw_raid_member", \
	RUN+="/sbin/mdadm -If $name"
LABEL="md_imsm_inc_end"

# Next make sure that this isn't a dm device we should skip for some reason
ENV{DM_UDEV_RULES_VSN}!="?*", GOTO="dm_change_end"
ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="1", GOTO="dm_change_end"
ENV{DM_SUSPENDED}=="1", GOTO="dm_change_end"
KERNEL=="dm-*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
	ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="dm_change_end"

# Finally catch any nested md raid arrays.  If we brought up an md raid
# array that's part of another md raid array, it won't be ready to be used
# until the change event that occurs when it becomes live
KERNEL=="md*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
	ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"

LABEL="md_end"
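
All of these rules funnel into mdadm's incremental mode; a by-hand equivalent of the add/remove handlers for a single member (/dev/sdb1 is a placeholder):

    mdadm -I /dev/sdb1   # add rule: feed the member in; the array runs once enough members arrive
    mdadm -If sdb1       # remove rule's counterpart: fail and remove the departed member
    cat /proc/mdstat     # inspect the resulting arrays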

SOURCES/mdadm_event.conf (new file, +5)
@@ -0,0 +1,5 @@
# Save /proc/mdstat in case of crash in mdadm/mdmon

EVENT=post-create component=mdadm
        cat /proc/mdstat >> mdstat_data
        echo "Saved output of /proc/mdstat"

SOURCES/mdmonitor.service (new file, +12)
@@ -0,0 +1,12 @@
[Unit]
Description=Software RAID monitoring and management
ConditionPathExists=/etc/mdadm.conf

[Service]
Type=forking
PIDFile=/run/mdadm/mdadm.pid
EnvironmentFile=-/etc/sysconfig/mdmonitor
ExecStart=/sbin/mdadm --monitor --scan --syslog -f --pid-file=/run/mdadm/mdadm.pid

[Install]
WantedBy=multi-user.target
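
Activation follows the usual systemd workflow; note the unit is skipped entirely unless /etc/mdadm.conf exists (standard systemctl usage, not part of this commit):

    systemctl enable --now mdmonitor.service
    systemctl status mdmonitor.service   # a forked "mdadm --monitor --scan" should be running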

SOURCES/raid-check (new file, +135)
@@ -0,0 +1,135 @@
#!/bin/bash
#
# This script reads its configuration from /etc/sysconfig/raid-check
# Please use that file to enable/disable this script or to set the
# type of check you wish performed.

# We might be on a kernel with no raid support at all, exit if so
[ -f /proc/mdstat ] || exit 0

# and exit if we haven't been set up properly
[ -f /etc/sysconfig/raid-check ] || exit 0
. /etc/sysconfig/raid-check

# Wait until no more than arg1 arrays in arg2 list are busy
waitbusy() {
	local threshold=$(($1 + 1))
	local dev_list="$2"
	while true
	do
		local busy=0
		local dev=""
		for dev in $dev_list; do
			local sync_action=`cat /sys/block/$dev/md/sync_action`
			if [ "$sync_action" != "idle" ]; then
				let busy++
			fi
		done
		[ $busy -lt $threshold ] && break
		sleep 60
	done
}

[ "$ENABLED" != "yes" ] && exit 0

case "$CHECK" in
	check) ;;
	repair) ;;
	*) exit 0;;
esac

ionice=""
renice=""
case $NICE in
	high)
		renice="-n -5"
		;;
	low)
		renice="-n 5"
		ionice="-c2 -n7"
		;;
	idle)
		renice="-n 15"
		ionice="-c3"
		;;
	*)
		;;
esac

active_list=`grep "^md.*: active" /proc/mdstat | cut -f 1 -d ' '`
[ -z "$active_list" ] && exit 0

declare -A check
dev_list=""
check_list=""
for dev in $active_list; do
	echo $SKIP_DEVS | grep -w $dev >&/dev/null && continue
	if [ -f /sys/block/$dev/md/sync_action ]; then
		# Only perform the checks on idle, healthy arrays, but delay
		# actually writing the check field until the next loop so we
		# don't switch currently idle arrays to active, which happens
		# when two or more arrays are on the same physical disk
		array_state=`cat /sys/block/$dev/md/array_state`
		if [ "$array_state" != "clean" -a "$array_state" != "active" ]; then
			continue
		fi
		sync_action=`cat /sys/block/$dev/md/sync_action`
		if [ "$sync_action" != idle ]; then
			continue
		fi
		ck=""
		echo $REPAIR_DEVS | grep -w $dev >&/dev/null && ck="repair"
		echo $CHECK_DEVS | grep -w $dev >&/dev/null && ck="check"
		[ -z "$ck" ] && ck=$CHECK
		dev_list="$dev_list $dev"
		check[$dev]=$ck
		[ "$ck" = "check" ] && check_list="$check_list $dev"
	fi
done
[ -z "$dev_list" ] && exit 0

for dev in $dev_list; do
	# Only run $MAXCONCURRENT checks at a time
	if [ -n "$MAXCONCURRENT" ]; then
		waitbusy $((MAXCONCURRENT - 1)) "$dev_list"
	fi
	echo "${check[$dev]}" > /sys/block/$dev/md/sync_action

	resync_pid=""
	wait=10
	while [ $wait -gt 0 -a -z "$resync_pid" ]; do
		sleep 6
		let wait--
		resync_pid=$(ps -ef | awk -v mddev=$dev 'BEGIN { pattern = "^\\[" mddev "_resync]$" } $8 ~ pattern { print $2 }')
	done
	[ -n "$resync_pid" -a -n "$renice" ] &&
		renice $renice -p $resync_pid >&/dev/null
	[ -n "$resync_pid" -a -n "$ionice" ] &&
		ionice $ionice -p $resync_pid >&/dev/null
done
[ -z "$check_list" ] && exit 0

waitbusy 0 "$check_list"

for dev in $check_list; do
	mismatch_cnt=`cat /sys/block/$dev/md/mismatch_cnt`
	# Due to the fact that raid1/10 writes in the kernel are unbuffered,
	# a raid1 array can have non-0 mismatch counts even when the
	# array is healthy.  These non-0 counts will only exist in
	# transient data areas where they don't pose a problem.  However,
	# since we can't tell the difference between a non-0 count that
	# is just in transient data or a non-0 count that signifies a
	# real problem, simply don't check the mismatch_cnt on raid1
	# devices as it's providing far too many false positives.  But by
	# leaving the raid1 device in the check list and performing the
	# check, we still catch and correct any bad sectors there might
	# be in the device.
	raid_lvl=`cat /sys/block/$dev/md/level`
	if [ "$raid_lvl" = "raid1" -o "$raid_lvl" = "raid10" ]; then
		continue
	fi
	if [ "$mismatch_cnt" -ne 0 ]; then
		echo "WARNING: mismatch_cnt is not 0 on /dev/$dev"
	fi
done
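
The raid-check.service and raid-check.timer units below run this script weekly; for testing it can be invoked directly (assuming it is installed as /usr/sbin/raid-check and /etc/sysconfig/raid-check has ENABLED=yes):

    /usr/sbin/raid-check
    watch -n5 cat /proc/mdstat   # per-array check progress appears while the scrub runs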

SOURCES/raid-check.service (new file, +6)
@@ -0,0 +1,6 @@
[Unit]
Description=RAID setup health check

[Service]
Type=oneshot
ExecStart=/usr/sbin/raid-check

SOURCES/raid-check.timer (new file, +10)
@@ -0,0 +1,10 @@
[Unit]
Description=Weekly RAID setup health check

[Timer]
OnCalendar=Sun *-*-* 01:00:00
Persistent=true
AccuracySec=24h

[Install]
WantedBy=timers.target
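
Enabling the timer replaces the old cron.weekly hook referenced in the sysconfig comments (standard systemctl usage, not part of this commit):

    systemctl enable --now raid-check.timer
    systemctl list-timers raid-check.timer   # next trigger: Sunday 01:00, within the 24h accuracy window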

SPECS/mdadm.spec (new file, +1103): file diff suppressed because it is too large