Update to latest stable upstream release, resolve multiple outstanding
bugzillas against F15.

Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
Doug Ledford 2011-03-25 17:05:48 -04:00
parent bd6cba269b
commit 7d8ba69d0d
9 changed files with 110 additions and 58 deletions

5
.gitignore vendored
View File

@ -4,3 +4,8 @@ mdadm-3.1.2.tar.bz2
mdadm-3.1.3-git07202010.tar.bz2
mdadm-3.1.3-git20100722.tar.bz2
mdadm-3.1.3-git20100804.tar.bz2
mdadm-3.1.5.tar.bz2
.build*
clog
*.src.rpm
*/

View File

@ -7,23 +7,12 @@
-MAP_DIR=/dev/.mdadm
-MAP_FILE = map
-MDMON_DIR = /dev/.mdadm
+MAP_DIR=/dev/md
+MAP_DIR = /dev/md
+MAP_FILE = md-device-map
+MDMON_DIR = /dev/md
DIRFLAGS = -DMAP_DIR=\"$(MAP_DIR)\" -DMAP_FILE=\"$(MAP_FILE)\"
DIRFLAGS += -DMDMON_DIR=\"$(MDMON_DIR)\"
CFLAGS = $(CWFLAGS) $(CXFLAGS) -DSendmail=\""$(MAILCMD)"\" $(CONFFILEFLAGS) $(DIRFLAGS)
@@ -165,9 +165,8 @@ mdadm.O2 : $(SRCS) mdadm.h mdmon.O2
mdmon.O2 : $(MON_SRCS) mdadm.h mdmon.h
$(CC) -o mdmon.O2 $(CFLAGS) $(LDFLAGS) -DHAVE_STDINT_H -O2 -D_FORTIFY_SOURCE=2 $(MON_SRCS)
-# use '-z now' to guarantee no dynamic linker interactions with the monitor thread
mdmon : $(MON_OBJS)
- $(CC) $(LDFLAGS) -z now -o mdmon $(MON_OBJS) $(LDLIBS)
+ $(CC) $(LDFLAGS) -o mdmon $(MON_OBJS) $(LDLIBS)
msg.o: msg.c msg.h
test_stripe : restripe.c mdadm.h
@@ -224,16 +223,16 @@ install : mdadm mdmon install-man instal
$(INSTALL) -D $(STRIP) -m 755 mdmon $(DESTDIR)$(BINDIR)/mdmon
@ -45,23 +34,3 @@
install-man: mdadm.8 md.4 mdadm.conf.5 mdmon.8
$(INSTALL) -D -m 644 mdadm.8 $(DESTDIR)$(MAN8DIR)/mdadm.8
--- mdadm-3.1.3-git20100804/mapfile.c.static 2010-07-28 23:50:15.000000000 -0400
+++ mdadm-3.1.3-git20100804/mapfile.c 2010-08-04 11:27:35.391342438 -0400
@@ -60,9 +60,15 @@
dir "/" base ".lock", \
dir }
-#define MAP_DIRS 2
+/*
+ * one ring to bind them all...
+ *
+ * We only use a single map file location. The multiple locations issue
+ * has caused more confusion than it was worth.
+ * Doug Ledford <dledford@redhat.com>
+ */
+#define MAP_DIRS 1
char *mapname[MAP_DIRS][4] = {
- mapnames("/var/run/mdadm", "map"),
mapnames(MAP_DIR, MAP_FILE),
};

3
mdadm-cron Normal file
View File

@ -0,0 +1,3 @@
# Run system wide raid-check once a week on Sunday at 1am by default
0 1 * * Sun root /usr/sbin/raid-check

View File

@ -31,6 +31,9 @@
# REPAIR_DEVS - a space delimited list of devs that the user
# specifically wants to run a repair on.
# SKIP_DEVS - a space delimited list of devs that should be skipped
# NICE - Change the raid check CPU and IO priority in order to make
# the system more responsive during lengthy checks. Valid
# values are high, normal, low, idle.
#
# Note: the raid-check script intentionally runs last in the cron.weekly
# sequence. This is so we can wait for all the resync operations to complete
@ -47,6 +50,7 @@
ENABLED=yes
CHECK=check
NICE=low
# To check devs /dev/md0 and /dev/md3, use "md0 md3"
CHECK_DEVS=""
REPAIR_DEVS=""

View File

@ -4,12 +4,15 @@
ENV{ANACONDA}=="?*", GOTO="md_imsm_inc_end"
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
RUN+="/sbin/mdadm -I $tempnode"
RUN+="/sbin/mdadm -I $env{DEVNAME}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_FS_TYPE}=="linux_raid_member", \
RUN+="/sbin/mdadm -If $env{DEVNAME}"
RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
ENV{rd_NO_MDIMSM}=="?*", GOTO="md_imsm_inc_end"
IMPORT{cmdline}="noiswmd"
IMPORT{cmdline}="nodmraid"
ENV{noiswmd}=="?*", GOTO="md_imsm_inc_end"
ENV{nodmraid}=="?*", GOTO="md_imsm_inc_end"
# In case the initramfs only started some of the arrays in our container,
# run incremental assembly on the container itself. Note: we ran mdadm
# on the container in 64-md-raid.rules, and that's how the MD_LEVEL
@ -18,11 +21,11 @@ ENV{noiswmd}=="?*", GOTO="md_imsm_inc_end"
# IMPORT{program}="/sbin/mdadm -D --export $tempnode", \
SUBSYSTEM=="block", ACTION=="add", KERNEL=="md*", \
ENV{MD_LEVEL}=="container", RUN+="/sbin/mdadm -I $tempnode"
ENV{MD_LEVEL}=="container", RUN+="/sbin/mdadm -I $env{DEVNAME}"
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="isw_raid_member", \
RUN+="/sbin/mdadm -I $tempnode"
RUN+="/sbin/mdadm -I $env{DEVNAME}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_FS_TYPE}=="isw_raid_member", \
RUN+="/sbin/mdadm -If $env{DEVNAME}"
RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
LABEL="md_imsm_inc_end"

View File

@ -1,12 +1,13 @@
Summary: The mdadm program controls Linux md devices (software RAID arrays)
Name: mdadm
Version: 3.1.3
Release: 0.git20100804.2%{?dist}.1
Source: http://www.kernel.org/pub/linux/utils/raid/mdadm/mdadm-%{version}-git20100804.tar.bz2
Version: 3.1.5
Release: 1%{?dist}
Source: http://www.kernel.org/pub/linux/utils/raid/mdadm/mdadm-%{version}.tar.bz2
Source1: mdmonitor.init
Source2: raid-check
Source3: mdadm.rules
Source4: mdadm-raid-check-sysconfig
Source5: mdadm-cron
Patch19: mdadm-3.1.3-udev.patch
Patch20: mdadm-2.5.2-static.patch
URL: http://www.kernel.org/pub/linux/utils/raid/mdadm/
@ -27,7 +28,7 @@ almost all functions without a configuration file, though a configuration
file can be used to help with some common tasks.
%prep
%setup -q -n mdadm-3.1.3-git20100804
%setup -q
%patch19 -p1 -b .udev
%patch20 -p1 -b .static
@ -39,9 +40,10 @@ rm -rf %{buildroot}
make DESTDIR=%{buildroot} MANDIR=%{_mandir} BINDIR=/sbin install
rm -f %{buildroot}/lib/udev/rules.d/64*
install -Dp -m 755 %{SOURCE1} %{buildroot}%{_initrddir}/mdmonitor
install -Dp -m 755 %{SOURCE2} %{buildroot}%{_sysconfdir}/cron.weekly/99-raid-check
install -Dp -m 755 %{SOURCE2} %{buildroot}%{_sbindir}/raid-check
install -Dp -m 644 %{SOURCE3} %{buildroot}/lib/udev/rules.d/65-md-incremental.rules
install -Dp -m 644 %{SOURCE4} %{buildroot}%{_sysconfdir}/sysconfig/raid-check
install -Dp -m 644 %{SOURCE5} %{buildroot}%{_sysconfdir}/cron.d/raid-check
mkdir -p -m 700 %{buildroot}/var/run/mdadm
%clean
@ -68,13 +70,30 @@ fi
%doc TODO ChangeLog mdadm.conf-example COPYING misc/*
/lib/udev/rules.d/*
/sbin/*
%{_sbindir}/raid-check
%{_initrddir}/*
%{_mandir}/man*/md*
%{_sysconfdir}/cron.weekly/*
%config(noreplace) %{_sysconfdir}/cron.d/*
%config(noreplace) %{_sysconfdir}/sysconfig/*
%attr(0700,root,root) %dir /var/run/mdadm
%ghost %attr(0700,root,root) %dir /var/run/mdadm
%changelog
* Fri Mar 25 2011 Doug Ledford <dledford@redhat.com> - 3.1.5-1
- Update to latest upstream stable release
- Update mdadm.rules file to honor noiswmd and nodmraid command line options
- Ghost the directory in /var/run, create /var/run/mdadm in mdmonitor init
script
- Don't report mismatch counts on either raid1 or raid10
- Check both active and idle arrays during raid check runs
- Move the raid-check script from cron.weekly to /usr/sbin, add a crontab
file to /etc/cron.d and mark it config(noreplace). This way users can
select their own raid-check frequency and have it honored through
upgrades.
- Allow the raid-check script to set the process and io priority of the
thread performing the check in order to preserve responsiveness of the
machine during the check.
- Resolves: 633229, 656620, 679843, 671076, 659933
* Tue Feb 08 2011 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.1.3-0.git20100804.2.1
- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild

View File

@ -20,6 +20,7 @@
# and mdadm man pages for possible ways to configure this service.
### END INIT INFO
PIDPATH=/var/run/mdadm
PIDFILE=/var/run/mdadm/mdadm.pid
PATH=/sbin:/usr/sbin:$PATH
RETVAL=0
@ -53,6 +54,17 @@ start ()
# MAILADDR or PROGRAM or both must be set in order to run mdadm --monitor
[ -f /etc/mdadm.conf ] || return 6
grep '^\(MAILADDR\|PROGRAM\) .' /etc/mdadm.conf >/dev/null 2>&1 || return 6
# Create our directory if it isn't there yet
if [ ! -d $PIDPATH ]; then
mkdir -m 0700 -Z system_u:object_r:mdadm_var_run_t $PIDPATH >&/dev/null
RC=$?
if [ $RC -ne 0 ]; then
echo -n "Failed to create /var/run/mdadm"
failure
echo
return 1
fi
fi
if [ -f "$PIDFILE" ]; then
checkpid `cat $PIDFILE` && return 0
fi

View File

@ -19,6 +19,24 @@ case "$CHECK" in
*) exit 0;;
esac
ionice=""
renice=""
case $NICE in
high)
renice="-n -5"
;;
low)
renice="-n 5"
ionice="-c2 -n7"
;;
idle)
renice="-n 15"
ionice="-c3"
;;
*)
;;
esac
active_list=`grep "^md.*: active" /proc/mdstat | cut -f 1 -d ' '`
[ -z "$active_list" ] && exit 0
@ -26,29 +44,45 @@ declare -A check
dev_list=""
check_list=""
for dev in $active_list; do
echo $SKIP_DEVS | grep -w $dev >/dev/null 2>&1 && continue
echo $SKIP_DEVS | grep -w $dev >&/dev/null && continue
if [ -f /sys/block/$dev/md/sync_action ]; then
# Only perform the checks on idle, healthy arrays, but delay
# actually writing the check field until the next loop so we
# don't switch currently idle arrays to active, which happens
# when two or more arrays are on the same physical disk
array_state=`cat /sys/block/$dev/md/array_state`
if [ "$array_state" != "clean" -a "$array_state" != "active" ]; then
continue
fi
sync_action=`cat /sys/block/$dev/md/sync_action`
if [ "$array_state" = clean -a "$sync_action" = idle ]; then
if [ "$sync_action" != idle ]; then
continue
fi
ck=""
echo $REPAIR_DEVS | grep -w $dev >/dev/null 2>&1 && ck="repair"
echo $CHECK_DEVS | grep -w $dev >/dev/null 2>&1 && ck="check"
echo $REPAIR_DEVS | grep -w $dev >&/dev/null && ck="repair"
echo $CHECK_DEVS | grep -w $dev >&/dev/null && ck="check"
[ -z "$ck" ] && ck=$CHECK
dev_list="$dev_list $dev"
check[$dev]=$ck
[ "$ck" = "check" ] && check_list="$check_list $dev"
fi
fi
done
[ -z "$dev_list" ] && exit 0
for dev in $dev_list; do
echo "${check[$dev]}" > /sys/block/$dev/md/sync_action
resync_pid=""
wait=10
while [ $wait -gt 0 -a -z "$resync_pid" ]; do
sleep 6
let wait--
resync_pid=$(ps -ef | awk -v mddev=$dev 'BEGIN { pattern = "^\\[" mddev "_resync]$" } $8 ~ pattern { print $2 }')
done
[ -n "$resync_pid" -a -n "$renice" ] &&
renice $renice -p $resync_pid >&/dev/null
[ -n "$resync_pid" -a -n "$ionice" ] &&
ionice $ionice -p $resync_pid >&/dev/null
done
[ -z "$check_list" ] && exit 0
@ -66,7 +100,7 @@ do
done
for dev in $check_list; do
mismatch_cnt=`cat /sys/block/$dev/md/mismatch_cnt`
# Due to the fact that raid1 writes in the kernel are unbuffered,
# Due to the fact that raid1/10 writes in the kernel are unbuffered,
# a raid1 array can have non-0 mismatch counts even when the
# array is healthy. These non-0 counts will only exist in
# transient data areas where they don't pose a problem. However,
@ -78,7 +112,10 @@ for dev in $check_list; do
# check, we still catch and correct any bad sectors there might
# be in the device.
raid_lvl=`cat /sys/block/$dev/md/level`
if [ "$mismatch_cnt" -ne 0 -a "$raid_lvl" != "raid1" ]; then
if [ "$raid_lvl" = "raid1" -o "$raid_lvl" = "raid10" ]; then
continue
fi
if [ "$mismatch_cnt" -ne 0 ]; then
echo "WARNING: mismatch_cnt is not 0 on /dev/$dev"
fi
done

View File

@ -1 +1 @@
bfc11a6f0693e3e086bbd851215c824e mdadm-3.1.3-git20100804.tar.bz2
a7575707a5f2d1ed6d408446eabcb19b mdadm-3.1.5.tar.bz2