mdadm/mdadm.rules
Adam Williamson f8c5cc46d4 Fix DM_MULTIPATH_DEVICE_PATH check in mdadm.rules (#1628192)
This value can be explicitly set to 0 (meaning 'not a multipath
device'). The check as currently written matches any non-empty value
at all, including 0. Fix the check to look only for '1', the value
that indicates the device is multipath; this matches how
68-del-part-nodes.rules and 69-dm-lvm-metad.rules do it.
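
For illustration, the check before and after; the old pattern is
reconstructed from the description above, since "?*" is the udev glob
that matches any one or more characters:

    # before (reconstructed): matches any non-empty value, including 0
    ENV{DM_MULTIPATH_DEVICE_PATH}=="?*", GOTO="md_end"
    # after: skip only devices that multipath has actually claimed
    ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="md_end"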

Resolves: bz1628192

Signed-off-by: Adam Williamson <awilliam@redhat.com>
2018-09-13 15:46:35 -07:00


# This file causes mdadm to be run automatically on block devices that
# carry Linux RAID (mdadm) signatures.
# See udev(7) for the rules syntax
# Don't process any events if anaconda is running, since anaconda brings
# up RAID devices manually
ENV{ANACONDA}=="?*", GOTO="md_end"
# Also don't process disks that are slated to be a multipath device
ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="md_end"
# We process add events on block devices (since they are ready as soon
# as they are added to the system), but we must also process change
# events on any dm devices (like LUKS partitions or LVM logical volumes)
# and on md devices, because both of these first get added and then get
# brought live, triggering a change event. We don't process change
# events on bare hard disks, because if you stop all arrays on a disk
# and then run fdisk on it to change the partitions, fdisk triggers a
# change event on exit, and we want to wait until fdisk is done on all
# member disks before we do anything. Unfortunately, we have no way of
# knowing that, so those arrays have to be brought up manually after
# fdisk has been run on all of the disks.
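# (Manual assembly after repartitioning can be done with, for example,
# "mdadm --assemble --scan"; that command is an illustration and is not
# run by these rules.)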
# First, process all add events (md and dm devices will not really do
# anything here, only plain disks will, and this also won't pick up any
# imsm array members)
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
        IMPORT{program}="/sbin/mdadm -I $env{DEVNAME} --export $devnode --offroot ${DEVLINKS}"
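# (IMPORT{program} above imports mdadm's --export output, e.g.
# MD_STARTED, MD_FOREIGN and MD_DEVICE, into the event environment for
# the following rules to test.)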
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
        ENV{MD_STARTED}=="*unsafe*", ENV{MD_FOREIGN}=="no", ENV{SYSTEMD_WANTS}+="mdadm-last-resort@$env{MD_DEVICE}.timer"
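# (The last-resort timer fires after a delay, so that an array whose
# remaining members never show up can still be started degraded.)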
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}=="?*", \
        ENV{ID_FS_TYPE}=="linux_raid_member", \
        RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}!="?*", \
        ENV{ID_FS_TYPE}=="linux_raid_member", \
        RUN+="/sbin/mdadm -If $name"
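# (mdadm -If, i.e. --incremental --fail, releases the departed device
# from its array; recording the persistent --path lets a new device that
# appears at the same path be picked up for that array again.)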
# Next, check that BIOS RAID wasn't turned off on the kernel command line
IMPORT{cmdline}="noiswmd"
IMPORT{cmdline}="nodmraid"
ENV{noiswmd}=="?*", GOTO="md_imsm_inc_end"
ENV{nodmraid}=="?*", GOTO="md_imsm_inc_end"
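# (IMPORT{cmdline} sets e.g. ENV{noiswmd} to "1" if the bare flag is
# present on the kernel command line, so either option skips the isw
# rules below.)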
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="isw_raid_member", \
        RUN+="/sbin/mdadm -I $env{DEVNAME}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}=="?*", \
        ENV{ID_FS_TYPE}=="isw_raid_member", \
        RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}!="?*", \
        ENV{ID_FS_TYPE}=="isw_raid_member", \
        RUN+="/sbin/mdadm -If $name"
LABEL="md_imsm_inc_end"
# Next, make sure this isn't a dm device we should skip for one of the
# reasons checked below
ENV{DM_UDEV_RULES_VSN}!="?*", GOTO="dm_change_end"
ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="1", GOTO="dm_change_end"
ENV{DM_SUSPENDED}=="1", GOTO="dm_change_end"
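# (These variables are set by the device-mapper udev rules: a missing
# DM_UDEV_RULES_VSN means those rules never ran for this device, the
# disable flag means dm asked other rules to leave the device alone, and
# DM_SUSPENDED marks a suspended device.)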
KERNEL=="dm-*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
        ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="dm_change_end"
# Finally catch any nested md raid arrays. If we brought up an md raid
# array that's part of another md raid array, it won't be ready to be used
# until the change event that occurs when it becomes live
KERNEL=="md*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
        ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="md_end"