# This file causes block devices with Linux RAID (mdadm) signatures to
# automatically trigger incremental assembly by mdadm.
# See udev(8) for syntax

# Don't process any events if anaconda is running, as anaconda brings up
# raid devices manually
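# ("?*" matches any non-empty value, so this fires whenever ANACONDA is
# set in the event environment)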
ENV{ANACONDA}=="?*", GOTO="md_end"

# Also don't process disks that are slated to be a multipath device
ENV{DM_MULTIPATH_DEVICE_PATH}=="?*", GOTO="md_end"

# We process add events on block devices (since they are ready as soon as
# they are added to the system), but we must process change events as well
# on any dm devices (like LUKS partitions or LVM logical volumes) and on
# md devices, because both of these first get added, then get brought live
# and trigger a change event. We don't process change events on bare hard
# disks because if you stop all arrays on a disk and then run fdisk on it
# to change the partitions, fdisk triggers a change event on exit, and we
# want to wait until the fdisks on all member disks are done before we do
# anything. Unfortunately, we have no way of knowing that, so those arrays
# simply have to be brought up manually after fdisk has been run on all of
# the disks.

# First, process all add events (md and dm devices will not really do
# anything here, just regular disks, and this also won't get any imsm
# array members either)
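# mdadm -I (--incremental) adds the device to whatever array it belongs
# to; --export makes it print the MD_* result variables (e.g. MD_STARTED,
# MD_DEVICE, MD_FOREIGN), which IMPORT{program} copies into the event
# environment for the rules below.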
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
IMPORT{program}="/sbin/mdadm -I --export $devnode --offroot $env{DEVLINKS}"
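
# If the array could only be started unsafely (still degraded) and is
# native to this host (not foreign), arm the mdadm-last-resort timer so
# systemd can force the degraded array up if the remaining members never
# appear.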
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
ENV{MD_STARTED}=="*unsafe*", ENV{MD_FOREIGN}=="no", ENV{SYSTEMD_WANTS}+="mdadm-last-resort@$env{MD_DEVICE}.timer"
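
# On remove, fail and remove the departing device from any array it
# belongs to (-I combined with -f); match by persistent path when ID_PATH
# is available, otherwise by kernel name.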
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}=="?*", \
ENV{ID_FS_TYPE}=="linux_raid_member", \
RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}!="?*", \
ENV{ID_FS_TYPE}=="linux_raid_member", \
RUN+="/sbin/mdadm -If $name"

# Next, check to make sure the BIOS raid stuff wasn't turned off via cmdline
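# IMPORT{cmdline} sets the named variable in the event environment when
# it appears on the kernel command line, so booting with noiswmd or
# nodmraid makes the matches below skip BIOS RAID assembly.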
IMPORT{cmdline}="noiswmd"
IMPORT{cmdline}="nodmraid"
ENV{noiswmd}=="?*", GOTO="md_imsm_inc_end"
ENV{nodmraid}=="?*", GOTO="md_imsm_inc_end"
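
# Assemble and disassemble Intel Matrix Storage (isw/imsm) BIOS RAID
# members the same way as the native raid members above.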
SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="isw_raid_member", \
RUN+="/sbin/mdadm -I $env{DEVNAME}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}=="?*", \
ENV{ID_FS_TYPE}=="isw_raid_member", \
RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
SUBSYSTEM=="block", ACTION=="remove", ENV{ID_PATH}!="?*", \
ENV{ID_FS_TYPE}=="isw_raid_member", \
RUN+="/sbin/mdadm -If $name"
LABEL="md_imsm_inc_end"
|
|
|
|
|
|
|
|
# Next, skip any dm device that isn't ready to be scanned: one the dm
# rules haven't processed, one whose dm flags disable further rule
# processing, or one that is currently suspended
ENV{DM_UDEV_RULES_VSN}!="?*", GOTO="dm_change_end"
ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="1", GOTO="dm_change_end"
ENV{DM_SUSPENDED}=="1", GOTO="dm_change_end"
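
# A raid member signature on a dm device (such as an opened LUKS volume
# or an LVM logical volume) only becomes readable once the device goes
# live, which is what the change event signals.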
KERNEL=="dm-*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
LABEL="dm_change_end"
|
|
|
|
|
|
|
|
# Finally catch any nested md raid arrays. If we brought up an md raid
# array that's part of another md raid array, it won't be ready to be used
# until the change event that occurs when it becomes live
KERNEL=="md*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"

LABEL="md_end"