import python-blivet-3.2.2-9.el8

CentOS Sources 2021-03-30 14:11:24 -04:00 committed by Stepan Oksanichenko
parent ade2f5b6d3
commit b991a48d68
15 changed files with 4945 additions and 1 deletion


@@ -0,0 +1,438 @@
From 44d7e9669fe55fd4b2b3a6c96f23e2d0669f8dbb Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 9 Jul 2020 13:42:31 +0200
Subject: [PATCH] Blivet RHEL 8.3 localization update
Resolves: rhbz#1820565
---
po/ja.po | 33 ++++++++++-----------
po/ko.po | 83 ++++++++++++++++++++++++-----------------------------
po/zh_CN.po | 28 +++++++++---------
3 files changed, 68 insertions(+), 76 deletions(-)
diff --git a/po/ja.po b/po/ja.po
index 733e63a0..b4c864c2 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -29,17 +29,17 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-05-21 12:42+0200\n"
-"PO-Revision-Date: 2018-09-21 01:08+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language-Team: Japanese (http://www.transifex.com/projects/p/blivet/language/"
-"ja/)\n"
+"POT-Creation-Date: 2020-01-29 14:04+0100\n"
+"PO-Revision-Date: 2020-07-03 07:42+0000\n"
+"Last-Translator: Ludek Janda <ljanda@redhat.com>\n"
+"Language-Team: Japanese <https://translate.fedoraproject.org/projects/blivet/"
+"blivet-rhel8/ja/>\n"
"Language: ja\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-"X-Generator: Zanata 4.6.2\n"
+"X-Generator: Weblate 4.1.1\n"
#: ../blivet/errors.py:210
msgid ""
@@ -47,6 +47,8 @@ msgid ""
"of the UUID value which should be unique. In that case you can either "
"disconnect one of the devices or reformat it."
msgstr ""
+"これは通常、デバイスイメージを複製したことで、一意であるはずのUUID値が重複することが原因です。その場合は、いずれかのデバイスを切断するか、再フォーマッ"
+"トしてください。"
#: ../blivet/errors.py:217
msgid ""
@@ -54,9 +56,8 @@ msgid ""
"kernel is reporting partitions on. It is unclear what the exact problem is. "
"Please file a bug at http://bugzilla.redhat.com"
msgstr ""
-"なんらかの理由により、kernel がパーティションを報告しているディスク上でディス"
-"クラベルを見つけられませんでした。何が問題となっているかは不明です。バグを "
-"http://bugzilla.redhat.com に提出してください。"
+"なんらかの理由により、kernel がパーティションを報告しているディスク上でディスクラベルを見つけられませんでした。何が問題となっているかは不明です。"
+"バグを http://bugzilla.redhat.com に提出してください"
#: ../blivet/errors.py:224
msgid ""
@@ -84,7 +85,7 @@ msgstr "FCoE は使用できません"
#: ../blivet/zfcp.py:62
msgid "You have not specified a device number or the number is invalid"
-msgstr "デバイス番号を指定していないか番号が無効です。"
+msgstr "デバイス番号を指定していないか番号が無効です"
#: ../blivet/zfcp.py:64
msgid "You have not specified a worldwide port name or the name is invalid."
@@ -202,7 +203,7 @@ msgstr "iSCSI ノードが何も探索できませんでした"
#: ../blivet/iscsi.py:550
msgid "No new iSCSI nodes discovered"
-msgstr "新しい iSCSI ノードは見つかりませんでした。"
+msgstr "新しい iSCSI ノードは見つかりませんでした"
#: ../blivet/iscsi.py:553
msgid "Could not log in to any of the discovered nodes"
@@ -257,7 +258,7 @@ msgstr "要求を超えたサイズを再利用することができません"
#: ../blivet/partitioning.py:1419
msgid "DiskChunk requests must be of type PartitionRequest"
-msgstr "DiskChunk 要求には PartitionResquest タイプが必要です。"
+msgstr "DiskChunk 要求には PartitionResquest タイプが必要です"
#: ../blivet/partitioning.py:1432
msgid "partitions allocated outside disklabel limits"
@@ -265,7 +266,7 @@ msgstr "ディスクラベルの範囲外に割り当てられたパーティシ
#: ../blivet/partitioning.py:1517
msgid "VGChunk requests must be of type LVRequest"
-msgstr "VGChunk 要求には LVResquest タイプが必要です。"
+msgstr "VGChunk 要求には LVResquest タイプが必要です"
#. by now we have allocated the PVs so if there isn't enough
#. space in the VG we have a real problem
@@ -368,15 +369,15 @@ msgstr ""
msgid "Cannot remove a member from existing %s array"
msgstr "既存の %s 配列からメンバーを削除できません"
-#: ../blivet/formats/fs.py:934
+#: ../blivet/formats/fs.py:932
msgid "EFI System Partition"
msgstr "EFI システムパーティション"
-#: ../blivet/formats/fs.py:1139
+#: ../blivet/formats/fs.py:1137
msgid "Apple Bootstrap"
msgstr "Apple ブートストラップ"
-#: ../blivet/formats/fs.py:1175
+#: ../blivet/formats/fs.py:1173
msgid "Linux HFS+ ESP"
msgstr "Linux HFS+ ESP"
diff --git a/po/ko.po b/po/ko.po
index 66789af0..747b00c5 100644
--- a/po/ko.po
+++ b/po/ko.po
@@ -20,17 +20,17 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-05-21 12:42+0200\n"
-"PO-Revision-Date: 2018-09-21 01:08+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language-Team: Korean (http://www.transifex.com/projects/p/blivet/language/"
-"ko/)\n"
+"POT-Creation-Date: 2020-01-29 14:04+0100\n"
+"PO-Revision-Date: 2020-07-03 07:42+0000\n"
+"Last-Translator: Ludek Janda <ljanda@redhat.com>\n"
+"Language-Team: Korean <https://translate.fedoraproject.org/projects/blivet/"
+"blivet-rhel8/ko/>\n"
"Language: ko\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-"X-Generator: Zanata 4.6.2\n"
+"X-Generator: Weblate 4.1.1\n"
#: ../blivet/errors.py:210
msgid ""
@@ -38,6 +38,8 @@ msgid ""
"of the UUID value which should be unique. In that case you can either "
"disconnect one of the devices or reformat it."
msgstr ""
+"이는 일반적으로 장치 이미지 복제로 인해 고유한 UUID 값이 복제되기 때문에 발생합니다. 이 경우 장치 중 하나를 분리하거나 다시 "
+"포맷할 수 있습니다."
#: ../blivet/errors.py:217
msgid ""
@@ -45,9 +47,8 @@ msgid ""
"kernel is reporting partitions on. It is unclear what the exact problem is. "
"Please file a bug at http://bugzilla.redhat.com"
msgstr ""
-"일부 이유로 커널이 파티션 설정을 보고하는 디스크에 디스크 레이블을 배치할 수 "
-"없습니다. 정확한 문제가 무엇인지 알 수 없습니다. http://bugzilla.redhat.com"
-"에 버그 리포트를 제출해 주십시오."
+"일부 이유로 커널이 파티션 설정을 보고하는 디스크에 디스크 레이블을 배치할 수 없습니다. 정확한 문제가 무엇인지 알 수 없습니다. "
+"http://bugzilla.redhat.com에 버그 리포트를 제출해 주십시오"
#: ../blivet/errors.py:224
msgid ""
@@ -78,11 +79,11 @@ msgstr "장치 번호를 지정하지 않았거나, 번호가 맞지 않습니
#: ../blivet/zfcp.py:64
msgid "You have not specified a worldwide port name or the name is invalid."
-msgstr "세계 포트 이름(WWPN)을 지정하지 않았거나, 포트 번호가 맞지 않습니다"
+msgstr "세계 포트 이름(WWPN)을 지정하지 않았거나, 포트 번호가 맞지 않습니다."
#: ../blivet/zfcp.py:66
msgid "You have not specified a FCP LUN or the number is invalid."
-msgstr "FCP LUN을 지정하지 않았거나, 번호가 맞지 않습니다"
+msgstr "FCP LUN을 지정하지 않았거나, 번호가 맞지 않습니다."
#: ../blivet/zfcp.py:91
#, python-format
@@ -103,7 +104,7 @@ msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s (%(e)s)에 추가할 수
#: ../blivet/zfcp.py:119
#, python-format
msgid "WWPN %(wwpn)s not found at zFCP device %(devnum)s."
-msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s에서 찾을 수 없습니다. "
+msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s에서 찾을 수 없습니다."
#: ../blivet/zfcp.py:134
#, python-format
@@ -111,8 +112,7 @@ msgid ""
"Could not add LUN %(fcplun)s to WWPN %(wwpn)s on zFCP device %(devnum)s "
"(%(e)s)."
msgstr ""
-"zFCP 장치 %(devnum)s (%(e)s)에서 LUN %(fcplun)s을(를) WWPN %(wwpn)s에 추가할 "
-"수 없습니다. "
+"zFCP 장치 %(devnum)s (%(e)s)에서 LUN %(fcplun)s을(를) WWPN %(wwpn)s에 추가할 수 없습니다."
#: ../blivet/zfcp.py:140
#, python-format
@@ -136,18 +136,14 @@ msgstr ""
msgid ""
"Failed LUN %(fcplun)s at WWPN %(wwpn)s on zFCP device %(devnum)s removed "
"again."
-msgstr ""
-"zFCP 장치 %(devnum)s에 있는 WWPN %(wwpn)s에서 실패한 LUN %(fcplun)s이 다시 삭"
-"제되었습니다. "
+msgstr "zFCP 장치 %(devnum)s에 있는 WWPN %(wwpn)s에서 실패한 LUN %(fcplun)s이 다시 삭제되었습니다."
#: ../blivet/zfcp.py:218
#, python-format
msgid ""
"Could not correctly delete SCSI device of zFCP %(devnum)s %(wwpn)s "
"%(fcplun)s (%(e)s)."
-msgstr ""
-"zFCP %(devnum)s %(wwpn)s %(fcplun)s (%(e)s)의 SCSI 장치를 올바르게 삭제할 수 "
-"없습니다. "
+msgstr "zFCP %(devnum)s %(wwpn)s %(fcplun)s (%(e)s)의 SCSI 장치를 올바르게 삭제할 수 없습니다."
#: ../blivet/zfcp.py:227
#, python-format
@@ -161,41 +157,40 @@ msgstr ""
#: ../blivet/zfcp.py:245
#, python-format
msgid "Could not remove WWPN %(wwpn)s on zFCP device %(devnum)s (%(e)s)."
-msgstr ""
-"zFCP 장치 %(devnum)s (%(e)s)에서 WWPN %(wwpn)s을(를) 제거할 수 없습니다. "
+msgstr "zFCP 장치 %(devnum)s (%(e)s)에서 WWPN %(wwpn)s을(를) 제거할 수 없습니다."
#: ../blivet/zfcp.py:271
#, python-format
msgid "Could not set zFCP device %(devnum)s offline (%(e)s)."
-msgstr "zFCP 장치 %(devnum)s를 오프라인 (%(e)s)으로 설정할 수 없습니다. "
+msgstr "zFCP 장치 %(devnum)s를 오프라인 (%(e)s)으로 설정할 수 없습니다."
#: ../blivet/iscsi.py:217
msgid "Unable to change iSCSI initiator name once set"
-msgstr "iSCSI 개시자 이름이 설정되면 이를 변경할 수 없음 "
+msgstr "iSCSI 개시자 이름이 설정되면 이를 변경할 수 없음"
#: ../blivet/iscsi.py:219
msgid "Must provide an iSCSI initiator name"
-msgstr "iSCSI 개시자 이름을 지정하십시오 "
+msgstr "iSCSI 개시자 이름을 지정하십시오"
#: ../blivet/iscsi.py:410
msgid "iSCSI not available"
-msgstr "iSCSI 사용 불가능 "
+msgstr "iSCSI 사용 불가능"
#: ../blivet/iscsi.py:412
msgid "No initiator name set"
-msgstr "이니셰이터 이름이 설정되지 않음 "
+msgstr "이니셰이터 이름이 설정되지 않음"
#: ../blivet/iscsi.py:530
msgid "No iSCSI nodes discovered"
-msgstr "iSCSI 노드를 찾을 수 없음 "
+msgstr "iSCSI 노드를 찾을 수 없음"
#: ../blivet/iscsi.py:550
msgid "No new iSCSI nodes discovered"
-msgstr "새 iSCSI 노드를 찾을 수 없음 "
+msgstr "새 iSCSI 노드를 찾을 수 없음"
#: ../blivet/iscsi.py:553
msgid "Could not log in to any of the discovered nodes"
-msgstr "검색된 노드로 로그인할 수 없음 "
+msgstr "검색된 노드로 로그인할 수 없음"
#: ../blivet/partitioning.py:454
msgid "unable to allocate aligned partition"
@@ -265,7 +260,7 @@ msgstr "LVM 요청에 필요한 공간이 충분하지 않습니다"
#: ../blivet/deviceaction.py:194
#, python-format
msgid "Executing %(action)s"
-msgstr "%(action)s 실행 "
+msgstr "%(action)s 실행"
#: ../blivet/deviceaction.py:322
msgid "create device"
@@ -286,7 +281,7 @@ msgstr "포맷 생성"
#: ../blivet/deviceaction.py:613
#, python-format
msgid "Creating %(type)s on %(device)s"
-msgstr "%(device)s에 %(type)s 생성 "
+msgstr "%(device)s에 %(type)s 생성"
#: ../blivet/deviceaction.py:640
#, python-format
@@ -327,11 +322,11 @@ msgstr "컨테이너 멤버 삭제"
#: ../blivet/deviceaction.py:1058
msgid "configure format"
-msgstr "포맷 설정 "
+msgstr "포맷 설정"
#: ../blivet/deviceaction.py:1114
msgid "configure device"
-msgstr "장치 설정 "
+msgstr "장치 설정"
#: ../blivet/devices/raid.py:58
#, python-format
@@ -341,32 +336,28 @@ msgid ""
msgid_plural ""
"RAID level %(raid_level)s requires that device have at least %(min_members)d "
"members."
-msgstr[0] ""
-"RAID 레벨 %(raid_level)s에는 최소 %(min_members)d개의 장치 구성원이 필요합니"
-"다. "
+msgstr[0] "RAID 레벨 %(raid_level)s에는 최소 %(min_members)d개의 장치 구성원이 필요합니다."
#: ../blivet/devices/raid.py:79
#, python-format
msgid ""
"RAID level %(raid_level)s is an invalid value. Must be one of (%(levels)s)."
-msgstr ""
-"RAID 레벨 %(raid_level)s이/가 유효한 값이 아닙니다. (%(levels)s) 중 하나여야 "
-"합니다. "
+msgstr "RAID 레벨 %(raid_level)s이/가 유효한 값이 아닙니다. (%(levels)s) 중 하나여야 합니다."
#: ../blivet/devices/raid.py:104
#, python-format
msgid "Cannot remove a member from existing %s array"
-msgstr "기존 %s 어레이에서 장치 구성원을 제거할 수 없습니다 "
+msgstr "기존 %s 어레이에서 장치 구성원을 제거할 수 없습니다"
-#: ../blivet/formats/fs.py:934
+#: ../blivet/formats/fs.py:932
msgid "EFI System Partition"
-msgstr "EFI 시스템 파티션 "
+msgstr "EFI 시스템 파티션"
-#: ../blivet/formats/fs.py:1139
+#: ../blivet/formats/fs.py:1137
msgid "Apple Bootstrap"
msgstr "Apple 부트스트랩"
-#: ../blivet/formats/fs.py:1175
+#: ../blivet/formats/fs.py:1173
msgid "Linux HFS+ ESP"
msgstr "Linux HFS+ ESP"
@@ -384,7 +375,7 @@ msgstr "암호화됨"
#: ../blivet/formats/luks.py:388
msgid "DM Integrity"
-msgstr "DM 무결성 "
+msgstr "DM 무결성"
#: ../blivet/formats/__init__.py:148
msgid "Unknown"
diff --git a/po/zh_CN.po b/po/zh_CN.po
index 480801de..2be6d492 100644
--- a/po/zh_CN.po
+++ b/po/zh_CN.po
@@ -20,24 +20,24 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-05-21 12:42+0200\n"
-"PO-Revision-Date: 2018-09-13 02:13+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/blivet/"
-"language/zh_CN/)\n"
+"POT-Creation-Date: 2020-01-29 14:04+0100\n"
+"PO-Revision-Date: 2020-07-03 07:42+0000\n"
+"Last-Translator: Ludek Janda <ljanda@redhat.com>\n"
+"Language-Team: Chinese (Simplified) <https://translate.fedoraproject.org/"
+"projects/blivet/blivet-rhel8/zh_CN/>\n"
"Language: zh_CN\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-"X-Generator: Zanata 4.6.2\n"
+"X-Generator: Weblate 4.1.1\n"
#: ../blivet/errors.py:210
msgid ""
"This is usually caused by cloning the device image resulting in duplication "
"of the UUID value which should be unique. In that case you can either "
"disconnect one of the devices or reformat it."
-msgstr ""
+msgstr "这通常是由于克隆设备镜像导致 UUID 值重复造成的,而 UUID 值应该是唯一的。如果是这种情况,可以断开其中一个设备或重新格式化它。"
#: ../blivet/errors.py:217
msgid ""
@@ -45,8 +45,8 @@ msgid ""
"kernel is reporting partitions on. It is unclear what the exact problem is. "
"Please file a bug at http://bugzilla.redhat.com"
msgstr ""
-"由于某些原因无法定位内核报告中显示在其中进行分区的磁盘的磁盘标签。尚不了解具"
-"体问题所在。请在 http://bugzilla.redhat.com 提交 bug。"
+"由于某些原因无法定位内核报告中显示在其中进行分区的磁盘的磁盘标签。尚不了解具体问题所在。请在 http://bugzilla.redhat.com 提交 "
+"bug"
#: ../blivet/errors.py:224
msgid ""
@@ -170,7 +170,7 @@ msgstr "设定后就无法更改 iSCSI 启动程序名称"
#: ../blivet/iscsi.py:219
msgid "Must provide an iSCSI initiator name"
-msgstr "您必须提供一个 iSCSI 启动程序名称。"
+msgstr "您必须提供一个 iSCSI 启动程序名称"
#: ../blivet/iscsi.py:410
msgid "iSCSI not available"
@@ -223,7 +223,7 @@ msgstr ""
#: ../blivet/partitioning.py:962
msgid "Unable to allocate requested partition scheme."
-msgstr "无法分配所需分区方案"
+msgstr "无法分配所需分区方案。"
#: ../blivet/partitioning.py:997
msgid "not enough free space after creating extended partition"
@@ -347,15 +347,15 @@ msgstr ""
msgid "Cannot remove a member from existing %s array"
msgstr "无法从存在的 %s 阵列中删除一个成员"
-#: ../blivet/formats/fs.py:934
+#: ../blivet/formats/fs.py:932
msgid "EFI System Partition"
msgstr "EFI 系统分区"
-#: ../blivet/formats/fs.py:1139
+#: ../blivet/formats/fs.py:1137
msgid "Apple Bootstrap"
msgstr "Apple Bootstrap"
-#: ../blivet/formats/fs.py:1175
+#: ../blivet/formats/fs.py:1173
msgid "Linux HFS+ ESP"
msgstr "Linux HFS+ ESP"
--
2.25.4


@@ -0,0 +1,24 @@
From 7bc4e324580656585adad0cbe51d60ed3540b766 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 3 Jul 2020 13:04:23 +0200
Subject: [PATCH] Do not use FSAVAIL and FSUSE% options when running lsblk
These options were added in util-linux 2.33, which is not available
on older systems, so we should not use them.
---
blivet/blivet.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index fcc2080b..e7dbd37b 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -77,7 +77,7 @@ def __init__(self):
self._dump_file = "%s/storage.state" % tempfile.gettempdir()
try:
- options = "NAME,SIZE,OWNER,GROUP,MODE,FSTYPE,LABEL,UUID,PARTUUID,FSAVAIL,FSUSE%,MOUNTPOINT"
+ options = "NAME,SIZE,OWNER,GROUP,MODE,FSTYPE,LABEL,UUID,PARTUUID,MOUNTPOINT"
out = capture_output(["lsblk", "--bytes", "-a", "-o", options])
except Exception: # pylint: disable=broad-except
pass
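
Note (illustrative, not part of the imported patch): if the FSAVAIL/FSUSE% columns were still wanted on systems where util-linux is new enough, the option string could be assembled from the detected lsblk version instead of being dropped unconditionally. A minimal sketch, assuming blivet's capture_output helper (blivet.util); the version-probing function is hypothetical:

from blivet.util import capture_output

def _lsblk_supports_fs_columns():
    # `lsblk --version` prints e.g. "lsblk from util-linux 2.32.1";
    # FSAVAIL and FSUSE% need util-linux >= 2.33
    try:
        out = capture_output(["lsblk", "--version"])
        major, minor = (int(p) for p in out.strip().split()[-1].split(".")[:2])
    except Exception:  # pylint: disable=broad-except
        return False
    return (major, minor) >= (2, 33)

options = "NAME,SIZE,OWNER,GROUP,MODE,FSTYPE,LABEL,UUID,PARTUUID,MOUNTPOINT"
if _lsblk_supports_fs_columns():
    options += ",FSAVAIL,FSUSE%"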


@@ -0,0 +1,39 @@
From 462099a9137fb7997140360c07665a21615a0fea Mon Sep 17 00:00:00 2001
From: Dan Horák <dan@danny.cz>
Date: Tue, 7 Jul 2020 13:19:02 +0200
Subject: [PATCH] set allowed disk labels for s390x as standard ones (msdos +
gpt) plus dasd
This will solve issues when a SCSI or NVMe disk with a GPT partition table
is used with an s390x machine (rhbz#1827066, rhbz#1854110).
---
blivet/formats/disklabel.py | 2 +-
tests/formats_test/disklabel_test.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
index 3dcac12b..53e2c010 100644
--- a/blivet/formats/disklabel.py
+++ b/blivet/formats/disklabel.py
@@ -230,7 +230,7 @@ def get_platform_label_types(cls):
elif arch.is_efi() and not arch.is_aarch64():
label_types = ["gpt", "msdos"]
elif arch.is_s390():
- label_types = ["msdos", "dasd"]
+ label_types += ["dasd"]
return label_types
diff --git a/tests/formats_test/disklabel_test.py b/tests/formats_test/disklabel_test.py
index 94f3775f..3068dc07 100644
--- a/tests/formats_test/disklabel_test.py
+++ b/tests/formats_test/disklabel_test.py
@@ -95,7 +95,7 @@ def test_platform_label_types(self, arch):
arch.is_arm.return_value = False
arch.is_s390.return_value = True
- self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "dasd"])
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt", "dasd"])
arch.is_s390.return_value = False
def test_label_type_size_check(self):
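
Note (illustrative): with this change an s390x machine keeps the generic label list and only appends the DASD type, which is exactly what the updated test asserts. A quick check, assuming an s390x host:

from blivet.formats.disklabel import DiskLabel

# expected: ['msdos', 'gpt', 'dasd'] on s390x; other architectures are unaffected
print(DiskLabel.get_platform_label_types())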


@@ -0,0 +1,47 @@
From 7303f4a3f2fe3280339f6303dcff31b6ade12176 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 9 Jul 2020 16:30:55 +0200
Subject: [PATCH] Do not use BlockDev.utils_have_kernel_module to check for
modules
The function unfortunately uses only the name when searching for
the module and we need to use aliases for modules like ext2 and
ext3. So we need to use "modprobe --dry-run" instead.
---
blivet/formats/fs.py | 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index eee15aaa..bcfbc08e 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -60,12 +60,6 @@
import logging
log = logging.getLogger("blivet")
-import gi
-gi.require_version("GLib", "2.0")
-gi.require_version("BlockDev", "2.0")
-
-from gi.repository import GLib
-from gi.repository import BlockDev
AVAILABLE_FILESYSTEMS = kernel_filesystems
@@ -462,13 +456,13 @@ def check_module(self):
for module in self._modules:
try:
- succ = BlockDev.utils_have_kernel_module(module)
- except GLib.GError as e:
+ rc = util.run_program(["modprobe", "--dry-run", module])
+ except OSError as e:
log.error("Could not check kernel module availability %s: %s", module, e)
self._supported = False
return
- if not succ:
+ if rc:
log.debug("Kernel module %s not available", module)
self._supported = False
return
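
Note (illustrative, outside blivet): the alias-aware check the patch switches to can be reproduced with nothing but the standard library; this is essentially what the patched check_module() now shells out to.

import subprocess

def kernel_module_available(module):
    """Return True if modprobe can resolve the module name or one of its
    aliases (e.g. "ext2"/"ext3", which a plain name lookup would miss)."""
    try:
        rc = subprocess.call(["modprobe", "--dry-run", module],
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.DEVNULL)
    except OSError:
        return False
    return rc == 0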


@@ -0,0 +1,844 @@
From 18ce766bc90abdf0d8ca54bdf578463392a52ee9 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 12 Aug 2020 10:57:19 +0200
Subject: [PATCH 1/2] Fix name resolution for MD devices and partitions on them
UDev data for both member disks/partitions and partitions on arrays
contain the MD_* properties, so we must be extra careful when deciding
what name we'll use for the device.
Resolves: rhbz#1862904
---
blivet/udev.py | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/blivet/udev.py b/blivet/udev.py
index 41c99496..c85eb3dc 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -202,9 +202,16 @@ def device_get_name(udev_info):
""" Return the best name for a device based on the udev db data. """
if "DM_NAME" in udev_info:
name = udev_info["DM_NAME"]
- elif "MD_DEVNAME" in udev_info and os.path.exists(device_get_sysfs_path(udev_info) + "/md"):
+ elif "MD_DEVNAME" in udev_info:
mdname = udev_info["MD_DEVNAME"]
- if device_is_partition(udev_info):
+ if device_is_md(udev_info):
+ # MD RAID array -> use MD_DEVNAME
+ name = mdname
+ elif device_get_format(udev_info) == "linux_raid_member":
+ # MD RAID member -> use SYS_NAME
+ name = udev_info["SYS_NAME"]
+ elif device_is_partition(udev_info):
+ # partition on RAID -> construct name from MD_DEVNAME + partition number
# for partitions on named RAID we want to use the raid name, not
# the node, e.g. "raid1" instead of "md127p1"
partnum = udev_info["ID_PART_ENTRY_NUMBER"]
@@ -213,6 +220,7 @@ def device_get_name(udev_info):
else:
name = mdname + partnum
else:
+ # something else -> default to MD_DEVNAME
name = mdname
else:
name = udev_info["SYS_NAME"]
--
2.25.4
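
Note (illustrative): the decision order introduced above can be condensed into a small standalone sketch. The extra arguments stand in for blivet's device_is_md(), device_is_partition() and device_get_format() calls; the data-driven checks live in the tests added by the follow-up patch below.

def md_aware_name(info, is_md, is_partition, fmt):
    # condensed restatement of device_get_name() for MD-related udev entries
    if "DM_NAME" in info:
        return info["DM_NAME"]
    if "MD_DEVNAME" not in info:
        return info["SYS_NAME"]
    mdname = info["MD_DEVNAME"]
    if is_md:                            # the array itself -> MD_DEVNAME
        return mdname
    if fmt == "linux_raid_member":       # a member disk/partition -> kernel name
        return info["SYS_NAME"]
    if is_partition:                     # a partition on the array -> name + number
        partnum = info["ID_PART_ENTRY_NUMBER"]
        return mdname + ("p" + partnum if mdname[-1].isdigit() else partnum)
    return mdname                        # anything else -> default to MD_DEVNAME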
From dc96961adcb9dd6ef6d09e4daaa0a5eaae1ffe60 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 12 Aug 2020 11:10:03 +0200
Subject: [PATCH 2/2] Add tests for udev.device_get_name for RAID devices
This includes sample UDev data for various combinations of RAID
devices configuration.
Related: rhbz#1862904
---
tests/udev_data/__init__.py | 0
tests/udev_data/raid_data.py | 705 +++++++++++++++++++++++++++++++++++
tests/udev_test.py | 46 +++
3 files changed, 751 insertions(+)
create mode 100644 tests/udev_data/__init__.py
create mode 100644 tests/udev_data/raid_data.py
diff --git a/tests/udev_data/__init__.py b/tests/udev_data/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/udev_data/raid_data.py b/tests/udev_data/raid_data.py
new file mode 100644
index 00000000..509cbfbd
--- /dev/null
+++ b/tests/udev_data/raid_data.py
@@ -0,0 +1,705 @@
+# Sample UDev data for various MD RAID devices:
+# - member_boot: data for the member disk or partition after booting the system
+# - member_assemble: data for the member disk or partition after re-assembling a stopped array using
+# 'mdadm --assemble --scan' (yes, this is different from member_boot)
+# - raid_device: data for the RAID array device
+# - raid_partition: data for partition on the array
+#
+# We have data for different combinations of member "types", MD metadata versions and named vs. unnamed
+# RAID devices.
+# The data were gathered on Fedora 32.
+
+
+class RaidOnDisk1():
+ member_name = "sda"
+ raid_name = "127"
+ raid_node = "md127"
+ metadata_version = "1.2"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:0 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'DEVNAME': '/dev/sda',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:127',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:127',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_ENC': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_SUB': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_UUID_SUB_ENC': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:0',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-0',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md127',
+ 'MD_DEVNAME': '127',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'unsafe',
+ 'MINOR': '0',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdadm-last-resort@md127.timer',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5529231',
+ 'SYS_NAME': 'sda',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:0 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'DEVNAME': '/dev/sda',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:127',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:127',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_ENC': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_SUB': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_UUID_SUB_ENC': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:0',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-0',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '0',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5529231',
+ 'SYS_NAME': 'sda',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda'}
+
+ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-name-localhost.localdomain:127 /dev/disk/by-id/md-uuid-54956eb2:69838759:e2ad4c40:acc92e4b /dev/md/127',
+ 'DEVNAME': '/dev/md127',
+ 'DEVPATH': '/devices/virtual/block/md127',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '4eec0361',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sda_DEV': '/dev/sda',
+ 'MD_DEVICE_ev_sda_ROLE': '0',
+ 'MD_DEVICE_ev_sdb_DEV': '/dev/sdb',
+ 'MD_DEVICE_ev_sdb_ROLE': '1',
+ 'MD_DEVNAME': '127',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:127',
+ 'MD_UUID': '54956eb2:69838759:e2ad4c40:acc92e4b',
+ 'MINOR': '127',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '603606045',
+ 'SYS_NAME': 'md127',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127'}
+
+ raid_partition = {'DEVLINKS': '/dev/md/127p1 /dev/disk/by-id/md-uuid-54956eb2:69838759:e2ad4c40:acc92e4b-part1 /dev/disk/by-id/md-name-localhost.localdomain:127-part1',
+ 'DEVNAME': '/dev/md127p1',
+ 'DEVPATH': '/devices/virtual/block/md127/md127p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:127',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '2091008',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '4eec0361-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sda_DEV': '/dev/sda',
+ 'MD_DEVICE_ev_sda_ROLE': '0',
+ 'MD_DEVICE_ev_sdb_DEV': '/dev/sdb',
+ 'MD_DEVICE_ev_sdb_ROLE': '1',
+ 'MD_DEVNAME': '127',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:127',
+ 'MD_UUID': '54956eb2:69838759:e2ad4c40:acc92e4b',
+ 'MINOR': '2',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '603714783',
+ 'SYS_NAME': 'md127p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127/md127p1'}
+
+
+class RaidOnDisk2():
+ member_name = "sdc"
+ raid_name = "name"
+ raid_node = "md127"
+ metadata_version = "1.2"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:4',
+ 'DEVNAME': '/dev/sdc',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:name',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:name',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_ENC': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_SUB': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_UUID_SUB_ENC': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:4',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_4',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-4',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md127',
+ 'MD_DEVNAME': 'name',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'yes',
+ 'MINOR': '32',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '6109555',
+ 'SYS_NAME': 'sdc',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:4',
+ 'DEVNAME': '/dev/sdc',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:name',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:name',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_ENC': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_SUB': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_UUID_SUB_ENC': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:4',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_4',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-4',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '32',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '6109555',
+ 'SYS_NAME': 'sdc',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc'}
+
+ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-name-localhost.localdomain:name /dev/disk/by-id/md-uuid-143d480c:12c3909f:547698a9:f94a1c4f /dev/md/name',
+ 'DEVNAME': '/dev/md127',
+ 'DEVPATH': '/devices/virtual/block/md127',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '19e9cb5b',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdc_DEV': '/dev/sdc',
+ 'MD_DEVICE_ev_sdc_ROLE': '0',
+ 'MD_DEVICE_ev_sdd_DEV': '/dev/sdd',
+ 'MD_DEVICE_ev_sdd_ROLE': '1',
+ 'MD_DEVNAME': 'name',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:name',
+ 'MD_UUID': '143d480c:12c3909f:547698a9:f94a1c4f',
+ 'MINOR': '127',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5844744',
+ 'SYS_NAME': 'md127',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127'}
+
+ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-143d480c:12c3909f:547698a9:f94a1c4f-part1 /dev/disk/by-id/md-name-localhost.localdomain:name-part1 /dev/md/name1',
+ 'DEVNAME': '/dev/md127p1',
+ 'DEVPATH': '/devices/virtual/block/md127/md127p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:127',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '2091008',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '19e9cb5b-01',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': 'ec985633',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdc_DEV': '/dev/sdc',
+ 'MD_DEVICE_ev_sdc_ROLE': '0',
+ 'MD_DEVICE_ev_sdd_DEV': '/dev/sdd',
+ 'MD_DEVICE_ev_sdd_ROLE': '1',
+ 'MD_DEVNAME': 'name',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:name',
+ 'MD_UUID': '143d480c:12c3909f:547698a9:f94a1c4f',
+ 'MINOR': '1',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5928255',
+ 'SYS_NAME': 'md127p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127/md127p1'}
+
+
+class RaidOnDisk3():
+ member_name = "sde"
+ raid_name = "125"
+ raid_node = "md125"
+ metadata_version = "0.9"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:1',
+ 'DEVNAME': '/dev/sde',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:1',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_1',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-1',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md125',
+ 'MD_DEVNAME': '125',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'unsafe',
+ 'MINOR': '64',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdadm-last-resort@md125.timer',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5538551',
+ 'SYS_NAME': 'sde',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:1',
+ 'DEVNAME': '/dev/sde',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:1',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_1',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-1',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '64',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5538551',
+ 'SYS_NAME': 'sde',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde'}
+
+ raid_device = {'DEVLINKS': '/dev/md/125 /dev/disk/by-id/md-uuid-c4ef60f5:e3745f70:bfe78010:bc810f04',
+ 'DEVNAME': '/dev/md125',
+ 'DEVPATH': '/devices/virtual/block/md125',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': 'e74877cd',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sde_DEV': '/dev/sde',
+ 'MD_DEVICE_ev_sde_ROLE': '0',
+ 'MD_DEVICE_ev_sdf_DEV': '/dev/sdf',
+ 'MD_DEVICE_ev_sdf_ROLE': '1',
+ 'MD_DEVNAME': '125',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': 'c4ef60f5:e3745f70:bfe78010:bc810f04',
+ 'MINOR': '125',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5786380',
+ 'SYS_NAME': 'md125',
+ 'SYS_PATH': '/sys/devices/virtual/block/md125'}
+
+ raid_partition = {'DEVLINKS': '/dev/md/125p1 /dev/disk/by-id/md-uuid-c4ef60f5:e3745f70:bfe78010:bc810f04-part1',
+ 'DEVNAME': '/dev/md125p1',
+ 'DEVPATH': '/devices/virtual/block/md125/md125p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:125',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '2094976',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': 'e74877cd-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sde_DEV': '/dev/sde',
+ 'MD_DEVICE_ev_sde_ROLE': '0',
+ 'MD_DEVICE_ev_sdf_DEV': '/dev/sdf',
+ 'MD_DEVICE_ev_sdf_ROLE': '1',
+ 'MD_DEVNAME': '125',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': 'c4ef60f5:e3745f70:bfe78010:bc810f04',
+ 'MINOR': '3',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8808457',
+ 'SYS_NAME': 'md125p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md125/md125p1'}
+
+
+class RaidOnPartition1():
+ member_name = "sdh3"
+ raid_name = "122"
+ raid_node = "md122"
+ metadata_version = "1.2"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part3 /dev/disk/by-partuuid/73eb11a9-03',
+ 'DEVNAME': '/dev/sdh3',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:122',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:122',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_ENC': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_SUB': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_UUID_SUB_ENC': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '3',
+ 'ID_PART_ENTRY_OFFSET': '411648',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-03',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md122',
+ 'MD_DEVNAME': '122',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'yes',
+ 'MINOR': '115',
+ 'PARTN': '3',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8920462',
+ 'SYS_NAME': 'sdh3',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part3 /dev/disk/by-partuuid/73eb11a9-03',
+ 'DEVNAME': '/dev/sdh3',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:122',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:122',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_ENC': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_SUB': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_UUID_SUB_ENC': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '3',
+ 'ID_PART_ENTRY_OFFSET': '411648',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-03',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '115',
+ 'PARTN': '3',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8920462',
+ 'SYS_NAME': 'sdh3',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3'}
+
+ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-uuid-0628d995:eb60ebd1:a7675173:0b16f212 /dev/disk/by-id/md-name-localhost.localdomain:122 /dev/md/122',
+ 'DEVNAME': '/dev/md122',
+ 'DEVPATH': '/devices/virtual/block/md122',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '6dc80b3b',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh3_DEV': '/dev/sdh3',
+ 'MD_DEVICE_ev_sdh3_ROLE': '0',
+ 'MD_DEVICE_ev_sdh5_DEV': '/dev/sdh5',
+ 'MD_DEVICE_ev_sdh5_ROLE': '1',
+ 'MD_DEVNAME': '122',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:122',
+ 'MD_UUID': '0628d995:eb60ebd1:a7675173:0b16f212',
+ 'MINOR': '122',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8770105',
+ 'SYS_NAME': 'md122',
+ 'SYS_PATH': '/sys/devices/virtual/block/md122'}
+
+ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-0628d995:eb60ebd1:a7675173:0b16f212-part1 /dev/disk/by-id/md-name-localhost.localdomain:122-part1 /dev/md/122p1',
+ 'DEVNAME': '/dev/md122p1',
+ 'DEVPATH': '/devices/virtual/block/md122/md122p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:122',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '200704',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '6dc80b3b-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh3_DEV': '/dev/sdh3',
+ 'MD_DEVICE_ev_sdh3_ROLE': '0',
+ 'MD_DEVICE_ev_sdh5_DEV': '/dev/sdh5',
+ 'MD_DEVICE_ev_sdh5_ROLE': '1',
+ 'MD_DEVNAME': '122',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:122',
+ 'MD_UUID': '0628d995:eb60ebd1:a7675173:0b16f212',
+ 'MINOR': '6',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '9003885',
+ 'SYS_NAME': 'md122p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md122/md122p1'}
+
+
+class RaidOnPartition2():
+ member_name = "sdh1"
+ raid_name = "123"
+ raid_node = "md123"
+ metadata_version = "0.9"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part1 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part1 /dev/disk/by-partuuid/73eb11a9-01',
+ 'DEVNAME': '/dev/sdh1',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-01',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md123',
+ 'MD_DEVNAME': '123',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'unsafe',
+ 'MINOR': '113',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdadm-last-resort@md123.timer',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8778733',
+ 'SYS_NAME': 'sdh1',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part1 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part1 /dev/disk/by-partuuid/73eb11a9-01',
+ 'DEVNAME': '/dev/sdh1',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-01',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '113',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'UDISKS_MD_MEMBER_DEVICES': '2',
+ 'UDISKS_MD_MEMBER_EVENTS': '18',
+ 'UDISKS_MD_MEMBER_LEVEL': 'raid1',
+ 'UDISKS_MD_MEMBER_UPDATE_TIME': '1597143914',
+ 'UDISKS_MD_MEMBER_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'USEC_INITIALIZED': '8778733',
+ 'SYS_NAME': 'sdh1',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1'}
+
+ raid_device = {'DEVLINKS': '/dev/md/123 /dev/disk/by-id/md-uuid-335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'DEVNAME': '/dev/md123',
+ 'DEVPATH': '/devices/virtual/block/md123',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '653f84c8',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh1_DEV': '/dev/sdh1',
+ 'MD_DEVICE_ev_sdh1_ROLE': '0',
+ 'MD_DEVICE_ev_sdh2_DEV': '/dev/sdh2',
+ 'MD_DEVICE_ev_sdh2_ROLE': '1',
+ 'MD_DEVNAME': '123',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'MINOR': '123',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8760382',
+ 'SYS_NAME': 'md123',
+ 'SYS_PATH': '/sys/devices/virtual/block/md123'}
+
+ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-335b35e0:f1af8e86:bfe78010:bc810f04-part1 /dev/md/123p1',
+ 'DEVNAME': '/dev/md123p1',
+ 'DEVPATH': '/devices/virtual/block/md123/md123p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:123',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '202624',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '653f84c8-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh1_DEV': '/dev/sdh1',
+ 'MD_DEVICE_ev_sdh1_ROLE': '0',
+ 'MD_DEVICE_ev_sdh2_DEV': '/dev/sdh2',
+ 'MD_DEVICE_ev_sdh2_ROLE': '1',
+ 'MD_DEVNAME': '123',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'MINOR': '5',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8952876',
+ 'SYS_NAME': 'md123p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md123/md123p1'}
diff --git a/tests/udev_test.py b/tests/udev_test.py
index 653eeb6d..d30a647b 100644
--- a/tests/udev_test.py
+++ b/tests/udev_test.py
@@ -2,6 +2,8 @@
import unittest
import mock
+from udev_data import raid_data
+
class UdevTest(unittest.TestCase):
@@ -77,3 +79,47 @@ class UdevTest(unittest.TestCase):
# Normal MD RAID (w/ at least one non-disk member)
device_get_slaves.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertFalse(blivet.udev.device_is_disk(info))
+
+
+class UdevGetNameRaidTest(unittest.TestCase):
+
+ def _test_raid_name(self, udev_data):
+ import blivet.udev
+
+ # members don't have the device_get_sysfs_path(info) + "/md" folder
+ with mock.patch("blivet.udev.device_is_md", return_value=False):
+ member_name = blivet.udev.device_get_name(udev_data.member_boot)
+ self.assertEqual(member_name, udev_data.member_name)
+
+ member_name = blivet.udev.device_get_name(udev_data.member_assemble)
+ self.assertEqual(member_name, udev_data.member_name)
+
+ with mock.patch("blivet.udev.device_is_md", return_value=True):
+ raid_name = blivet.udev.device_get_name(udev_data.raid_device)
+ self.assertEqual(raid_name, udev_data.raid_name)
+
+ # partitions also don't have the device_get_sysfs_path(info) + "/md" folder
+ with mock.patch("blivet.udev.device_is_md", return_value=False):
+ part_name = blivet.udev.device_get_name(udev_data.raid_partition)
+ expected_name = udev_data.raid_name + "p1" if udev_data.raid_name[-1].isdigit() else udev_data.raid_name + "1"
+ self.assertEqual(part_name, expected_name)
+
+ def test_raid_name_on_disk_no_name(self):
+ data = raid_data.RaidOnDisk1()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_disk__with_name(self):
+ data = raid_data.RaidOnDisk2()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_disk_old_metadata(self):
+ data = raid_data.RaidOnDisk3()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_part_no_name(self):
+ data = raid_data.RaidOnPartition1()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_part_old_metadata(self):
+ data = raid_data.RaidOnPartition2()
+ self._test_raid_name(data)
--
2.25.4


@@ -0,0 +1,269 @@
From f19140993e94be9e58c8a01c18f1907792f59927 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 5 Aug 2020 13:44:38 +0200
Subject: [PATCH] Fix ignoring disk devices with parents or children
For disk-like devices like multipath we should allow ignoring
these by simply ignoring the mpath device or by ignoring all of its
drives.
- when ignoring the "mpatha" device we should also ignore "sda" and
"sdb"
- when ignoring both "sda" and "sdb" we should also ignore "mpatha"
- when ignoring only "sda" we should not ignore "mpatha" (we don't
want to deal with an "incomplete" multipath device in the tree)
This is consistent with the existing behaviour when using exclusive
disks (or "ignoredisks --only-use" in kickstart).
Resolves: rhbz#1866243
---
blivet/devicetree.py | 51 ++++++++-----
tests/devicetree_test.py | 157 ++++++++++++++++++++++++++++-----------
2 files changed, 146 insertions(+), 62 deletions(-)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index 5cc360e1..2afb0d0e 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -907,31 +907,48 @@ class DeviceTreeBase(object):
hidden.add_hook(new=False)
lvm.lvm_cc_removeFilterRejectRegexp(hidden.name)
+ def _disk_in_taglist(self, disk, taglist):
+ # Taglist is a list containing mix of disk names and tags into which disk may belong.
+ # Check if it does. Raise ValueError if unknown tag is encountered.
+ if disk.name in taglist:
+ return True
+ tags = [t[1:] for t in taglist if t.startswith("@")]
+ for tag in tags:
+ if tag not in Tags.__members__:
+ raise ValueError("unknown ignoredisk tag '@%s' encountered" % tag)
+ if Tags(tag) in disk.tags:
+ return True
+ return False
+
def _is_ignored_disk(self, disk):
""" Checks config for lists of exclusive and ignored disks
and returns if the given one should be ignored
"""
-
- def disk_in_taglist(disk, taglist):
- # Taglist is a list containing mix of disk names and tags into which disk may belong.
- # Check if it does. Raise ValueError if unknown tag is encountered.
- if disk.name in taglist:
- return True
- tags = [t[1:] for t in taglist if t.startswith("@")]
- for tag in tags:
- if tag not in Tags.__members__:
- raise ValueError("unknown ignoredisk tag '@%s' encountered" % tag)
- if Tags(tag) in disk.tags:
- return True
- return False
-
- return ((self.ignored_disks and disk_in_taglist(disk, self.ignored_disks)) or
- (self.exclusive_disks and not disk_in_taglist(disk, self.exclusive_disks)))
+ return ((self.ignored_disks and self._disk_in_taglist(disk, self.ignored_disks)) or
+ (self.exclusive_disks and not self._disk_in_taglist(disk, self.exclusive_disks)))
def _hide_ignored_disks(self):
# hide any subtrees that begin with an ignored disk
for disk in [d for d in self._devices if d.is_disk]:
- if self._is_ignored_disk(disk):
+ is_ignored = self.ignored_disks and self._disk_in_taglist(disk, self.ignored_disks)
+ is_exclusive = self.exclusive_disks and self._disk_in_taglist(disk, self.exclusive_disks)
+
+ if is_ignored:
+ if len(disk.children) == 1:
+ if not all(self._is_ignored_disk(d) for d in disk.children[0].parents):
+ raise DeviceTreeError("Including only a subset of raid/multipath member disks is not allowed.")
+
+ # and also children like fwraid or mpath
+ self.hide(disk.children[0])
+
+ # this disk is ignored: ignore it and all its potential parents
+ for p in disk.parents:
+ self.hide(p)
+
+ # and finally hide the disk itself
+ self.hide(disk)
+
+ if self.exclusive_disks and not is_exclusive:
ignored = True
# If the filter allows all members of a fwraid or mpath, the
# fwraid or mpath itself is implicitly allowed as well. I don't
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index a8f369cf..6032e7f6 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -370,51 +370,6 @@ class DeviceTreeTestCase(unittest.TestCase):
self.assertTrue(sdb in tree.devices)
self.assertTrue(sdc in tree.devices)
- # now test exclusive_disks special cases for multipath
- sda.format = get_format("multipath_member", exists=True)
- sdb.format = get_format("multipath_member", exists=True)
- sdc.format = get_format("multipath_member", exists=True)
- mpatha = MultipathDevice("mpatha", parents=[sda, sdb, sdc])
- tree._add_device(mpatha)
-
- tree.ignored_disks = []
- tree.exclusive_disks = ["mpatha"]
-
- with patch.object(tree, "hide") as hide:
- tree._hide_ignored_disks()
- self.assertFalse(hide.called)
-
- tree._hide_ignored_disks()
- self.assertTrue(sda in tree.devices)
- self.assertTrue(sdb in tree.devices)
- self.assertTrue(sdc in tree.devices)
- self.assertTrue(mpatha in tree.devices)
-
- # all members in exclusive_disks implies the mpath in exclusive_disks
- tree.exclusive_disks = ["sda", "sdb", "sdc"]
- with patch.object(tree, "hide") as hide:
- tree._hide_ignored_disks()
- self.assertFalse(hide.called)
-
- tree._hide_ignored_disks()
- self.assertTrue(sda in tree.devices)
- self.assertTrue(sdb in tree.devices)
- self.assertTrue(sdc in tree.devices)
- self.assertTrue(mpatha in tree.devices)
-
- tree.exclusive_disks = ["sda", "sdb"]
- with patch.object(tree, "hide") as hide:
- tree._hide_ignored_disks()
- hide.assert_any_call(mpatha)
- hide.assert_any_call(sdc)
-
- # verify that hide works as expected
- tree._hide_ignored_disks()
- self.assertTrue(sda in tree.devices)
- self.assertTrue(sdb in tree.devices)
- self.assertFalse(sdc in tree.devices)
- self.assertFalse(mpatha in tree.devices)
-
def test_get_related_disks(self):
tree = DeviceTree()
@@ -447,3 +402,115 @@ class DeviceTreeTestCase(unittest.TestCase):
tree.unhide(sda)
self.assertEqual(tree.get_related_disks(sda), set([sda, sdb]))
self.assertEqual(tree.get_related_disks(sdb), set([sda, sdb]))
+
+
+class DeviceTreeIgnoredExclusiveMultipathTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.tree = DeviceTree()
+
+ self.sda = DiskDevice("sda")
+ self.sdb = DiskDevice("sdb")
+ self.sdc = DiskDevice("sdc")
+
+ self.tree._add_device(self.sda)
+ self.tree._add_device(self.sdb)
+ self.tree._add_device(self.sdc)
+
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertTrue(self.sdc in self.tree.devices)
+
+ # now test exclusive_disks special cases for multipath
+ self.sda.format = get_format("multipath_member", exists=True)
+ self.sdb.format = get_format("multipath_member", exists=True)
+ self.sdc.format = get_format("multipath_member", exists=True)
+ self.mpatha = MultipathDevice("mpatha", parents=[self.sda, self.sdb, self.sdc])
+ self.tree._add_device(self.mpatha)
+
+ def test_exclusive_disks_multipath_1(self):
+ # multipath is exclusive -> all disks should be exclusive
+ self.tree.ignored_disks = []
+ self.tree.exclusive_disks = ["mpatha"]
+
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ self.assertFalse(hide.called)
+
+ self.tree._hide_ignored_disks()
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertTrue(self.sdc in self.tree.devices)
+ self.assertTrue(self.mpatha in self.tree.devices)
+
+ def test_exclusive_disks_multipath_2(self):
+ # all disks exclusive -> mpath should also be exclusive
+ self.tree.exclusive_disks = ["sda", "sdb", "sdc"]
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ self.assertFalse(hide.called)
+
+ self.tree._hide_ignored_disks()
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertTrue(self.sdc in self.tree.devices)
+ self.assertTrue(self.mpatha in self.tree.devices)
+
+ def test_exclusive_disks_multipath_3(self):
+ # some disks exclusive -> mpath should be hidden
+ self.tree.exclusive_disks = ["sda", "sdb"]
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ hide.assert_any_call(self.mpatha)
+ hide.assert_any_call(self.sdc)
+
+ # verify that hide works as expected
+ self.tree._hide_ignored_disks()
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertFalse(self.sdc in self.tree.devices)
+ self.assertFalse(self.mpatha in self.tree.devices)
+
+ def test_ignored_disks_multipath_1(self):
+ # mpatha ignored -> disks should be hidden
+ self.tree.ignored_disks = ["mpatha"]
+ self.tree.exclusive_disks = []
+
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ hide.assert_any_call(self.mpatha)
+ hide.assert_any_call(self.sda)
+ hide.assert_any_call(self.sdb)
+ hide.assert_any_call(self.sdc)
+
+ self.tree._hide_ignored_disks()
+ self.assertFalse(self.sda in self.tree.devices)
+ self.assertFalse(self.sdb in self.tree.devices)
+ self.assertFalse(self.sdc in self.tree.devices)
+ self.assertFalse(self.mpatha in self.tree.devices)
+
+ def test_ignored_disks_multipath_2(self):
+ # all disks ignored -> mpath should be hidden
+ self.tree.ignored_disks = ["sda", "sdb", "sdc"]
+ self.tree.exclusive_disks = []
+
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ hide.assert_any_call(self.mpatha)
+ hide.assert_any_call(self.sda)
+ hide.assert_any_call(self.sdb)
+ hide.assert_any_call(self.sdc)
+
+ self.tree._hide_ignored_disks()
+ self.assertFalse(self.sda in self.tree.devices)
+ self.assertFalse(self.sdb in self.tree.devices)
+ self.assertFalse(self.sdc in self.tree.devices)
+ self.assertFalse(self.mpatha in self.tree.devices)
+
+ def test_ignored_disks_multipath_3(self):
+ # some disks ignored -> error
+ self.tree.ignored_disks = ["sda", "sdb"]
+ self.tree.exclusive_disks = []
+
+ with self.assertRaises(DeviceTreeError):
+ self.tree._hide_ignored_disks()
--
2.25.4


@ -0,0 +1,459 @@
From 433d863cd8a57e5fc30948ff905e6a477ed5f17c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 14 Jul 2020 11:27:08 +0200
Subject: [PATCH 1/4] Add support for XFS format grow
---
blivet/formats/fs.py | 2 ++
blivet/tasks/availability.py | 1 +
blivet/tasks/fsresize.py | 54 ++++++++++++++++++++++++++++++++++++
3 files changed, 57 insertions(+)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index eee15aaa..12cb9885 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -1089,11 +1089,13 @@ class XFS(FS):
_formattable = True
_linux_native = True
_supported = True
+ _resizable = True
_packages = ["xfsprogs"]
_info_class = fsinfo.XFSInfo
_mkfs_class = fsmkfs.XFSMkfs
_readlabel_class = fsreadlabel.XFSReadLabel
_size_info_class = fssize.XFSSize
+ _resize_class = fsresize.XFSResize
_sync_class = fssync.XFSSync
_writelabel_class = fswritelabel.XFSWriteLabel
_writeuuid_class = fswriteuuid.XFSWriteUUID
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index b6b5955a..df62780c 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -455,5 +455,6 @@ TUNE2FS_APP = application_by_version("tune2fs", E2FSPROGS_VERSION)
XFSADMIN_APP = application("xfs_admin")
XFSDB_APP = application("xfs_db")
XFSFREEZE_APP = application("xfs_freeze")
+XFSRESIZE_APP = application("xfs_growfs")
MOUNT_APP = application("mount")
diff --git a/blivet/tasks/fsresize.py b/blivet/tasks/fsresize.py
index e7e26984..12c0367f 100644
--- a/blivet/tasks/fsresize.py
+++ b/blivet/tasks/fsresize.py
@@ -20,7 +20,10 @@
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
import abc
+import os
+import tempfile
+from contextlib import contextmanager
from six import add_metaclass
from ..errors import FSError
@@ -32,6 +35,9 @@ from . import task
from . import fstask
from . import dfresize
+import logging
+log = logging.getLogger("blivet")
+
@add_metaclass(abc.ABCMeta)
class FSResizeTask(fstask.FSTask):
@@ -115,6 +121,54 @@ class NTFSResize(FSResize):
]
+class XFSResize(FSResize):
+ ext = availability.XFSRESIZE_APP
+ unit = B
+ size_fmt = None
+
+ @contextmanager
+ def _do_temp_mount(self):
+ if self.fs.status:
+ yield
+ else:
+ dev_name = os.path.basename(self.fs.device)
+ tmpdir = tempfile.mkdtemp(prefix="xfs-tempmount-%s" % dev_name)
+ log.debug("mounting XFS on '%s' to '%s' for resize", self.fs.device, tmpdir)
+ try:
+ self.fs.mount(mountpoint=tmpdir)
+ except FSError as e:
+ raise FSError("Failed to mount XFS filesystem for resize: %s" % str(e))
+
+ try:
+ yield
+ finally:
+ util.umount(mountpoint=tmpdir)
+ os.rmdir(tmpdir)
+
+ def _get_block_size(self):
+ if self.fs._current_info:
+ # this should be set by update_size_info()
+ for line in self.fs._current_info.split("\n"):
+ if line.startswith("blocksize ="):
+ return int(line.split("=")[-1])
+
+ raise FSError("Failed to get XFS filesystem block size for resize")
+
+ def size_spec(self):
+ # size for xfs_growfs is in blocks
+ return str(self.fs.target_size.convert_to(self.unit) / self._get_block_size())
+
+ @property
+ def args(self):
+ return [self.fs.system_mountpoint, "-D", self.size_spec()]
+
+ def do_task(self):
+ """ Resizes the XFS format. """
+
+ with self._do_temp_mount():
+ super(XFSResize, self).do_task()
+
+
class TmpFSResize(FSResize):
ext = availability.MOUNT_APP
--
2.26.2
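A note on the size calculation in the patch above: xfs_growfs takes its -D argument in filesystem blocks, so XFSResize converts the byte target into blocks using the blocksize reported by xfs_db. A minimal standalone sketch of that conversion (the 4096-byte block size and the mount point are illustrative values, not read from a real filesystem):

    # Hypothetical stand-in for XFSResize.size_spec: convert a byte target
    # into the block count that xfs_growfs expects after "-D".
    def growfs_block_count(target_bytes, block_size=4096):
        # xfs_growfs can only grow, so callers must pass a target at least
        # as large as the current filesystem size
        return target_bytes // block_size

    # growing to 64 MiB with 4 KiB blocks -> ["-D", "16384"]
    args = ["/mnt/xfs-tempmount", "-D", str(growfs_block_count(64 * 1024 * 1024))]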
From 56d05334231c30699a9c77dedbc23fdb021b9dee Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 14 Jul 2020 11:27:51 +0200
Subject: [PATCH 2/4] Add tests for XFS resize
XFS supports only grow, so we can't reuse most of the fstesting
code, and we also need to test the resize on a partition because
XFS won't allow growing to a size bigger than the underlying block
device.
---
tests/formats_test/fs_test.py | 91 +++++++++++++++++++++++++++++++++
tests/formats_test/fstesting.py | 33 ++++++------
2 files changed, 107 insertions(+), 17 deletions(-)
diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py
index 15fc0c35..9bc5d20d 100644
--- a/tests/formats_test/fs_test.py
+++ b/tests/formats_test/fs_test.py
@@ -2,8 +2,13 @@ import os
import tempfile
import unittest
+import parted
+
import blivet.formats.fs as fs
from blivet.size import Size, ROUND_DOWN
+from blivet.errors import DeviceFormatError
+from blivet.formats import get_format
+from blivet.devices import PartitionDevice, DiskDevice
from tests import loopbackedtestcase
@@ -50,6 +55,92 @@ class ReiserFSTestCase(fstesting.FSAsRoot):
class XFSTestCase(fstesting.FSAsRoot):
_fs_class = fs.XFS
+ def can_resize(self, an_fs):
+ resize_tasks = (an_fs._resize, an_fs._size_info)
+ return not any(t.availability_errors for t in resize_tasks)
+
+ def _create_partition(self, disk, size):
+ disk.format = get_format("disklabel", device=disk.path, label_type="msdos")
+ disk.format.create()
+ pstart = disk.format.alignment.grainSize
+ pend = pstart + int(Size(size) / disk.format.parted_device.sectorSize)
+ disk.format.add_partition(pstart, pend, parted.PARTITION_NORMAL)
+ disk.format.parted_disk.commit()
+ part = disk.format.parted_disk.getPartitionBySector(pstart)
+
+ device = PartitionDevice(os.path.basename(part.path))
+ device.disk = disk
+ device.exists = True
+ device.parted_partition = part
+
+ return device
+
+ def _remove_partition(self, partition, disk):
+ disk.format.remove_partition(partition.parted_partition)
+ disk.format.parted_disk.commit()
+
+ def test_resize(self):
+ an_fs = self._fs_class()
+ if not an_fs.formattable:
+ self.skipTest("can not create filesystem %s" % an_fs.name)
+ an_fs.device = self.loop_devices[0]
+ self.assertIsNone(an_fs.create())
+ an_fs.update_size_info()
+
+ self._test_sizes(an_fs)
+ # CHECKME: target size is still 0 after updated_size_info is called.
+ self.assertEqual(an_fs.size, Size(0) if an_fs.resizable else an_fs._size)
+
+ if not self.can_resize(an_fs):
+ self.assertFalse(an_fs.resizable)
+ # Not resizable, so can not do resizing actions.
+ with self.assertRaises(DeviceFormatError):
+ an_fs.target_size = Size("64 MiB")
+ with self.assertRaises(DeviceFormatError):
+ an_fs.do_resize()
+ else:
+ disk = DiskDevice(os.path.basename(self.loop_devices[0]))
+ part = self._create_partition(disk, Size("50 MiB"))
+ an_fs = self._fs_class()
+ an_fs.device = part.path
+ self.assertIsNone(an_fs.create())
+ an_fs.update_size_info()
+
+ self.assertTrue(an_fs.resizable)
+
+ # grow the partition so we can grow the filesystem
+ self._remove_partition(part, disk)
+ part = self._create_partition(disk, size=part.size + Size("40 MiB"))
+
+ # Try a reasonable target size
+ TARGET_SIZE = Size("64 MiB")
+ an_fs.target_size = TARGET_SIZE
+ self.assertEqual(an_fs.target_size, TARGET_SIZE)
+ self.assertNotEqual(an_fs._size, TARGET_SIZE)
+ self.assertIsNone(an_fs.do_resize())
+ ACTUAL_SIZE = TARGET_SIZE.round_to_nearest(an_fs._resize.unit, rounding=ROUND_DOWN)
+ self.assertEqual(an_fs.size, ACTUAL_SIZE)
+ self.assertEqual(an_fs._size, ACTUAL_SIZE)
+ self._test_sizes(an_fs)
+
+ self._remove_partition(part, disk)
+
+ # and no errors should occur when checking
+ self.assertIsNone(an_fs.do_check())
+
+ def test_shrink(self):
+ self.skipTest("Not checking resize for this test category.")
+
+ def test_too_small(self):
+ self.skipTest("Not checking resize for this test category.")
+
+ def test_no_explicit_target_size2(self):
+ self.skipTest("Not checking resize for this test category.")
+
+ def test_too_big2(self):
+        # XXX this test assumes that resizing to max size - 1 B will fail, but xfs_growfs won't
+ self.skipTest("Not checking resize for this test category.")
+
class HFSTestCase(fstesting.FSAsRoot):
_fs_class = fs.HFS
diff --git a/tests/formats_test/fstesting.py b/tests/formats_test/fstesting.py
index 62f806f9..86b2a116 100644
--- a/tests/formats_test/fstesting.py
+++ b/tests/formats_test/fstesting.py
@@ -11,16 +11,6 @@ from blivet.size import Size, ROUND_DOWN
from blivet.formats import fs
-def can_resize(an_fs):
- """ Returns True if this filesystem has all necessary resizing tools
- available.
-
- :param an_fs: a filesystem object
- """
- resize_tasks = (an_fs._resize, an_fs._size_info, an_fs._minsize)
- return not any(t.availability_errors for t in resize_tasks)
-
-
@add_metaclass(abc.ABCMeta)
class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
@@ -32,6 +22,15 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
def __init__(self, methodName='run_test'):
super(FSAsRoot, self).__init__(methodName=methodName, device_spec=[self._DEVICE_SIZE])
+ def can_resize(self, an_fs):
+ """ Returns True if this filesystem has all necessary resizing tools
+ available.
+
+ :param an_fs: a filesystem object
+ """
+ resize_tasks = (an_fs._resize, an_fs._size_info, an_fs._minsize)
+ return not any(t.availability_errors for t in resize_tasks)
+
def _test_sizes(self, an_fs):
""" Test relationships between different size values.
@@ -190,7 +189,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
# CHECKME: target size is still 0 after updated_size_info is called.
self.assertEqual(an_fs.size, Size(0) if an_fs.resizable else an_fs._size)
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.assertFalse(an_fs.resizable)
# Not resizable, so can not do resizing actions.
with self.assertRaises(DeviceFormatError):
@@ -221,7 +220,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
# in constructor call behavior would be different.
an_fs = self._fs_class()
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create filesystem %s" % an_fs.name)
@@ -244,7 +243,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
"""
SIZE = Size("64 MiB")
an_fs = self._fs_class(size=SIZE)
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create filesystem %s" % an_fs.name)
@@ -264,7 +263,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
def test_shrink(self):
an_fs = self._fs_class()
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create filesystem %s" % an_fs.name)
@@ -296,7 +295,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
def test_too_small(self):
an_fs = self._fs_class()
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create or resize filesystem %s" % an_fs.name)
@@ -315,7 +314,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
def test_too_big(self):
an_fs = self._fs_class()
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create filesystem %s" % an_fs.name)
@@ -334,7 +333,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
def test_too_big2(self):
an_fs = self._fs_class()
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create filesystem %s" % an_fs.name)
--
2.26.2
From 51acc04f4639f143b55789a06a68aae988a91296 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 15 Jul 2020 12:59:04 +0200
Subject: [PATCH 3/4] Add support for checking and fixing XFS using xfs_repair
---
blivet/formats/fs.py | 1 +
blivet/tasks/availability.py | 1 +
blivet/tasks/fsck.py | 12 ++++++++++++
tests/formats_test/fs_test.py | 6 +++---
4 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 12cb9885..06fbdf10 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -1091,6 +1091,7 @@ class XFS(FS):
_supported = True
_resizable = True
_packages = ["xfsprogs"]
+ _fsck_class = fsck.XFSCK
_info_class = fsinfo.XFSInfo
_mkfs_class = fsmkfs.XFSMkfs
_readlabel_class = fsreadlabel.XFSReadLabel
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index df62780c..f3b76650 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -456,5 +456,6 @@ XFSADMIN_APP = application("xfs_admin")
XFSDB_APP = application("xfs_db")
XFSFREEZE_APP = application("xfs_freeze")
XFSRESIZE_APP = application("xfs_growfs")
+XFSREPAIR_APP = application("xfs_repair")
MOUNT_APP = application("mount")
diff --git a/blivet/tasks/fsck.py b/blivet/tasks/fsck.py
index 5274f13a..8477f5f8 100644
--- a/blivet/tasks/fsck.py
+++ b/blivet/tasks/fsck.py
@@ -123,6 +123,18 @@ class Ext2FSCK(FSCK):
return "\n".join(msgs) or None
+class XFSCK(FSCK):
+ _fsck_errors = {1: "Runtime error encountered during repair operation.",
+ 2: "XFS repair was unable to proceed due to a dirty log."}
+
+ ext = availability.XFSREPAIR_APP
+ options = []
+
+ def _error_message(self, rc):
+ msgs = (self._fsck_errors[c] for c in self._fsck_errors.keys() if rc & c)
+ return "\n".join(msgs) or None
+
+
class HFSPlusFSCK(FSCK):
_fsck_errors = {3: "Quick check found a dirty filesystem; no repairs done.",
4: "Root filesystem was dirty. System should be rebooted.",
diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py
index 9bc5d20d..8fb099fd 100644
--- a/tests/formats_test/fs_test.py
+++ b/tests/formats_test/fs_test.py
@@ -123,10 +123,10 @@ class XFSTestCase(fstesting.FSAsRoot):
self.assertEqual(an_fs._size, ACTUAL_SIZE)
self._test_sizes(an_fs)
- self._remove_partition(part, disk)
+ # and no errors should occur when checking
+ self.assertIsNone(an_fs.do_check())
- # and no errors should occur when checking
- self.assertIsNone(an_fs.do_check())
+ self._remove_partition(part, disk)
def test_shrink(self):
self.skipTest("Not checking resize for this test category.")
--
2.26.2
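xfs_repair reports problems through its exit status, and the XFSCK task added above turns each set bit into a message. A self-contained sketch of that decoding (the dictionary is copied from the patch; the assertions are only examples):

    # Bitwise decoding of the xfs_repair return code, as in XFSCK._error_message.
    fsck_errors = {1: "Runtime error encountered during repair operation.",
                   2: "XFS repair was unable to proceed due to a dirty log."}

    def error_message(rc):
        msgs = (fsck_errors[c] for c in fsck_errors if rc & c)
        return "\n".join(msgs) or None

    assert error_message(0) is None                      # clean run
    assert error_message(2).startswith("XFS repair")     # dirty log only
    assert error_message(3).count("\n") == 1             # both bits set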
From 2a6947098e66f880193f3bac2282a6c7857ca5f7 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 16 Jul 2020 09:05:35 +0200
Subject: [PATCH 4/4] Use xfs_db in read-only mode when getting XFS information
This way it will also work on mounted filesystems.
---
blivet/tasks/fsinfo.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/tasks/fsinfo.py b/blivet/tasks/fsinfo.py
index af208f5d..41ff700f 100644
--- a/blivet/tasks/fsinfo.py
+++ b/blivet/tasks/fsinfo.py
@@ -95,7 +95,7 @@ class ReiserFSInfo(FSInfo):
class XFSInfo(FSInfo):
ext = availability.XFSDB_APP
- options = ["-c", "sb 0", "-c", "p dblocks", "-c", "p blocksize"]
+ options = ["-c", "sb 0", "-c", "p dblocks", "-c", "p blocksize", "-r"]
class UnimplementedFSInfo(fstask.UnimplementedFSTask):
--
2.26.2
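For reference, the resulting invocation is along the lines of xfs_db -c "sb 0" -c "p dblocks" -c "p blocksize" -r <device>, which prints "dblocks = N" and "blocksize = M" lines even while the filesystem is mounted. A rough, illustrative parser for that output (not the actual blivet task code):

    # Illustrative only: run xfs_db read-only and compute the filesystem size
    # in bytes from the printed dblocks/blocksize values.
    import subprocess

    def xfs_size_bytes(device):
        cmd = ["xfs_db", "-c", "sb 0", "-c", "p dblocks", "-c", "p blocksize",
               "-r", device]
        out = subprocess.check_output(cmd, universal_newlines=True)
        values = {}
        for line in out.splitlines():
            key, sep, val = line.partition("=")
            if sep:
                values[key.strip()] = int(val)
        # filesystem size in bytes = data blocks * block size
        return values["dblocks"] * values["blocksize"]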


@ -0,0 +1,76 @@
From aa4ce218fe9b4ee3571d872ff1575a499596181c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 29 May 2020 12:14:30 +0200
Subject: [PATCH 1/2] Do not limit swap to 128 GiB
The limit was part of a change to limit the suggested swap size in
kickstart, which doesn't use SwapSpace._max_size, so there is no
reason to limit this for manual installations.
16 TiB seems to be the maximum usable swap size, based on the mkswap code.
Resolves: rhbz#1656485
---
blivet/formats/swap.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/blivet/formats/swap.py b/blivet/formats/swap.py
index 4b8a7edf..3cc59138 100644
--- a/blivet/formats/swap.py
+++ b/blivet/formats/swap.py
@@ -52,8 +52,7 @@ class SwapSpace(DeviceFormat):
_linux_native = True # for clearpart
_plugin = availability.BLOCKDEV_SWAP_PLUGIN
- # see rhbz#744129 for details
- _max_size = Size("128 GiB")
+ _max_size = Size("16 TiB")
config_actions_map = {"label": "write_label"}
--
2.26.2
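The 16 TiB value matches what the swap header format allows: assuming 4 KiB pages and the 32-bit page counter mkswap writes, the addressable space works out to exactly 16 TiB. A quick back-of-the-envelope check (the constants are assumptions about the common x86_64 configuration, not taken from blivet):

    # 32-bit page index in the swap header * 4 KiB pages = 16 TiB
    PAGE_SIZE = 4096
    MAX_PAGES = 2 ** 32
    assert PAGE_SIZE * MAX_PAGES == 16 * 1024 ** 4   # 16 TiB in bytes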
From 93aa6ad87116f1c86616d73dbe561251c4a0c286 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 11 Jun 2020 14:27:44 +0200
Subject: [PATCH 2/2] Add test for SwapSpace max size
---
tests/formats_test/swap_test.py | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
create mode 100644 tests/formats_test/swap_test.py
diff --git a/tests/formats_test/swap_test.py b/tests/formats_test/swap_test.py
new file mode 100644
index 00000000..56356144
--- /dev/null
+++ b/tests/formats_test/swap_test.py
@@ -0,0 +1,24 @@
+import test_compat # pylint: disable=unused-import
+
+import six
+import unittest
+
+from blivet.devices.storage import StorageDevice
+from blivet.errors import DeviceError
+from blivet.formats import get_format
+
+from blivet.size import Size
+
+
+class SwapNodevTestCase(unittest.TestCase):
+
+ def test_swap_max_size(self):
+ StorageDevice("dev", size=Size("129 GiB"),
+ fmt=get_format("swap"))
+
+ StorageDevice("dev", size=Size("15 TiB"),
+ fmt=get_format("swap"))
+
+ with six.assertRaisesRegex(self, DeviceError, "device is too large for new format"):
+ StorageDevice("dev", size=Size("17 TiB"),
+ fmt=get_format("swap"))
--
2.26.2


@ -0,0 +1,78 @@
From 4e6a322d32d2a12f8a87ab763a6286cf3d7b5c27 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 8 Sep 2020 13:57:40 +0200
Subject: [PATCH] Use UnusableConfigurationError for partially hidden multipath
devices
Follow-up for https://github.com/storaged-project/blivet/pull/883
to make Anaconda show an error message instead of crashing.
Resolves: rhbz#1877052
---
blivet/devicetree.py | 4 ++--
blivet/errors.py | 6 ++++++
tests/devicetree_test.py | 4 ++--
3 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index 2afb0d0e..57a9bbd7 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -32,7 +32,7 @@ from gi.repository import BlockDev as blockdev
from .actionlist import ActionList
from .callbacks import callbacks
-from .errors import DeviceError, DeviceTreeError, StorageError, DuplicateUUIDError
+from .errors import DeviceError, DeviceTreeError, StorageError, DuplicateUUIDError, InvalidMultideviceSelection
from .deviceaction import ActionDestroyDevice, ActionDestroyFormat
from .devices import BTRFSDevice, NoDevice, PartitionDevice
from .devices import LVMLogicalVolumeDevice, LVMVolumeGroupDevice
@@ -936,7 +936,7 @@ class DeviceTreeBase(object):
if is_ignored:
if len(disk.children) == 1:
if not all(self._is_ignored_disk(d) for d in disk.children[0].parents):
- raise DeviceTreeError("Including only a subset of raid/multipath member disks is not allowed.")
+ raise InvalidMultideviceSelection("Including only a subset of raid/multipath member disks is not allowed.")
# and also children like fwraid or mpath
self.hide(disk.children[0])
diff --git a/blivet/errors.py b/blivet/errors.py
index 811abf81..7a93f1ce 100644
--- a/blivet/errors.py
+++ b/blivet/errors.py
@@ -233,6 +233,12 @@ class DuplicateVGError(UnusableConfigurationError):
"Hint 2: You can get the VG UUIDs by running "
"'pvs -o +vg_uuid'.")
+
+class InvalidMultideviceSelection(UnusableConfigurationError):
+ suggestion = N_("All parent devices must be selected when choosing exclusive "
+ "or ignored disks for a multipath or firmware RAID device.")
+
+
# DeviceAction
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index 6032e7f6..4e47ffc3 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -5,7 +5,7 @@ import six
import unittest
from blivet.actionlist import ActionList
-from blivet.errors import DeviceTreeError, DuplicateUUIDError
+from blivet.errors import DeviceTreeError, DuplicateUUIDError, InvalidMultideviceSelection
from blivet.deviceaction import ACTION_TYPE_DESTROY, ACTION_OBJECT_DEVICE
from blivet.devicelibs import lvm
from blivet.devices import DiskDevice
@@ -512,5 +512,5 @@ class DeviceTreeIgnoredExclusiveMultipathTestCase(unittest.TestCase):
self.tree.ignored_disks = ["sda", "sdb"]
self.tree.exclusive_disks = []
- with self.assertRaises(DeviceTreeError):
+ with self.assertRaises(InvalidMultideviceSelection):
self.tree._hide_ignored_disks()
--
2.26.2
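Since InvalidMultideviceSelection is a subclass of UnusableConfigurationError, any consumer that already handles that family of errors, Anaconda included, gets the new message for free. A hedged sketch of such caller-side handling (the reset() call and the printing are illustrative, not Anaconda's actual code):

    # Illustrative caller-side handling of unusable-configuration errors.
    from blivet import Blivet
    from blivet.errors import UnusableConfigurationError

    storage = Blivet()
    try:
        storage.reset()   # populates the device tree and may raise
    except UnusableConfigurationError as e:
        # the error class carries a 'suggestion' hint for the user
        print("Unusable storage configuration: %s" % e)
        print("Hint: %s" % e.suggestion)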


@ -0,0 +1,32 @@
From 866a48e6c3d8246d2897bb402a191df5f2848aa4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 23 Jun 2020 10:33:33 +0200
Subject: [PATCH] Fix possible UnicodeDecodeError when reading model from sysfs
Some Innovation IT NVMe devices have an invalid Unicode character in their
model name.
Resolves: rhbz#1849326
---
blivet/udev.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/blivet/udev.py b/blivet/udev.py
index 41c99496..2c795225 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -185,8 +185,9 @@ def __is_blacklisted_blockdev(dev_name):
if any(re.search(expr, dev_name) for expr in device_name_blacklist):
return True
- if os.path.exists("/sys/class/block/%s/device/model" % (dev_name,)):
- model = open("/sys/class/block/%s/device/model" % (dev_name,)).read()
+ model_path = "/sys/class/block/%s/device/model" % dev_name
+ if os.path.exists(model_path):
+ model = open(model_path, encoding="utf-8", errors="replace").read()
for bad in ("IBM *STMF KERNEL", "SCEI Flash-5", "DGC LUNZ"):
if model.find(bad) != -1:
log.info("ignoring %s with model %s", dev_name, model)
--
2.26.2
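The effect of errors="replace" is easy to see in isolation; a tiny standalone example (the byte string is made up and only mimics a model name with a stray non-UTF-8 byte):

    # Made-up model string with one invalid UTF-8 byte, similar to what the
    # affected NVMe devices expose in sysfs.
    raw = b"Innovation IT \xae NVMe\n"

    # raw.decode("utf-8") would raise UnicodeDecodeError here; with
    # errors="replace" the bad byte becomes the U+FFFD replacement character.
    model = raw.decode("utf-8", errors="replace")
    assert "\ufffd" in model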


@ -0,0 +1,415 @@
From 3f6bbf52442609b8e6e3919a3fdd8c5af64923e6 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 12 May 2020 12:48:41 +0200
Subject: [PATCH 1/3] Add basic support for LVM VDO devices
This adds support for detecting LVM VDO devices during populate
and allows removing both VDO LVs and VDO pools using actions.
---
blivet/devices/lvm.py | 150 +++++++++++++++++++++++++++++++-
blivet/populator/helpers/lvm.py | 16 +++-
tests/action_test.py | 39 +++++++++
tests/devices_test/lvm_test.py | 34 ++++++++
tests/storagetestcase.py | 11 ++-
5 files changed, 245 insertions(+), 5 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 97de6acd..d9e24a33 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1789,8 +1789,132 @@ class LVMThinLogicalVolumeMixin(object):
data.pool_name = self.pool.lvname
+class LVMVDOPoolMixin(object):
+ def __init__(self):
+ self._lvs = []
+
+ @property
+ def is_vdo_pool(self):
+ return self.seg_type == "vdo-pool"
+
+ @property
+ def type(self):
+ return "lvmvdopool"
+
+ @property
+ def resizable(self):
+ return False
+
+ @util.requires_property("is_vdo_pool")
+ def _add_log_vol(self, lv):
+ """ Add an LV to this VDO pool. """
+ if lv in self._lvs:
+ raise ValueError("lv is already part of this VDO pool")
+
+ self.vg._add_log_vol(lv)
+ log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name)
+ self._lvs.append(lv)
+
+ @util.requires_property("is_vdo_pool")
+ def _remove_log_vol(self, lv):
+ """ Remove an LV from this VDO pool. """
+ if lv not in self._lvs:
+ raise ValueError("specified lv is not part of this VDO pool")
+
+ self._lvs.remove(lv)
+ self.vg._remove_log_vol(lv)
+
+ @property
+ @util.requires_property("is_vdo_pool")
+ def lvs(self):
+ """ A list of this VDO pool's LVs """
+ return self._lvs[:] # we don't want folks changing our list
+
+ @property
+ def direct(self):
+ """ Is this device directly accessible? """
+ return False
+
+ def _create(self):
+ """ Create the device. """
+ raise NotImplementedError
+
+
+class LVMVDOLogicalVolumeMixin(object):
+ def __init__(self):
+ pass
+
+ def _init_check(self):
+ pass
+
+ def _check_parents(self):
+ """Check that this device has parents as expected"""
+ if isinstance(self.parents, (list, ParentList)):
+ if len(self.parents) != 1:
+ raise ValueError("constructor requires a single vdo-pool LV")
+
+ container = self.parents[0]
+ else:
+ container = self.parents
+
+ if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_vdo_pool:
+ raise ValueError("constructor requires a vdo-pool LV")
+
+ @property
+ def vg_space_used(self):
+ return Size(0) # the pool's size is already accounted for in the vg
+
+ @property
+ def is_vdo_lv(self):
+ return self.seg_type == "vdo"
+
+ @property
+ def vg(self):
+ # parents[0] is the pool, not the VG so set the VG here
+ return self.pool.vg
+
+ @property
+ def type(self):
+ return "vdolv"
+
+ @property
+ def resizable(self):
+ return False
+
+ @property
+ @util.requires_property("is_vdo_lv")
+ def pool(self):
+ return self.parents[0]
+
+ def _create(self):
+ """ Create the device. """
+ raise NotImplementedError
+
+ def _destroy(self):
+ # nothing to do here, VDO LV is destroyed automatically together with
+ # the VDO pool
+ pass
+
+ def remove_hook(self, modparent=True):
+ if modparent:
+ self.pool._remove_log_vol(self)
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
+
+ def add_hook(self, new=True):
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self).add_hook(new=new)
+ if new:
+ return
+
+ if self not in self.pool.lvs:
+ self.pool._add_log_vol(self)
+
+
class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
- LVMThinPoolMixin, LVMThinLogicalVolumeMixin):
+ LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
+ LVMVDOLogicalVolumeMixin):
""" An LVM Logical Volume """
# generally resizable, see :property:`resizable` for details
@@ -1879,6 +2003,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs)
+ LVMVDOPoolMixin.__init__(self)
+ LVMVDOLogicalVolumeMixin.__init__(self)
LVMInternalLogicalVolumeMixin._init_check(self)
LVMSnapshotMixin._init_check(self)
@@ -1905,6 +2031,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
ret.append(LVMThinPoolMixin)
if self.is_thin_lv:
ret.append(LVMThinLogicalVolumeMixin)
+ if self.is_vdo_pool:
+ ret.append(LVMVDOPoolMixin)
+ if self.is_vdo_lv:
+ ret.append(LVMVDOLogicalVolumeMixin)
return ret
def _try_specific_call(self, name, *args, **kwargs):
@@ -2066,6 +2196,11 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
def display_lv_name(self):
return self.lvname
+ @property
+ @type_specific
+ def pool(self):
+ return super(LVMLogicalVolumeDevice, self).pool
+
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
@@ -2167,6 +2302,19 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
udev.settle()
blockdev.lvm.lvresize(self.vg.name, self._name, self.size)
+ @type_specific
+ def _add_log_vol(self, lv):
+ pass
+
+ @type_specific
+ def _remove_log_vol(self, lv):
+ pass
+
+ @property
+ @type_specific
+ def lvs(self):
+ return []
+
@property
@type_specific
def direct(self):
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index 4b674fac..ff8bf59f 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -211,9 +211,6 @@ class LVMFormatPopulator(FormatPopulator):
origin = self._devicetree.get_device_by_name(origin_device_name)
lv_kwargs["origin"] = origin
- elif lv_attr[0] == 'v':
- # skip vorigins
- return
elif lv_attr[0] in 'IrielTCo' and lv_name.endswith(']'):
# an internal LV, add the an instance of the appropriate class
# to internal_lvs for later processing when non-internal LVs are
@@ -237,6 +234,19 @@ class LVMFormatPopulator(FormatPopulator):
origin = self._devicetree.get_device_by_name(origin_device_name)
lv_kwargs["origin"] = origin
+ lv_parents = [self._devicetree.get_device_by_name(pool_device_name)]
+ elif lv_attr[0] == 'd':
+ # vdo pool
+ # nothing to do here
+ pass
+ elif lv_attr[0] == 'v':
+ if lv_type != "vdo":
+ # skip vorigins
+ return
+ pool_name = blockdev.lvm.vdolvpoolname(vg_name, lv_name)
+ pool_device_name = "%s-%s" % (vg_name, pool_name)
+ add_required_lv(pool_device_name, "failed to look up VDO pool")
+
lv_parents = [self._devicetree.get_device_by_name(pool_device_name)]
elif lv_name.endswith(']'):
# unrecognized Internal LVM2 device
diff --git a/tests/action_test.py b/tests/action_test.py
index 90c1b312..8f9a7424 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -1252,6 +1252,45 @@ class DeviceActionTestCase(StorageTestCase):
self.assertEqual(set(self.storage.lvs), {pool})
self.assertEqual(set(pool._internal_lvs), {lv1, lv2})
+ def test_lvm_vdo_destroy(self):
+ self.destroy_all_devices()
+ sdc = self.storage.devicetree.get_device_by_name("sdc")
+ sdc1 = self.new_device(device_class=PartitionDevice, name="sdc1",
+ size=Size("50 GiB"), parents=[sdc],
+ fmt=blivet.formats.get_format("lvmpv"))
+ self.schedule_create_device(sdc1)
+
+ vg = self.new_device(device_class=LVMVolumeGroupDevice,
+ name="vg", parents=[sdc1])
+ self.schedule_create_device(vg)
+
+ pool = self.new_device(device_class=LVMLogicalVolumeDevice,
+ name="data", parents=[vg],
+ size=Size("10 GiB"),
+ seg_type="vdo-pool", exists=True)
+ self.storage.devicetree._add_device(pool)
+ lv = self.new_device(device_class=LVMLogicalVolumeDevice,
+ name="meta", parents=[pool],
+ size=Size("50 GiB"),
+ seg_type="vdo", exists=True)
+ self.storage.devicetree._add_device(lv)
+
+ remove_lv = self.schedule_destroy_device(lv)
+ self.assertListEqual(pool.lvs, [])
+ self.assertNotIn(lv, vg.lvs)
+
+ # cancelling the action should put lv back to both vg and pool lvs
+ self.storage.devicetree.actions.remove(remove_lv)
+ self.assertListEqual(pool.lvs, [lv])
+ self.assertIn(lv, vg.lvs)
+
+ # can't remove non-leaf pool
+ with self.assertRaises(ValueError):
+ self.schedule_destroy_device(pool)
+
+ self.schedule_destroy_device(lv)
+ self.schedule_destroy_device(pool)
+
class ConfigurationActionsTest(unittest.TestCase):
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 9e701d18..204cb99a 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -405,6 +405,40 @@ class LVMDeviceTest(unittest.TestCase):
exists=False)
self.assertFalse(vg.is_empty)
+ def test_lvm_vdo_pool(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv])
+ pool = LVMLogicalVolumeDevice("testpool", parents=[vg], size=Size("512 MiB"),
+ seg_type="vdo-pool", exists=True)
+ self.assertTrue(pool.is_vdo_pool)
+
+ free = vg.free_space
+ lv = LVMLogicalVolumeDevice("testlv", parents=[pool], size=Size("2 GiB"),
+ seg_type="vdo", exists=True)
+ self.assertTrue(lv.is_vdo_lv)
+ self.assertEqual(lv.vg, vg)
+ self.assertEqual(lv.pool, pool)
+
+ # free space in the vg shouldn't be affected by the vdo lv
+ self.assertEqual(lv.vg_space_used, 0)
+ self.assertEqual(free, vg.free_space)
+
+ self.assertListEqual(pool.lvs, [lv])
+
+ # now try to destroy both the pool and the vdo lv
+ # for the lv this should be a no-op, destroying the pool should destroy both
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ lv.destroy()
+ lv.remove_hook()
+ self.assertFalse(lv.exists)
+ self.assertFalse(lvm.lvremove.called)
+ self.assertListEqual(pool.lvs, [])
+
+ pool.destroy()
+ self.assertFalse(pool.exists)
+ self.assertTrue(lvm.lvremove.called)
+
class TypeSpecificCallsTest(unittest.TestCase):
def test_type_specific_calls(self):
diff --git a/tests/storagetestcase.py b/tests/storagetestcase.py
index e581bca6..1844dec5 100644
--- a/tests/storagetestcase.py
+++ b/tests/storagetestcase.py
@@ -96,7 +96,16 @@ class StorageTestCase(unittest.TestCase):
def new_device(self, *args, **kwargs):
""" Return a new Device instance suitable for testing. """
device_class = kwargs.pop("device_class")
- exists = kwargs.pop("exists", False)
+
+ # we intentionally don't pass the "exists" kwarg to the constructor
+    # because this causes issues with some devices (especially partitions)
+ # but we still need it for some LVs like VDO because we can't create
+ # those so we need to fake their existence even for the constructor
+ if device_class is blivet.devices.LVMLogicalVolumeDevice:
+ exists = kwargs.get("exists", False)
+ else:
+ exists = kwargs.pop("exists", False)
+
part_type = kwargs.pop("part_type", parted.PARTITION_NORMAL)
device = device_class(*args, **kwargs)
--
2.26.2
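For orientation, the detection added to LVMFormatPopulator keys off the first character of lv_attr plus the segment type reported by LVM: 'd' marks a VDO pool, and 'v' is a VDO LV only when the segment type is "vdo" (otherwise it is still a skipped virtual origin). A stripped-down sketch of that dispatch, with made-up attribute strings purely as examples:

    # Illustrative dispatch on lv_attr / seg_type; not the populator code itself.
    def classify_lv(lv_attr, seg_type):
        if lv_attr[0] == "d":
            return "vdo-pool"            # VDO pool LV
        if lv_attr[0] == "v":
            if seg_type == "vdo":
                return "vdo-lv"          # VDO LV, parented by its vdo-pool
            return None                  # plain virtual origin, still skipped
        return "other"

    assert classify_lv("dwi-ao----", "vdo-pool") == "vdo-pool"
    assert classify_lv("vwi-a-v---", "vdo") == "vdo-lv"
    assert classify_lv("vwi-a-tz--", "linear") is None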
From f05a66e1bed1ca1f3cd7d7ffecd6693ab4d7f32a Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 12 May 2020 12:52:47 +0200
Subject: [PATCH 2/3] Fix checking for filesystem support in action_test
---
tests/action_test.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/action_test.py b/tests/action_test.py
index 8f9a7424..228eb97a 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -56,7 +56,7 @@ FORMAT_CLASSES = [
@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
-@unittest.skipUnless(not any(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test")
+@unittest.skipUnless(all(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test")
class DeviceActionTestCase(StorageTestCase):
""" DeviceActionTestSuite """
--
2.26.2
From 69bd2e69e21c8779377a6f54b3d83cb35138867a Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 12 May 2020 12:54:03 +0200
Subject: [PATCH 3/3] Fix LV min size for resize in test_action_dependencies
We've recently changed min size for all filesystems so we can't
resize the LV to the device minimal size.
This was overlooked in the original change because these tests
were skipped.
---
tests/action_test.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/action_test.py b/tests/action_test.py
index 228eb97a..77176f46 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -870,7 +870,7 @@ class DeviceActionTestCase(StorageTestCase):
name="testlv2", parents=[testvg])
testlv2.format = self.new_format("ext4", device=testlv2.path,
exists=True, device_instance=testlv2)
- shrink_lv2 = ActionResizeDevice(testlv2, testlv2.size - Size("10 GiB"))
+ shrink_lv2 = ActionResizeDevice(testlv2, testlv2.size - Size("10 GiB") + Ext4FS._min_size)
shrink_lv2.apply()
self.assertTrue(grow_lv.requires(shrink_lv2))
--
2.26.2


@ -0,0 +1,30 @@
From d477f8d076789cbe1c0a85545ea8b5133fdc4bdf Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 18 Sep 2020 13:58:48 +0200
Subject: [PATCH] Let parted fix fixable issues with partition table
This will automatically fix issues like a GPT partition table not
covering the whole device after a disk size change.
Resolves: rhbz#1846869
---
blivet/populator/populator.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py
index 465c272d..fe566816 100644
--- a/blivet/populator/populator.py
+++ b/blivet/populator/populator.py
@@ -64,6 +64,9 @@ def parted_exn_handler(exn_type, exn_options, exn_msg):
if exn_type == parted.EXCEPTION_TYPE_ERROR and \
exn_options == parted.EXCEPTION_OPT_YES_NO:
ret = parted.EXCEPTION_RESOLVE_YES
+ elif exn_type == parted.EXCEPTION_TYPE_WARNING and \
+ exn_options & parted.EXCEPTION_RESOLVE_FIX:
+ ret = parted.EXCEPTION_RESOLVE_FIX
return ret
--
2.29.2


@ -0,0 +1,112 @@
From 430cd2cdba8fba434b5bed2d2a7ed97803c62f6d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 5 Jan 2021 16:56:52 +0100
Subject: [PATCH 1/3] Fix possible UnicodeDecodeError when reading sysfs
attributes
This is a follow-up for https://github.com/storaged-project/blivet/pull/861,
where we fixed reading the device model in "__is_blacklisted_blockdev",
but we read the device model from other places too, so it makes
more sense to "fix" all sysfs attribute reads.
---
blivet/util.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/util.py b/blivet/util.py
index 2fa9c8fc..48b7818f 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -379,7 +379,7 @@ def get_sysfs_attr(path, attr, root=None):
log.warning("%s is not a valid attribute", attr)
return None
- f = open(fullattr, "r")
+ f = open(fullattr, "r", encoding="utf-8", errors="replace")
data = f.read()
f.close()
sdata = "".join(["%02x" % (ord(x),) for x in data])
--
2.29.2
From 15350b52f30910d4fadad92da0195710adcb69a0 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 5 Jan 2021 16:59:14 +0100
Subject: [PATCH 2/3] Use util.get_sysfs_attr in __is_blacklisted_blockdev to read
device model
---
blivet/udev.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/blivet/udev.py b/blivet/udev.py
index 2c795225..25375459 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -185,9 +185,8 @@ def __is_blacklisted_blockdev(dev_name):
if any(re.search(expr, dev_name) for expr in device_name_blacklist):
return True
- model_path = "/sys/class/block/%s/device/model" % dev_name
- if os.path.exists(model_path):
- model = open(model_path, encoding="utf-8", errors="replace").read()
+ model = util.get_sysfs_attr("/sys/class/block/%s" % dev_name, "device/model")
+ if model:
for bad in ("IBM *STMF KERNEL", "SCEI Flash-5", "DGC LUNZ"):
if model.find(bad) != -1:
log.info("ignoring %s with model %s", dev_name, model)
--
2.29.2
From 64ece8c0dafb550bbde4798a766515fb04f44568 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 6 Jan 2021 12:34:49 +0100
Subject: [PATCH 3/3] Add test for util.get_sysfs_attr
---
tests/util_test.py | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/tests/util_test.py b/tests/util_test.py
index 9a2ff492..853b6166 100644
--- a/tests/util_test.py
+++ b/tests/util_test.py
@@ -2,7 +2,9 @@
import test_compat
from six.moves import mock
+import os
import six
+import tempfile
import unittest
from decimal import Decimal
@@ -157,3 +159,24 @@ class DependencyGuardTestCase(unittest.TestCase):
with mock.patch.object(_requires_something, '_check_avail', return_value=True):
self.assertEqual(self._test_dependency_guard_non_critical(), True)
self.assertEqual(self._test_dependency_guard_critical(), True)
+
+
+class GetSysfsAttrTestCase(unittest.TestCase):
+
+ def test_get_sysfs_attr(self):
+
+ with tempfile.TemporaryDirectory() as sysfs:
+ model_file = os.path.join(sysfs, "model")
+ with open(model_file, "w") as f:
+ f.write("test model\n")
+
+ model = util.get_sysfs_attr(sysfs, "model")
+ self.assertEqual(model, "test model")
+
+ # now with some invalid byte in the model
+ with open(model_file, "wb") as f:
+ f.write(b"test model\xef\n")
+
+ # the unicode replacement character (U+FFFD) should be used instead
+ model = util.get_sysfs_attr(sysfs, "model")
+ self.assertEqual(model, "test model\ufffd")
--
2.29.2

File diff suppressed because it is too large


@ -23,7 +23,7 @@ Version: 3.2.2
#%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
-Release: 3%{?prerelease}%{?dist}
+Release: 9%{?prerelease}%{?dist}
Epoch: 1
License: LGPLv2+
Group: System Environment/Libraries
@ -36,6 +36,20 @@ Patch1: 0002-remove-btrfs-plugin.patch
Patch2: 0003-Skip-test_mounting-for-filesystems-that-are-not-moun.patch
Patch3: 0004-Add-extra-sleep-after-pvremove-call.patch
Patch4: 0005-Round-down-to-nearest-MiB-value-when-writing-ks-parittion-info.ks
Patch5: 0006-Blivet-RHEL-8.3-localization-update.patch
Patch6: 0007-Do-not-use-FSAVAIL-and-FSUSE-options-when-running-lsblk.patch
Patch7: 0008-set-allowed-disk-labels-for-s390x-as-standard-ones-plus-dasd.patch
Patch8: 0009-Do-not-use-BlockDev-utils_have_kernel_module-to-check-for-modules.patch
Patch9: 0010-Fix-name-resolution-for-MD-devices-and-partitions-on.patch
Patch10: 0011-Fix-ignoring-disk-devices-with-parents-or-children.patch
Patch11: 0012-xfs-grow-support.patch
Patch12: 0013-Do-not-limit-swap-to-128-GiB.patch
Patch13: 0014-Use-UnusableConfigurationError-for-patially-hidden-multipath-devices.patch
Patch14: 0015-Fix-possible-UnicodeDecodeError-when-reading-model-f.patch
Patch15: 0016-Basic-LVM-VDO-support.patch
Patch16: 0017-Let-parted-fix-fixable-issues-with-partition-table.patch
Patch17: 0018-Fix-possible-UnicodeDecodeError-when-reading-sysfs-a.patch
Patch18: 0019-LVM-VDO-support.patch
# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
@ -197,6 +211,46 @@ configuration.
%endif
%changelog
* Tue Feb 9 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-9
- LVM VDO support
Resolves: rhbz#1509337

* Mon Jan 11 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-8
- Let parted fix fixable issues with partition table
Resolves: rhbz#1846869
- Fix possible UnicodeDecodeError when reading sysfs attributes
Resolves: rhbz#1849326

* Wed Nov 18 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-7
- Add support for XFS format grow
Resolves: rhbz#1862349
- Do not limit swap to 128 GiB
Resolves: rhbz#1656485
- Use UnusableConfigurationError for partially hidden multipath devices
Resolves: rhbz#1877052
- Fix possible UnicodeDecodeError when reading model from sysfs
Resolves: rhbz#1849326
- Add basic support for LVM VDO devices
Resolves: rhbz#1828745

* Thu Aug 20 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-6
- Fix name resolution for MD devices and partitions on them
Resolves: rhbz#1862904
- Fix ignoring disk devices with parents or children
Resolves: rhbz#1866243

* Thu Jul 16 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-5
- set allowed disk labels for s390x as standard ones (msdos + gpt) plus dasd
Resolves: rhbz#1855200
- Do not use BlockDev.utils_have_kernel_module to check for modules
Resolves: rhbz#1855344

* Thu Jul 09 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-4
- Blivet RHEL 8.3 localization update
Resolves: rhbz#1820565
- Do not use FSAVAIL and FSUSE% options when running lsblk
Resolves: rhbz#1853624

* Tue Jun 30 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-3
- Round down to nearest MiB value when writing ks partition info
Resolves: rhbz#1850670