diff --git a/.gitignore b/.gitignore index d3a23d4..4d90c03 100644 --- a/.gitignore +++ b/.gitignore @@ -9,9 +9,9 @@ SOURCES/ffi-1.13.1.gem SOURCES/json-2.3.0.gem SOURCES/mustermann-1.1.1.gem SOURCES/open4-1.3.4-1.gem -SOURCES/pcs-0.10.10.tar.gz -SOURCES/pcs-web-ui-0.1.7.tar.gz -SOURCES/pcs-web-ui-node-modules-0.1.7.tar.xz +SOURCES/pcs-0.10.12.tar.gz +SOURCES/pcs-web-ui-0.1.12.tar.gz +SOURCES/pcs-web-ui-node-modules-0.1.12.tar.xz SOURCES/pyagentx-0.4.pcs.2.tar.gz SOURCES/python-dateutil-2.8.1.tar.gz SOURCES/rack-2.2.3.gem diff --git a/.pcs.metadata b/.pcs.metadata index 6167c6c..b16fa68 100644 --- a/.pcs.metadata +++ b/.pcs.metadata @@ -9,9 +9,9 @@ cfa25e7a3760c3ec16723cb8263d9b7a52d0eadf SOURCES/ffi-1.13.1.gem 0230e8c5a37f1543982e5b04be503dd5f9004b47 SOURCES/json-2.3.0.gem 50a4e37904485810cb05e27d75c9783e5a8f3402 SOURCES/mustermann-1.1.1.gem 41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem -a1c0585455b7e050c33598598a045ccd2776cb28 SOURCES/pcs-0.10.10.tar.gz -b9ed12ca957c2f204ec37ea2836b924c36fab379 SOURCES/pcs-web-ui-0.1.7.tar.gz -8824285e6f1c2807d9222d573c6e6df1e50d8410 SOURCES/pcs-web-ui-node-modules-0.1.7.tar.xz +1937b826a36bb8396da227361d13f4c25830929c SOURCES/pcs-0.10.12.tar.gz +a29bfd22130ac978c5d4a6a82108ce37ad2a5db9 SOURCES/pcs-web-ui-0.1.12.tar.gz +c9723466d7bfb353899307a5700177f47e7e6cff SOURCES/pcs-web-ui-node-modules-0.1.12.tar.xz 3176b2f2b332c2b6bf79fe882e83feecf3d3f011 SOURCES/pyagentx-0.4.pcs.2.tar.gz bd26127e57f83a10f656b62c46524c15aeb844dd SOURCES/python-dateutil-2.8.1.tar.gz 345b7169d4d2d62176a225510399963bad62b68f SOURCES/rack-2.2.3.gem diff --git a/SOURCES/add-missing-file-test_stonith_update_scsi_devices.py.patch b/SOURCES/add-missing-file-test_stonith_update_scsi_devices.py.patch deleted file mode 100644 index 796544d..0000000 --- a/SOURCES/add-missing-file-test_stonith_update_scsi_devices.py.patch +++ /dev/null @@ -1,1172 +0,0 @@ -From e3f9823283517bafa8d309fb6148539e0e8ecdb2 Mon Sep 17 00:00:00 2001 -From: Miroslav Lisik -Date: Fri, 10 Sep 2021 11:40:03 +0200 -Subject: [PATCH] add missing file test_stonith_update_scsi_devices.py - ---- - .../test_stonith_update_scsi_devices.py | 1153 +++++++++++++++++ - 1 file changed, 1153 insertions(+) - create mode 100644 pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py - -diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py -new file mode 100644 -index 0000000..3bc5132 ---- /dev/null -+++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py -@@ -0,0 +1,1153 @@ -+import json -+from unittest import mock, TestCase -+ -+ -+from pcs_test.tools import fixture -+from pcs_test.tools.command_env import get_env_tools -+from pcs_test.tools.misc import get_test_resource as rc -+ -+from pcs import settings -+from pcs.lib.commands import stonith -+from pcs.common import ( -+ communication, -+ reports, -+) -+from pcs.common.interface import dto -+from pcs.common.tools import timeout_to_seconds -+ -+from .cluster.common import ( -+ corosync_conf_fixture, -+ get_two_node, -+ node_fixture, -+) -+ -+SCSI_STONITH_ID = "scsi-fence-device" -+SCSI_NODE = "node1" -+_DIGEST = "0" * 31 -+DEFAULT_DIGEST = _DIGEST + "0" -+ALL_DIGEST = _DIGEST + "1" -+NONPRIVATE_DIGEST = _DIGEST + "2" -+NONRELOADABLE_DIGEST = _DIGEST + "3" -+DEVICES_1 = ("/dev/sda",) -+DEVICES_2 = ("/dev/sda", "/dev/sdb") -+DEVICES_3 = ("/dev/sda", "/dev/sdb", "/dev/sdc") -+ -+DEFAULT_MONITOR = ("monitor", "60s", None, None) -+DEFAULT_OPS = 
(DEFAULT_MONITOR,) -+DEFAULT_LRM_START_OPS = (("0", DEFAULT_DIGEST, None, None),) -+DEFAULT_LRM_MONITOR_OPS = (("60000", DEFAULT_DIGEST, None, None),) -+DEFAULT_LRM_START_OPS_UPDATED = (("0", ALL_DIGEST, None, None),) -+DEFAULT_LRM_MONITOR_OPS_UPDATED = (("60000", ALL_DIGEST, None, None),) -+ -+ -+def _fixture_ops(resource_id, ops): -+ return "\n".join( -+ [ -+ ( -+ '' -+ ).format( -+ resource_id=resource_id, -+ name=name, -+ _interval=_interval if _interval else interval, -+ interval=interval, -+ timeout=f'timeout="{timeout}"' if timeout else "", -+ ) -+ for name, interval, timeout, _interval in ops -+ ] -+ ) -+ -+ -+def _fixture_devices_nvpair(resource_id, devices): -+ if devices is None: -+ return "" -+ return ( -+ '' -+ ).format(resource_id=resource_id, devices=",".join(sorted(devices))) -+ -+ -+def fixture_scsi( -+ stonith_id=SCSI_STONITH_ID, devices=DEVICES_1, resource_ops=DEFAULT_OPS -+): -+ return """ -+ -+ -+ -+ {devices} -+ -+ -+ -+ -+ -+ -+ -+ -+ {operations} -+ -+ -+ -+ -+ """.format( -+ stonith_id=stonith_id, -+ devices=_fixture_devices_nvpair(stonith_id, devices), -+ operations=_fixture_ops(stonith_id, resource_ops), -+ ) -+ -+ -+def _fixture_lrm_rsc_ops(op_type, resource_id, lrm_ops): -+ return [ -+ ( -+ '' -+ ).format( -+ op_type_id="last" if op_type == "start" else op_type, -+ op_type=op_type, -+ resource_id=resource_id, -+ ms=ms, -+ _all=f'op-digest="{_all}"' if _all else "", -+ secure=f'op-secure-digest="{secure}"' if secure else "", -+ restart=f'op-restart-digest="{restart}"' if restart else "", -+ ) -+ for ms, _all, secure, restart in lrm_ops -+ ] -+ -+ -+def _fixture_lrm_rsc_monitor_ops(resource_id, lrm_monitor_ops): -+ return _fixture_lrm_rsc_ops("monitor", resource_id, lrm_monitor_ops) -+ -+ -+def _fixture_lrm_rsc_start_ops(resource_id, lrm_start_ops): -+ return _fixture_lrm_rsc_ops("start", resource_id, lrm_start_ops) -+ -+ -+def _fixture_status_lrm_ops_base( -+ resource_id, -+ lrm_ops, -+): -+ return f""" -+ -+ -+ -+ -+ -+ {lrm_ops} -+ -+ -+ -+ -+ -+ """ -+ -+ -+def _fixture_status_lrm_ops( -+ resource_id, -+ lrm_start_ops=DEFAULT_LRM_START_OPS, -+ lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS, -+): -+ return _fixture_status_lrm_ops_base( -+ resource_id, -+ "\n".join( -+ _fixture_lrm_rsc_start_ops(resource_id, lrm_start_ops) -+ + _fixture_lrm_rsc_monitor_ops(resource_id, lrm_monitor_ops) -+ ), -+ ) -+ -+ -+def fixture_digests_xml(resource_id, node_name, devices=""): -+ return f""" -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ """ -+ -+ -+FIXTURE_CRM_MON_RES_RUNNING_1 = f""" -+ -+ -+ -+""" -+ -+FIXTURE_CRM_MON_RES_RUNNING_2 = f""" -+ -+ -+ -+ -+ -+ -+""" -+FIXTURE_CRM_MON_NODES = """ -+ -+ -+ -+ -+ -+""" -+ -+FIXTURE_CRM_MON_RES_STOPPED = f""" -+ -+""" -+ -+ -+@mock.patch.object( -+ settings, -+ "pacemaker_api_result_schema", -+ rc("pcmk_api_rng/api-result.rng"), -+) -+class UpdateScsiDevices(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ self.existing_nodes = ["node1", "node2", "node3"] -+ self.existing_corosync_nodes = [ -+ node_fixture(node, node_id) -+ for node_id, node in enumerate(self.existing_nodes, 1) -+ ] -+ self.config.env.set_known_nodes(self.existing_nodes) -+ -+ def assert_command_success( -+ self, -+ devices_before=DEVICES_1, -+ devices_updated=DEVICES_2, -+ resource_ops=DEFAULT_OPS, -+ lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS, -+ lrm_start_ops=DEFAULT_LRM_START_OPS, -+ lrm_monitor_ops_updated=DEFAULT_LRM_MONITOR_OPS_UPDATED, -+ lrm_start_ops_updated=DEFAULT_LRM_START_OPS_UPDATED, -+ ): -+ # pylint: disable=too-many-locals 
-+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load( -+ resources=fixture_scsi( -+ devices=devices_before, resource_ops=resource_ops -+ ), -+ status=_fixture_status_lrm_ops( -+ SCSI_STONITH_ID, -+ lrm_start_ops=lrm_start_ops, -+ lrm_monitor_ops=lrm_monitor_ops, -+ ), -+ ) -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ devices_opt = "devices={}".format(",".join(devices_updated)) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="start.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(devices_updated) -+ ), -+ args=[devices_opt], -+ ) -+ -+ for num, op in enumerate(resource_ops, 1): -+ name, interval, timeout, _ = op -+ if name != "monitor": -+ continue -+ args = [devices_opt] -+ args.append( -+ "CRM_meta_interval={}".format( -+ 1000 * timeout_to_seconds(interval) -+ ) -+ ) -+ if timeout: -+ args.append( -+ "CRM_meta_timeout={}".format( -+ 1000 * timeout_to_seconds(timeout) -+ ) -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name=f"{name}-{num}.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ devices=",".join(devices_updated), -+ ), -+ args=args, -+ ) -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture( -+ self.existing_corosync_nodes, -+ get_two_node(len(self.existing_corosync_nodes)), -+ ) -+ ) -+ self.config.http.corosync.get_corosync_online_targets( -+ node_labels=self.existing_nodes -+ ) -+ self.config.http.scsi.unfence_node( -+ devices_updated, node_labels=self.existing_nodes -+ ) -+ self.config.env.push_cib( -+ resources=fixture_scsi( -+ devices=devices_updated, resource_ops=resource_ops -+ ), -+ status=_fixture_status_lrm_ops( -+ SCSI_STONITH_ID, -+ lrm_start_ops=lrm_start_ops_updated, -+ lrm_monitor_ops=lrm_monitor_ops_updated, -+ ), -+ ) -+ stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, devices_updated -+ ) -+ self.env_assist.assert_reports([]) -+ -+ def test_update_1_to_1_devices(self): -+ self.assert_command_success( -+ devices_before=DEVICES_1, devices_updated=DEVICES_1 -+ ) -+ -+ def test_update_2_to_2_devices(self): -+ self.assert_command_success( -+ devices_before=DEVICES_1, devices_updated=DEVICES_1 -+ ) -+ -+ def test_update_1_to_2_devices(self): -+ self.assert_command_success() -+ -+ def test_update_1_to_3_devices(self): -+ self.assert_command_success( -+ devices_before=DEVICES_1, devices_updated=DEVICES_3 -+ ) -+ -+ def test_update_3_to_1_devices(self): -+ self.assert_command_success( -+ devices_before=DEVICES_3, devices_updated=DEVICES_1 -+ ) -+ -+ def test_update_3_to_2_devices(self): -+ self.assert_command_success( -+ devices_before=DEVICES_3, devices_updated=DEVICES_2 -+ ) -+ -+ def test_default_monitor(self): -+ self.assert_command_success() -+ -+ def test_no_monitor_ops(self): -+ self.assert_command_success( -+ resource_ops=(), lrm_monitor_ops=(), lrm_monitor_ops_updated=() -+ ) -+ -+ def test_1_monitor_with_timeout(self): -+ self.assert_command_success( -+ resource_ops=(("monitor", "30s", "10s", None),), -+ lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),), -+ lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),), -+ ) -+ -+ def test_2_monitor_ops_with_timeouts(self): -+ self.assert_command_success( -+ resource_ops=( -+ ("monitor", "30s", "10s", None), -+ ("monitor", "40s", "20s", None), -+ ), -+ lrm_monitor_ops=( -+ ("30000", DEFAULT_DIGEST, None, 
None), -+ ("40000", DEFAULT_DIGEST, None, None), -+ ), -+ lrm_monitor_ops_updated=( -+ ("30000", ALL_DIGEST, None, None), -+ ("40000", ALL_DIGEST, None, None), -+ ), -+ ) -+ -+ def test_2_monitor_ops_with_one_timeout(self): -+ self.assert_command_success( -+ resource_ops=( -+ ("monitor", "30s", "10s", None), -+ ("monitor", "60s", None, None), -+ ), -+ lrm_monitor_ops=( -+ ("30000", DEFAULT_DIGEST, None, None), -+ ("60000", DEFAULT_DIGEST, None, None), -+ ), -+ lrm_monitor_ops_updated=( -+ ("30000", ALL_DIGEST, None, None), -+ ("60000", ALL_DIGEST, None, None), -+ ), -+ ) -+ -+ def test_various_start_ops_one_lrm_start_op(self): -+ self.assert_command_success( -+ resource_ops=( -+ ("monitor", "60s", None, None), -+ ("start", "0s", "40s", None), -+ ("start", "0s", "30s", "1"), -+ ("start", "10s", "5s", None), -+ ("start", "20s", None, None), -+ ), -+ ) -+ -+ def test_1_nonrecurring_start_op_with_timeout(self): -+ self.assert_command_success( -+ resource_ops=( -+ ("monitor", "60s", None, None), -+ ("start", "0s", "40s", None), -+ ), -+ ) -+ -+ -+@mock.patch.object( -+ settings, -+ "pacemaker_api_result_schema", -+ rc("pcmk_api_rng/api-result.rng"), -+) -+class TestUpdateScsiDevicesFailures(TestCase): -+ # pylint: disable=too-many-public-methods -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ self.existing_nodes = ["node1", "node2", "node3"] -+ self.existing_corosync_nodes = [ -+ node_fixture(node, node_id) -+ for node_id, node in enumerate(self.existing_nodes, 1) -+ ] -+ self.config.env.set_known_nodes(self.existing_nodes) -+ -+ def test_pcmk_doesnt_support_digests(self): -+ self.config.runner.pcmk.is_resource_digests_supported( -+ is_supported=False -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, () -+ ), -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_OF_SCSI_DEVICES_NOT_SUPPORTED, -+ ) -+ ], -+ expected_in_processor=False, -+ ) -+ -+ def test_devices_cannot_be_empty(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, () -+ ) -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.INVALID_OPTION_VALUE, -+ option_name="devices", -+ option_value="", -+ allowed_values=None, -+ cannot_be_empty=True, -+ forbidden_characters=None, -+ ) -+ ] -+ ) -+ -+ def test_nonexistant_id(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), "non-existent-id", DEVICES_2 -+ ) -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.ID_NOT_FOUND, -+ id="non-existent-id", -+ expected_types=["primitive"], -+ context_type="cib", -+ context_id="", -+ ) -+ ] -+ ) -+ -+ def test_not_a_resource_id(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), -+ f"{SCSI_STONITH_ID}-instance_attributes-devices", -+ DEVICES_2, -+ ) -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.ID_BELONGS_TO_UNEXPECTED_TYPE, -+ 
id=f"{SCSI_STONITH_ID}-instance_attributes-devices", -+ expected_types=["primitive"], -+ current_type="nvpair", -+ ) -+ ] -+ ) -+ -+ def test_not_supported_resource_type(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), "dummy", DEVICES_2 -+ ) -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT, -+ resource_id="dummy", -+ resource_type="Dummy", -+ supported_stonith_types=["fence_scsi"], -+ ) -+ ] -+ ) -+ -+ def test_devices_option_missing(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load(resources=fixture_scsi(devices=None)) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ) -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason=( -+ "no devices option configured for stonith device " -+ f"'{SCSI_STONITH_ID}'" -+ ), -+ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -+ ) -+ ] -+ ) -+ -+ def test_devices_option_empty(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load(resources=fixture_scsi(devices="")) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ) -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason=( -+ "no devices option configured for stonith device " -+ f"'{SCSI_STONITH_ID}'" -+ ), -+ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -+ ) -+ ] -+ ) -+ -+ def test_stonith_resource_is_not_running(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_STOPPED, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason=f"resource '{SCSI_STONITH_ID}' is not running on any node", -+ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_NOT_RUNNING, -+ ) -+ ], -+ expected_in_processor=False, -+ ) -+ -+ def test_stonith_resource_is_running_on_more_than_one_node(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_2, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason=( -+ f"resource '{SCSI_STONITH_ID}' is running on more than " -+ "1 node" -+ ), -+ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -+ ) -+ ], -+ expected_in_processor=False, -+ ) -+ -+ def test_lrm_op_missing_digest_attributes(self): -+ devices = ",".join(DEVICES_2) -+ 
self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load( -+ resources=fixture_scsi(), -+ status=_fixture_status_lrm_ops_base( -+ SCSI_STONITH_ID, -+ f'', -+ ), -+ ) -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="start.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ devices=devices, -+ ), -+ args=[f"devices={devices}"], -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason="no digests attributes in lrm_rsc_op element", -+ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -+ ) -+ ], -+ expected_in_processor=False, -+ ) -+ -+ def test_crm_resource_digests_missing(self): -+ devices = ",".join(DEVICES_2) -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load( -+ resources=fixture_scsi(), -+ status=_fixture_status_lrm_ops_base( -+ SCSI_STONITH_ID, -+ ( -+ f'' -+ ), -+ ), -+ ) -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="start.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ devices=devices, -+ ), -+ args=[f"devices={devices}"], -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason=( -+ "necessary digest for 'op-restart-digest' attribute is " -+ "missing" -+ ), -+ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -+ ) -+ ], -+ expected_in_processor=False, -+ ) -+ -+ def test_no_lrm_start_op(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load( -+ resources=fixture_scsi(), -+ status=_fixture_status_lrm_ops(SCSI_STONITH_ID, lrm_start_ops=()), -+ ) -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason=( -+ "lrm_rsc_op element for start operation was not found" -+ ), -+ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -+ ) -+ ], -+ expected_in_processor=False, -+ ) -+ -+ def test_monitor_ops_and_lrm_monitor_ops_do_not_match(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load( -+ resources=fixture_scsi( -+ resource_ops=( -+ ("monitor", "30s", "10s", None), -+ ("monitor", "30s", "20s", "31"), -+ ("monitor", "60s", None, None), -+ ) -+ ), -+ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), -+ ) -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="start.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, SCSI_NODE, 
devices=",".join(DEVICES_2) -+ ), -+ args=["devices={}".format(",".join(DEVICES_2))], -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason=( -+ "number of lrm_rsc_op and op elements for monitor " -+ "operation differs" -+ ), -+ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -+ ) -+ ], -+ expected_in_processor=False, -+ ) -+ -+ def test_lrm_monitor_ops_not_found(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load( -+ resources=fixture_scsi( -+ resource_ops=(("monitor", "30s", None, None),) -+ ), -+ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), -+ ) -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="start.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) -+ ), -+ args=["devices={}".format(",".join(DEVICES_2))], -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason=( -+ "monitor lrm_rsc_op element for resource " -+ f"'{SCSI_STONITH_ID}', node '{SCSI_NODE}' and interval " -+ "'30000' not found" -+ ), -+ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -+ ) -+ ], -+ expected_in_processor=False, -+ ) -+ -+ def test_node_missing_name_and_missing_auth_token(self): -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load( -+ resources=fixture_scsi(), -+ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), -+ ) -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="start.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) -+ ), -+ args=["devices={}".format(",".join(DEVICES_2))], -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="monitor.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) -+ ), -+ args=[ -+ "devices={}".format(",".join(DEVICES_2)), -+ "CRM_meta_interval=60000", -+ ], -+ ) -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture( -+ self.existing_corosync_nodes -+ + [[("ring0_addr", "custom_node"), ("nodeid", "5")]], -+ ) -+ ) -+ self.config.env.set_known_nodes(self.existing_nodes[:-1]) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, -+ fatal=True, -+ ), -+ fixture.error( -+ reports.codes.HOST_NOT_FOUND, -+ host_list=[self.existing_nodes[-1]], -+ ), -+ ] -+ ) -+ -+ def _unfence_failure_common_calls(self): -+ devices = ",".join(DEVICES_2) -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.cib.load( -+ resources=fixture_scsi(), -+ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), -+ ) -+ 
self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="start.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ devices=devices, -+ ), -+ args=[f"devices={devices}"], -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="monitor.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ devices=devices, -+ ), -+ args=[ -+ f"devices={devices}", -+ "CRM_meta_interval=60000", -+ ], -+ ) -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture(self.existing_corosync_nodes) -+ ) -+ -+ def test_unfence_failure_unable_to_connect(self): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ node_labels=self.existing_nodes -+ ) -+ self.config.http.scsi.unfence_node( -+ DEVICES_2, -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ raw_data=json.dumps( -+ dict(devices=DEVICES_2, node=self.existing_nodes[0]) -+ ), -+ was_connected=False, -+ error_msg="errA", -+ ), -+ dict( -+ label=self.existing_nodes[1], -+ raw_data=json.dumps( -+ dict(devices=DEVICES_2, node=self.existing_nodes[1]) -+ ), -+ output=json.dumps( -+ dto.to_dict( -+ communication.dto.InternalCommunicationResultDto( -+ status=communication.const.COM_STATUS_ERROR, -+ status_msg="error", -+ report_list=[ -+ reports.ReportItem.error( -+ reports.messages.StonithUnfencingFailed( -+ "errB" -+ ) -+ ).to_dto() -+ ], -+ data=None, -+ ) -+ ) -+ ), -+ ), -+ dict( -+ label=self.existing_nodes[2], -+ raw_data=json.dumps( -+ dict(devices=DEVICES_2, node=self.existing_nodes[2]) -+ ), -+ ), -+ ], -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ node=self.existing_nodes[0], -+ command="api/v1/scsi-unfence-node/v1", -+ reason="errA", -+ ), -+ fixture.error( -+ reports.codes.STONITH_UNFENCING_FAILED, -+ reason="errB", -+ context=reports.dto.ReportItemContextDto( -+ node=self.existing_nodes[1], -+ ), -+ ), -+ ] -+ ) -+ -+ def test_unfence_failure_agent_script_failed(self): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ node_labels=self.existing_nodes -+ ) -+ self.config.http.scsi.unfence_node( -+ DEVICES_2, -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ raw_data=json.dumps( -+ dict(devices=DEVICES_2, node=self.existing_nodes[0]) -+ ), -+ ), -+ dict( -+ label=self.existing_nodes[1], -+ raw_data=json.dumps( -+ dict(devices=DEVICES_2, node=self.existing_nodes[1]) -+ ), -+ output=json.dumps( -+ dto.to_dict( -+ communication.dto.InternalCommunicationResultDto( -+ status=communication.const.COM_STATUS_ERROR, -+ status_msg="error", -+ report_list=[ -+ reports.ReportItem.error( -+ reports.messages.StonithUnfencingFailed( -+ "errB" -+ ) -+ ).to_dto() -+ ], -+ data=None, -+ ) -+ ) -+ ), -+ ), -+ dict( -+ label=self.existing_nodes[2], -+ raw_data=json.dumps( -+ dict(devices=DEVICES_2, node=self.existing_nodes[2]) -+ ), -+ ), -+ ], -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ 
reports.codes.STONITH_UNFENCING_FAILED, -+ reason="errB", -+ context=reports.dto.ReportItemContextDto( -+ node=self.existing_nodes[1], -+ ), -+ ), -+ ] -+ ) -+ -+ def test_corosync_targets_unable_to_connect(self): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ output='{"corosync":true}', -+ ), -+ ] -+ + [ -+ dict( -+ label=node, -+ was_connected=False, -+ errno=7, -+ error_msg="an error", -+ ) -+ for node in self.existing_nodes[1:] -+ ] -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -+ ), -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ force_code=reports.codes.SKIP_OFFLINE_NODES, -+ node=node, -+ command="remote/status", -+ reason="an error", -+ ) -+ for node in self.existing_nodes[1:] -+ ] -+ ) -+ -+ def test_corosync_targets_skip_offline_unfence_node_running_corosync( -+ self, -+ ): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ output='{"corosync":true}', -+ ), -+ dict( -+ label=self.existing_nodes[1], -+ output='{"corosync":false}', -+ ), -+ dict( -+ label=self.existing_nodes[2], -+ was_connected=False, -+ errno=7, -+ error_msg="an error", -+ ), -+ ] -+ ) -+ self.config.http.scsi.unfence_node( -+ DEVICES_2, -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ raw_data=json.dumps( -+ dict(devices=DEVICES_2, node=self.existing_nodes[0]) -+ ), -+ ), -+ ], -+ ) -+ self.config.env.push_cib( -+ resources=fixture_scsi(devices=DEVICES_2), -+ status=_fixture_status_lrm_ops( -+ SCSI_STONITH_ID, -+ lrm_start_ops=DEFAULT_LRM_START_OPS_UPDATED, -+ lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS_UPDATED, -+ ), -+ ) -+ stonith.update_scsi_devices( -+ self.env_assist.get_env(), -+ SCSI_STONITH_ID, -+ DEVICES_2, -+ force_flags=[reports.codes.SKIP_OFFLINE_NODES], -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.warn( -+ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ node=self.existing_nodes[2], -+ command="remote/status", -+ reason="an error", -+ ), -+ ] -+ ) -+ -+ def test_corosync_targets_unable_to_perform_unfencing_operation( -+ self, -+ ): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ was_connected=False, -+ errno=7, -+ error_msg="an error", -+ ), -+ dict( -+ label=self.existing_nodes[1], -+ was_connected=False, -+ errno=7, -+ error_msg="an error", -+ ), -+ dict( -+ label=self.existing_nodes[2], -+ output='{"corosync":false}', -+ ), -+ ] -+ ) -+ self.config.http.scsi.unfence_node(DEVICES_2, communication_list=[]) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), -+ SCSI_STONITH_ID, -+ DEVICES_2, -+ force_flags=[reports.codes.SKIP_OFFLINE_NODES], -+ ), -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.warn( -+ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ node=node, -+ command="remote/status", -+ reason="an error", -+ ) -+ for node in self.existing_nodes[0:2] -+ ] -+ + [ -+ fixture.error( -+ reports.codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE, -+ ), -+ ] -+ ) --- -2.31.1 - diff --git 
a/SOURCES/bz1384485-01-fix-rsc-update-cmd-when-unable-to-get-agent-metadata.patch b/SOURCES/bz1384485-01-fix-rsc-update-cmd-when-unable-to-get-agent-metadata.patch new file mode 100644 index 0000000..2960f32 --- /dev/null +++ b/SOURCES/bz1384485-01-fix-rsc-update-cmd-when-unable-to-get-agent-metadata.patch @@ -0,0 +1,73 @@ +From e5fc48f45a60228a82980dcd6d68ca01cf447eac Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Tue, 7 Dec 2021 11:58:09 +0100 +Subject: [PATCH 2/3] fix rsc update cmd when unable to get agent metadata + +`resource update` command failed with a traceback when updating a +resource with a non-existing resource agent +--- + pcs/resource.py | 14 ++++++++------ + pcs_test/tier1/legacy/test_resource.py | 21 +++++++++++++++++++++ + 2 files changed, 29 insertions(+), 6 deletions(-) + +diff --git a/pcs/resource.py b/pcs/resource.py +index c0e8b0d9..4514338d 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -1049,13 +1049,15 @@ def resource_update(lib, args, modifiers, deal_with_guest_change=True): + if report_list: + process_library_reports(report_list) + except lib_ra.ResourceAgentError as e: +- severity = ( +- reports.ReportItemSeverity.WARNING +- if modifiers.get("--force") +- else reports.ReportItemSeverity.ERROR +- ) + process_library_reports( +- [lib_ra.resource_agent_error_to_report_item(e, severity)] ++ [ ++ lib_ra.resource_agent_error_to_report_item( ++ e, ++ reports.get_severity( ++ reports.codes.FORCE, modifiers.get("--force") ++ ), ++ ) ++ ] + ) + except LibraryError as e: + process_library_reports(e.args) +diff --git a/pcs_test/tier1/legacy/test_resource.py b/pcs_test/tier1/legacy/test_resource.py +index 3f0e08b9..bae0587a 100644 +--- a/pcs_test/tier1/legacy/test_resource.py ++++ b/pcs_test/tier1/legacy/test_resource.py +@@ -4879,6 +4879,27 @@ class UpdateInstanceAttrs( + ), + ) + ++ def test_nonexisting_agent(self): ++ agent = "ocf:pacemaker:nonexistent" ++ message = ( ++ f"Agent '{agent}' is not installed or does " ++ "not provide valid metadata: Metadata query for " ++ f"{agent} failed: Input/output error" ++ ) ++ self.assert_pcs_success( ++ f"resource create --force D0 {agent}".split(), ++ f"Warning: {message}\n", ++ ) ++ ++ self.assert_pcs_fail( ++ "resource update D0 test=testA".split(), ++ f"Error: {message}, use --force to override\n", ++ ) ++ self.assert_pcs_success( ++ "resource update --force D0 test=testA".split(), ++ f"Warning: {message}\n", ++ ) ++ + def test_update_existing(self): + xml = """ + +-- +2.31.1 + diff --git a/SOURCES/bz1990784-01-Multiple-fixes-of-pcs-resource-move-autodelete-comma.patch b/SOURCES/bz1990784-01-Multiple-fixes-of-pcs-resource-move-autodelete-comma.patch new file mode 100644 index 0000000..ae32aae --- /dev/null +++ b/SOURCES/bz1990784-01-Multiple-fixes-of-pcs-resource-move-autodelete-comma.patch @@ -0,0 +1,1031 @@ +From fe1ad27f32e69e3e7c046b51e5406a0693ea1c35 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Tue, 11 Jan 2022 08:01:10 +0100 +Subject: [PATCH 3/5] Multiple fixes of `pcs resource move --autodelete` + command + +--- + pcs/common/reports/codes.py | 1 + + pcs/common/reports/messages.py | 21 ++ + pcs/lib/cib/node.py | 14 +- + pcs/lib/commands/resource.py | 105 ++++++- + pcs/lib/node.py | 7 +- + .../tier0/common/reports/test_messages.py | 12 + + .../resource/test_resource_move_autoclean.py | 280 +++++++++++++++++- + .../resource/test_resource_move_ban.py | 45 ++- + .../tools/command_env/config_runner_pcmk.py | 2 + + pcs_test/tools/command_env/mock_runner.py | 2 +- + pcs_test/tools/fixture_cib.py | 1 + + 11 
files changed, 456 insertions(+), 34 deletions(-) + +diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py +index 5bae7170..3e0512d9 100644 +--- a/pcs/common/reports/codes.py ++++ b/pcs/common/reports/codes.py +@@ -418,6 +418,7 @@ RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED = M( + ) + RESOURCE_MOVE_CONSTRAINT_CREATED = M("RESOURCE_MOVE_CONSTRAINT_CREATED") + RESOURCE_MOVE_CONSTRAINT_REMOVED = M("RESOURCE_MOVE_CONSTRAINT_REMOVED") ++RESOURCE_MOVE_NOT_AFFECTING_RESOURCE = M("RESOURCE_MOVE_NOT_AFFECTING_RESOURCE") + RESOURCE_MOVE_AFFECTS_OTRHER_RESOURCES = M( + "RESOURCE_MOVE_AFFECTS_OTRHER_RESOURCES" + ) +diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py +index 43ce38e1..9d665e73 100644 +--- a/pcs/common/reports/messages.py ++++ b/pcs/common/reports/messages.py +@@ -6110,6 +6110,27 @@ class ResourceMoveConstraintRemoved(ReportItemMessage): + ) + + ++@dataclass(frozen=True) ++class ResourceMoveNotAffectingResource(ReportItemMessage): ++ """ ++ Creating a location constraint to move a resource has no effect on the ++ resource. ++ ++ resource_id -- id of the resource to be moved ++ """ ++ ++ resource_id: str ++ _code = codes.RESOURCE_MOVE_NOT_AFFECTING_RESOURCE ++ ++ @property ++ def message(self) -> str: ++ return ( ++ f"Unable to move resource '{self.resource_id}' using a location " ++ "constraint. Current location of the resource may be affected by " ++ "some other constraint." ++ ) ++ ++ + @dataclass(frozen=True) + class ResourceMoveAffectsOtherResources(ReportItemMessage): + """ +diff --git a/pcs/lib/cib/node.py b/pcs/lib/cib/node.py +index 20a41ca0..df2ffbaa 100644 +--- a/pcs/lib/cib/node.py ++++ b/pcs/lib/cib/node.py +@@ -1,12 +1,17 @@ + from collections import namedtuple ++from typing import Set + from lxml import etree ++from lxml.etree import _Element + + from pcs.common import reports + from pcs.common.reports.item import ReportItem + from pcs.lib.cib.nvpair import update_nvset + from pcs.lib.cib.tools import get_nodes + from pcs.lib.errors import LibraryError +-from pcs.lib.xml_tools import append_when_useful ++from pcs.lib.xml_tools import ( ++ append_when_useful, ++ get_root, ++) + + + class PacemakerNode(namedtuple("PacemakerNode", "name addr")): +@@ -58,6 +63,13 @@ def update_node_instance_attrs( + append_when_useful(cib_nodes, node_el) + + ++def get_node_names(cib: _Element) -> Set[str]: ++ return { ++ str(node.attrib["uname"]) ++ for node in get_nodes(get_root(cib)).iterfind("./node") ++ } ++ ++ + def _ensure_node_exists(tree, node_name, state_nodes=None): + """ + Make sure node with specified name exists +diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py +index d0e8f4db..82ce73e0 100644 +--- a/pcs/lib/commands/resource.py ++++ b/pcs/lib/commands/resource.py +@@ -50,12 +50,16 @@ from pcs.lib.cib.tools import ( + from pcs.lib.env import LibraryEnvironment, WaitType + from pcs.lib.errors import LibraryError + from pcs.lib.external import CommandRunner +-from pcs.lib.node import get_existing_nodes_names_addrs ++from pcs.lib.node import ( ++ get_existing_nodes_names_addrs, ++ get_pacemaker_node_names, ++) + from pcs.lib.pacemaker import simulate as simulate_tools + from pcs.lib.pacemaker.live import ( + diff_cibs_xml, + get_cib, + get_cib_xml, ++ get_cluster_status_dom, + has_resource_unmove_unban_expired_support, + push_cib_diff_xml, + resource_ban, +@@ -1589,6 +1593,16 @@ def move( + ) + + ++def _nodes_exist_reports( ++ cib: _Element, node_names: Iterable[str] ++) -> ReportItemList: ++ existing_node_names = 
get_pacemaker_node_names(cib) ++ return [ ++ reports.ReportItem.error(reports.messages.NodeNotFound(node_name)) ++ for node_name in (set(node_names) - existing_node_names) ++ ] ++ ++ + def move_autoclean( + env: LibraryEnvironment, + resource_id: str, +@@ -1626,6 +1640,9 @@ def move_autoclean( + if resource_el is not None: + report_list.extend(resource.common.validate_move(resource_el, master)) + ++ if node: ++ report_list.extend(_nodes_exist_reports(cib, [node])) ++ + if env.report_processor.report_list(report_list).has_errors: + raise LibraryError() + +@@ -1659,8 +1676,32 @@ def move_autoclean( + add_constraint_cib_diff = diff_cibs_xml( + env.cmd_runner(), env.report_processor, cib_xml, rsc_moved_cib_xml + ) ++ with get_tmp_cib( ++ env.report_processor, rsc_moved_cib_xml ++ ) as rsc_moved_constraint_cleared_cib_file: ++ stdout, stderr, retval = resource_unmove_unban( ++ env.cmd_runner( ++ dict(CIB_file=rsc_moved_constraint_cleared_cib_file.name) ++ ), ++ resource_id, ++ node, ++ master, ++ ) ++ if retval != 0: ++ raise LibraryError( ++ ReportItem.error( ++ reports.messages.ResourceUnmoveUnbanPcmkError( ++ resource_id, stdout, stderr ++ ) ++ ) ++ ) ++ rsc_moved_constraint_cleared_cib_file.seek(0) ++ constraint_removed_cib = rsc_moved_constraint_cleared_cib_file.read() + remove_constraint_cib_diff = diff_cibs_xml( +- env.cmd_runner(), env.report_processor, rsc_moved_cib_xml, cib_xml ++ env.cmd_runner(), ++ env.report_processor, ++ rsc_moved_cib_xml, ++ constraint_removed_cib, + ) + + if not (add_constraint_cib_diff and remove_constraint_cib_diff): +@@ -1689,13 +1730,15 @@ def move_autoclean( + ) + ) + ) +- _ensure_resource_is_not_moved( ++ _ensure_resource_moved_and_not_moved_back( + env.cmd_runner, + env.report_processor, + etree_to_str(after_move_simulated_cib), + remove_constraint_cib_diff, + resource_id, + strict, ++ resource_state_before, ++ node, + ) + push_cib_diff_xml(env.cmd_runner(), add_constraint_cib_diff) + env.report_processor.report( +@@ -1704,13 +1747,15 @@ def move_autoclean( + ) + ) + env.wait_for_idle(wait_timeout) +- _ensure_resource_is_not_moved( ++ _ensure_resource_moved_and_not_moved_back( + env.cmd_runner, + env.report_processor, + get_cib_xml(env.cmd_runner()), + remove_constraint_cib_diff, + resource_id, + strict, ++ resource_state_before, ++ node, + ) + push_cib_diff_xml(env.cmd_runner(), remove_constraint_cib_diff) + env.report_processor.report( +@@ -1730,16 +1775,35 @@ def move_autoclean( + raise LibraryError() + + +-def _ensure_resource_is_not_moved( ++def _ensure_resource_moved_and_not_moved_back( + runner_factory: Callable[[Optional[Mapping[str, str]]], CommandRunner], + report_processor: reports.ReportProcessor, + cib_xml: str, + remove_constraint_cib_diff: str, + resource_id: str, + strict: bool, ++ resource_state_before: Dict[str, List[str]], ++ node: Optional[str], + ) -> None: + # pylint: disable=too-many-locals + with get_tmp_cib(report_processor, cib_xml) as rsc_unmove_cib_file: ++ if not _was_resource_moved( ++ node, ++ resource_state_before, ++ get_resource_state( ++ get_cluster_status_dom( ++ runner_factory(dict(CIB_file=rsc_unmove_cib_file.name)) ++ ), ++ resource_id, ++ ), ++ ): ++ raise LibraryError( ++ reports.ReportItem.error( ++ reports.messages.ResourceMoveNotAffectingResource( ++ resource_id ++ ) ++ ) ++ ) + push_cib_diff_xml( + runner_factory(dict(CIB_file=rsc_unmove_cib_file.name)), + remove_constraint_cib_diff, +@@ -1809,20 +1873,31 @@ def _resource_running_on_nodes( + return frozenset() + + ++def _was_resource_moved( ++ node: 
Optional[str], ++ resource_state_before: Dict[str, List[str]], ++ resource_state_after: Dict[str, List[str]], ++) -> bool: ++ running_on_nodes = _resource_running_on_nodes(resource_state_after) ++ return not bool( ++ resource_state_before ++ and ( # running resource moved ++ not running_on_nodes ++ or (node and node not in running_on_nodes) ++ or (resource_state_before == resource_state_after) ++ ) ++ ) ++ ++ + def _move_wait_report( + resource_id: str, + node: Optional[str], + resource_state_before: Dict[str, List[str]], + resource_state_after: Dict[str, List[str]], + ) -> ReportItem: +- allowed_nodes = frozenset([node] if node else []) +- running_on_nodes = _resource_running_on_nodes(resource_state_after) +- + severity = reports.item.ReportItemSeverity.info() +- if resource_state_before and ( # running resource moved +- not running_on_nodes +- or (allowed_nodes and allowed_nodes.isdisjoint(running_on_nodes)) +- or (resource_state_before == resource_state_after) ++ if not _was_resource_moved( ++ node, resource_state_before, resource_state_after + ): + severity = reports.item.ReportItemSeverity.error() + if not resource_state_after: +@@ -1873,14 +1948,18 @@ class _MoveBanTemplate: + lifetime=None, + wait: WaitType = False, + ): ++ # pylint: disable=too-many-locals + # validate + wait_timeout = env.ensure_wait_satisfiable(wait) # raises on error + ++ cib = env.get_cib() + resource_el, report_list = resource.common.find_one_resource( +- get_resources(env.get_cib()), resource_id ++ get_resources(cib), resource_id + ) + if resource_el is not None: + report_list.extend(self._validate(resource_el, master)) ++ if node: ++ report_list.extend(_nodes_exist_reports(cib, [node])) + if env.report_processor.report_list(report_list).has_errors: + raise LibraryError() + +diff --git a/pcs/lib/node.py b/pcs/lib/node.py +index ff08f747..3a7f236e 100644 +--- a/pcs/lib/node.py ++++ b/pcs/lib/node.py +@@ -3,6 +3,7 @@ from typing import ( + List, + Optional, + Tuple, ++ Set, + ) + + from lxml.etree import _Element +@@ -11,7 +12,7 @@ from pcs.common import reports + from pcs.common.reports import ReportItemList + from pcs.common.reports import ReportItemSeverity + from pcs.common.reports.item import ReportItem +-from pcs.lib.cib.node import PacemakerNode ++from pcs.lib.cib.node import PacemakerNode, get_node_names + from pcs.lib.cib.resource import remote_node, guest_node + from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade + from pcs.lib.corosync.node import CorosyncNode +@@ -28,6 +29,10 @@ def get_existing_nodes_names( + ) + + ++def get_pacemaker_node_names(cib: _Element) -> Set[str]: ++ return get_node_names(cib) | set(get_existing_nodes_names(None, cib)[0]) ++ ++ + def get_existing_nodes_names_addrs( + corosync_conf=None, cib=None, error_on_missing_name=False + ): +diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py +index c85aaa9c..4a7b4945 100644 +--- a/pcs_test/tier0/common/reports/test_messages.py ++++ b/pcs_test/tier0/common/reports/test_messages.py +@@ -4515,6 +4515,18 @@ class ResourceMoveConstraintRemoved(NameBuildTest): + ) + + ++class ResourceMoveNotAffectingResource(NameBuildTest): ++ def test_success(self): ++ self.assert_message_from_report( ++ ( ++ "Unable to move resource 'R1' using a location constraint. " ++ "Current location of the resource may be affected by some " ++ "other constraint." 
++ ), ++ reports.ResourceMoveNotAffectingResource("R1"), ++ ) ++ ++ + class ResourceMoveAffectsOtherResources(NameBuildTest): + def test_multiple(self): + self.assert_message_from_report( +diff --git a/pcs_test/tier0/lib/commands/resource/test_resource_move_autoclean.py b/pcs_test/tier0/lib/commands/resource/test_resource_move_autoclean.py +index 32d758de..1bd4ee82 100644 +--- a/pcs_test/tier0/lib/commands/resource/test_resource_move_autoclean.py ++++ b/pcs_test/tier0/lib/commands/resource/test_resource_move_autoclean.py +@@ -20,6 +20,25 @@ from pcs_test.tools.command_env import get_env_tools + from pcs_test.tools.misc import get_test_resource as rc + + ++def _node_fixture(name, node_id): ++ return f'' ++ ++ ++def _node_list_fixture(nodes): ++ return "\n".join( ++ _node_fixture(node_name, node_id) ++ for node_id, node_name in enumerate(nodes) ++ ) ++ ++ ++def _nodes_section_fixture(content): ++ return f""" ++ ++ {content} ++ ++ """ ++ ++ + def _rsc_primitive_fixture(res_id): + return f'' + +@@ -145,11 +164,17 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup): + resources=_resources_tag( + _resource_primitive + _resource_promotable_clone + ), ++ nodes=_nodes_section_fixture( ++ _node_list_fixture([self.orig_node, self.new_node]) ++ ), + ) + self.orig_cib = etree_to_str( + xml_fromstring(self.config.calls.get(config_load_cib_name).stdout) + ) + self.cib_with_constraint = '' ++ self.cib_without_constraint = ( ++ '' ++ ) + self.cib_simulate_constraint = ( + '' + ) +@@ -160,6 +185,9 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup): + self.cib_diff_add_constraint_updated_tmp_file_name = ( + "cib_diff_add_constraint_updated" + ) ++ self.cib_constraint_removed_by_unmove_file_name = ( ++ "cib_constraint_removed_by_unmove" ++ ) + self.cib_diff_remove_constraint_orig_tmp_file_name = ( + "cib_diff_remove_constraint_orig" + ) +@@ -220,13 +248,18 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup): + self.cib_diff_add_constraint_updated_tmp_file_name, + orig_content=self.cib_with_constraint, + ), ++ TmpFileCall( ++ self.cib_constraint_removed_by_unmove_file_name, ++ orig_content=self.cib_with_constraint, ++ new_content=self.cib_without_constraint, ++ ), + TmpFileCall( + self.cib_diff_remove_constraint_orig_tmp_file_name, + orig_content=self.cib_with_constraint, + ), + TmpFileCall( + self.cib_diff_remove_constraint_updated_tmp_file_name, +- orig_content=self.orig_cib, ++ orig_content=self.cib_without_constraint, + ), + TmpFileCall( + self.simulated_cib_add_constraint_tmp_file_name, +@@ -296,6 +329,12 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup): + stdout=self.cib_diff_add_constraint, + name="runner.cib.diff.add_constraint", + ) ++ self.config.runner.pcmk.resource_clear( ++ resource=resource_id, ++ master=is_promotable, ++ node=self.new_node if with_node else None, ++ env=dict(CIB_file=self.cib_constraint_removed_by_unmove_file_name), ++ ) + self.config.runner.cib.diff( + self.cib_diff_remove_constraint_orig_tmp_file_name, + self.cib_diff_remove_constraint_updated_tmp_file_name, +@@ -308,6 +347,13 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup): + cib_xml=self.cib_with_constraint, + name="pcmk.simulate.rsc.move", + ) ++ self.config.runner.pcmk.load_state( ++ resources=status_after, ++ name="runner.pcmk.load_state.mid_simulation", ++ env=dict( ++ CIB_file=self.cib_apply_diff_remove_constraint_from_simulated_cib_tmp_file_name ++ ), ++ ) + self.config.runner.cib.push_diff( + cib_diff=self.cib_diff_remove_constraint, + name="pcmk.push_cib_diff.simulation.remove_constraint", 
+@@ -335,6 +381,13 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup): + self.cib_with_constraint, + name="load_cib_after_move", + ) ++ self.config.runner.pcmk.load_state( ++ resources=status_after, ++ name="runner.pcmk.load_state.after_push", ++ env=dict( ++ CIB_file=self.cib_apply_diff_remove_constraint_after_push_tmp_file_name ++ ), ++ ) + self.config.runner.cib.push_diff( + cib_diff=self.cib_diff_remove_constraint, + name="pcmk.push_cib_diff.simulation.remove_constraint_after_move", +@@ -380,6 +433,11 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup): + file_path=self.cib_diff_add_constraint_updated_tmp_file_name, + content=self.cib_with_constraint, + ), ++ fixture.debug( ++ reports.codes.TMP_FILE_WRITE, ++ file_path=self.cib_constraint_removed_by_unmove_file_name, ++ content=self.cib_with_constraint, ++ ), + fixture.debug( + reports.codes.TMP_FILE_WRITE, + file_path=self.cib_diff_remove_constraint_orig_tmp_file_name, +@@ -388,7 +446,7 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup): + fixture.debug( + reports.codes.TMP_FILE_WRITE, + file_path=self.cib_diff_remove_constraint_updated_tmp_file_name, +- content=self.orig_cib, ++ content=self.cib_without_constraint, + ), + fixture.debug( + reports.codes.TMP_FILE_WRITE, +@@ -758,9 +816,7 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup): + resources=_state_resource_fixture(resource_id, "Stopped"), + ) + self.env_assist.assert_raise_library_error( +- lambda: move_autoclean( +- self.env_assist.get_env(), resource_id, node="node" +- ), ++ lambda: move_autoclean(self.env_assist.get_env(), resource_id), + [ + fixture.error( + reports.codes.CANNOT_MOVE_RESOURCE_NOT_RUNNING, +@@ -770,11 +826,33 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup): + expected_in_processor=False, + ) + ++ def test_node_not_found(self): ++ resource_id = "A" ++ node = "non_existing_node" ++ self.config.runner.cib.load( ++ resources=_resources_tag(_rsc_primitive_fixture(resource_id)), ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: move_autoclean( ++ self.env_assist.get_env(), resource_id, node ++ ), ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.NODE_NOT_FOUND, ++ node=node, ++ searched_types=[], ++ ) ++ ], ++ ) ++ + def test_constraint_already_exist(self): + resource_id = "A" + config_load_cib_name = "load_cib" + node = "node1" + cib_with_constraint = '' ++ cib_without_constraint = '' + cib_rsc_move_tmp_file_name = "cib_rsc_move_tmp_file" + cib_diff_add_constraint_orig_tmp_file_name = ( + "cib_diff_add_constraint_orig" +@@ -788,6 +866,9 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup): + cib_diff_remove_constraint_updated_tmp_file_name = ( + "cib_diff_remove_constraint_updated" + ) ++ cib_constraint_removed_by_unmove_file_name = ( ++ "cib_constraint_removed_by_unmove" ++ ) + self.config.runner.cib.load( + resources=_resources_tag(_rsc_primitive_fixture(resource_id)), + constraints=f""" +@@ -795,6 +876,7 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup): + + + """, ++ nodes=_nodes_section_fixture(_node_list_fixture([node])), + name=config_load_cib_name, + ) + orig_cib = etree_to_str( +@@ -815,13 +897,18 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup): + cib_diff_add_constraint_updated_tmp_file_name, + orig_content=cib_with_constraint, + ), ++ TmpFileCall( ++ cib_constraint_removed_by_unmove_file_name, ++ orig_content=cib_with_constraint, ++ new_content=cib_without_constraint, ++ ), + TmpFileCall( + cib_diff_remove_constraint_orig_tmp_file_name, + 
orig_content=cib_with_constraint, + ), + TmpFileCall( + cib_diff_remove_constraint_updated_tmp_file_name, +- orig_content=orig_cib, ++ orig_content=cib_without_constraint, + ), + ] + ) +@@ -839,6 +926,11 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup): + stdout="", + name="runner.cib.diff.add_constraint", + ) ++ self.config.runner.pcmk.resource_clear( ++ resource=resource_id, ++ node=node, ++ env=dict(CIB_file=cib_constraint_removed_by_unmove_file_name), ++ ) + self.config.runner.cib.diff( + cib_diff_remove_constraint_orig_tmp_file_name, + cib_diff_remove_constraint_updated_tmp_file_name, +@@ -863,6 +955,11 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup): + file_path=cib_diff_add_constraint_updated_tmp_file_name, + content=cib_with_constraint, + ), ++ fixture.debug( ++ reports.codes.TMP_FILE_WRITE, ++ file_path=cib_constraint_removed_by_unmove_file_name, ++ content=cib_with_constraint, ++ ), + fixture.debug( + reports.codes.TMP_FILE_WRITE, + file_path=cib_diff_remove_constraint_orig_tmp_file_name, +@@ -871,7 +968,7 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup): + fixture.debug( + reports.codes.TMP_FILE_WRITE, + file_path=cib_diff_remove_constraint_updated_tmp_file_name, +- content=orig_cib, ++ content=cib_without_constraint, + ), + fixture.info( + reports.codes.NO_ACTION_NECESSARY, +@@ -896,6 +993,9 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + self.cib_diff_add_constraint = "diff_add_constraint" + self.cib_diff_remove_constraint = "diff_remove_constraint" + self.cib_with_constraint = '' ++ self.cib_without_constraint = ( ++ '' ++ ) + self.cib_rsc_move_tmp_file_name = "cib_rsc_move_tmp_file" + self.cib_diff_add_constraint_orig_tmp_file_name = ( + "cib_diff_add_constraint_orig" +@@ -903,6 +1003,9 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + self.cib_diff_add_constraint_updated_tmp_file_name = ( + "cib_diff_add_constraint_updated" + ) ++ self.cib_constraint_removed_by_unmove_file_name = ( ++ "cib_constraint_removed_by_unmove" ++ ) + self.cib_diff_remove_constraint_orig_tmp_file_name = ( + "cib_diff_remove_constraint_orig" + ) +@@ -951,6 +1054,9 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + + self.config.runner.cib.load( + resources=_resources_tag(_rsc_primitive_fixture(self.resource_id)), ++ nodes=_nodes_section_fixture( ++ _node_list_fixture(["node1", "node2"]) ++ ), + name=self.config_load_cib_name, + ) + self.orig_cib = etree_to_str( +@@ -979,13 +1085,18 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + self.cib_diff_add_constraint_updated_tmp_file_name, + orig_content=self.cib_with_constraint, + ), ++ TmpFileCall( ++ self.cib_constraint_removed_by_unmove_file_name, ++ orig_content=self.cib_with_constraint, ++ new_content=self.cib_without_constraint, ++ ), + TmpFileCall( + self.cib_diff_remove_constraint_orig_tmp_file_name, + orig_content=self.cib_with_constraint, + ), + TmpFileCall( + self.cib_diff_remove_constraint_updated_tmp_file_name, +- orig_content=self.orig_cib, ++ orig_content=self.cib_without_constraint, + ), + TmpFileCall( + self.simulated_cib_add_constraint_tmp_file_name, +@@ -1067,6 +1178,11 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + stdout=self.cib_diff_add_constraint, + name="runner.cib.diff.add_constraint", + ) ++ self.config.runner.pcmk.resource_clear( ++ resource=self.resource_id, ++ node=node, ++ env=dict(CIB_file=self.cib_constraint_removed_by_unmove_file_name), ++ ) + self.config.runner.cib.diff( + self.cib_diff_remove_constraint_orig_tmp_file_name, + 
self.cib_diff_remove_constraint_updated_tmp_file_name, +@@ -1081,6 +1197,15 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + ) + if stage <= 1: + return ++ self.config.runner.pcmk.load_state( ++ resources=_state_resource_fixture( ++ self.resource_id, "Started", node if node else "node2" ++ ), ++ name="runner.pcmk.load_state.mid_simulation", ++ env=dict( ++ CIB_file=self.cib_apply_diff_remove_constraint_from_simulated_cib_tmp_file_name ++ ), ++ ) + self.config.runner.cib.push_diff( + cib_diff=self.cib_diff_remove_constraint, + name="pcmk.push_cib_diff.simulation.remove_constraint", +@@ -1110,6 +1235,17 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + self.cib_with_constraint, + name="load_cib_after_move", + ) ++ if stage <= 3: ++ return ++ self.config.runner.pcmk.load_state( ++ resources=_state_resource_fixture( ++ self.resource_id, "Started", node if node else "node2" ++ ), ++ name="runner.pcmk.load_state.after_push", ++ env=dict( ++ CIB_file=self.cib_apply_diff_remove_constraint_after_push_tmp_file_name ++ ), ++ ) + self.config.runner.cib.push_diff( + cib_diff=self.cib_diff_remove_constraint, + name="pcmk.push_cib_diff.simulation.remove_constraint_after_move", +@@ -1126,7 +1262,7 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + ), + name="pcmk.simulate.rsc.unmove.after_push", + ) +- if stage <= 3: ++ if stage <= 4: + return + self.config.runner.cib.push_diff( + cib_diff=self.cib_diff_remove_constraint, +@@ -1153,6 +1289,11 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + file_path=self.cib_diff_add_constraint_updated_tmp_file_name, + content=self.cib_with_constraint, + ), ++ fixture.debug( ++ reports.codes.TMP_FILE_WRITE, ++ file_path=self.cib_constraint_removed_by_unmove_file_name, ++ content=self.cib_with_constraint, ++ ), + fixture.debug( + reports.codes.TMP_FILE_WRITE, + file_path=self.cib_diff_remove_constraint_orig_tmp_file_name, +@@ -1161,7 +1302,7 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + fixture.debug( + reports.codes.TMP_FILE_WRITE, + file_path=self.cib_diff_remove_constraint_updated_tmp_file_name, +- content=self.orig_cib, ++ content=self.cib_without_constraint, + ), + fixture.debug( + reports.codes.TMP_FILE_WRITE, +@@ -1199,7 +1340,7 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + reports.codes.WAIT_FOR_IDLE_STARTED, + timeout=0, + ), +- ][: {None: None, 3: -2, 2: 7, 1: 5}[stage]] ++ ][: {None: None, 4: -2, 3: 10, 2: 8, 1: 6}[stage]] + + def test_move_affects_other_resources_strict(self): + self.tmp_file_mock_obj.set_calls( +@@ -1304,7 +1445,8 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + ), + ) + ) +- self.set_up_testing_env(stage=3) ++ setup_stage = 4 ++ self.set_up_testing_env(stage=setup_stage) + self.env_assist.assert_raise_library_error( + lambda: move_autoclean(self.env_assist.get_env(), self.resource_id), + [ +@@ -1316,7 +1458,7 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + ], + expected_in_processor=False, + ) +- self.env_assist.assert_reports(self.get_reports(stage=3)) ++ self.env_assist.assert_reports(self.get_reports(stage=setup_stage)) + + def test_unmove_after_push_affects_other_resources_strict(self): + self.tmp_file_mock_obj.set_calls( +@@ -1330,7 +1472,8 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + ), + ) + ) +- self.set_up_testing_env(stage=3) ++ setup_stage = 4 ++ self.set_up_testing_env(stage=setup_stage) + self.env_assist.assert_raise_library_error( + lambda: move_autoclean( + self.env_assist.get_env(), +@@ -1346,7 +1489,7 @@ class 
MoveAutocleanFailures(MoveAutocleanCommonSetup): + ], + expected_in_processor=False, + ) +- self.env_assist.assert_reports(self.get_reports(stage=3)) ++ self.env_assist.assert_reports(self.get_reports(stage=setup_stage)) + + def test_resource_not_runnig_after_move(self): + self.tmp_file_mock_obj.set_calls( +@@ -1381,8 +1524,113 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup): + ] + ) + ++ def test_simulation_resource_not_moved(self): ++ node = "node2" ++ different_node = f"different-{node}" ++ setup_stage = 1 ++ self.tmp_file_mock_obj.set_calls( ++ self.get_tmp_files_mocks( ++ _simulation_transition_fixture( ++ _simulation_synapses_fixture(self.resource_id) ++ ), ++ ) ++ + [ ++ TmpFileCall( ++ self.cib_apply_diff_remove_constraint_from_simulated_cib_tmp_file_name, ++ orig_content=self.cib_simulate_constraint, ++ ), ++ ] ++ ) ++ self.set_up_testing_env(node=node, stage=setup_stage) ++ self.config.runner.pcmk.load_state( ++ resources=_state_resource_fixture( ++ self.resource_id, "Started", different_node ++ ), ++ name="runner.pcmk.load_state.final", ++ env=dict( ++ CIB_file=self.cib_apply_diff_remove_constraint_from_simulated_cib_tmp_file_name ++ ), ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: move_autoclean( ++ self.env_assist.get_env(), ++ self.resource_id, ++ node=node, ++ ), ++ [ ++ fixture.error( ++ reports.codes.RESOURCE_MOVE_NOT_AFFECTING_RESOURCE, ++ resource_id=self.resource_id, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ self.env_assist.assert_reports( ++ self.get_reports(stage=setup_stage) ++ + [ ++ fixture.debug( ++ reports.codes.TMP_FILE_WRITE, ++ file_path=self.cib_apply_diff_remove_constraint_from_simulated_cib_tmp_file_name, ++ content=self.cib_simulate_constraint, ++ ), ++ ] ++ ) ++ ++ def test_after_push_resource_not_moved(self): ++ node = "node2" ++ different_node = f"different-{node}" ++ setup_stage = 3 ++ self.tmp_file_mock_obj.set_calls( ++ self.get_tmp_files_mocks( ++ _simulation_transition_fixture( ++ _simulation_synapses_fixture(self.resource_id) ++ ), ++ _simulation_transition_fixture(), ++ ) ++ + [ ++ TmpFileCall( ++ self.cib_apply_diff_remove_constraint_after_push_tmp_file_name, ++ orig_content=self.cib_with_constraint, ++ ), ++ ] ++ ) ++ self.set_up_testing_env(node=node, stage=setup_stage) ++ self.config.runner.pcmk.load_state( ++ resources=_state_resource_fixture( ++ self.resource_id, "Started", different_node ++ ), ++ name="runner.pcmk.load_state.final", ++ env=dict( ++ CIB_file=self.cib_apply_diff_remove_constraint_after_push_tmp_file_name, ++ ), ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: move_autoclean( ++ self.env_assist.get_env(), ++ self.resource_id, ++ node=node, ++ ), ++ [ ++ fixture.error( ++ reports.codes.RESOURCE_MOVE_NOT_AFFECTING_RESOURCE, ++ resource_id=self.resource_id, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ self.env_assist.assert_reports( ++ self.get_reports(stage=setup_stage) ++ + [ ++ fixture.debug( ++ reports.codes.TMP_FILE_WRITE, ++ file_path=self.cib_apply_diff_remove_constraint_after_push_tmp_file_name, ++ content=self.cib_with_constraint, ++ ), ++ ] ++ ) ++ + def test_resource_running_on_a_different_node(self): +- node = "node1" ++ node = "node2" + different_node = f"different-{node}" + self.tmp_file_mock_obj.set_calls( + self.get_tmp_files_mocks( +diff --git a/pcs_test/tier0/lib/commands/resource/test_resource_move_ban.py b/pcs_test/tier0/lib/commands/resource/test_resource_move_ban.py +index 5d57fa06..28dd1cd1 100644 +--- 
a/pcs_test/tier0/lib/commands/resource/test_resource_move_ban.py ++++ b/pcs_test/tier0/lib/commands/resource/test_resource_move_ban.py +@@ -10,6 +10,29 @@ from pcs.common.reports import ReportItemSeverity as severities + from pcs.common.reports import codes as report_codes + from pcs.lib.commands import resource + ++ ++def _node_fixture(name, node_id): ++ return f'' ++ ++ ++def _node_list_fixture(nodes): ++ return "\n".join( ++ _node_fixture(node_name, node_id) ++ for node_id, node_name in enumerate(nodes) ++ ) ++ ++ ++def _nodes_section_fixture(content): ++ return f""" ++ ++ {content} ++ ++ """ ++ ++ ++nodes_section = _nodes_section_fixture( ++ _node_list_fixture(["node", "node1", "node2"]) ++) + resources_primitive = """ + + +@@ -128,8 +151,24 @@ class MoveBanBaseMixin(MoveBanClearBaseMixin): + expected_in_processor=False, + ) + ++ def test_node_not_found(self): ++ self.config.runner.cib.load(resources=resources_primitive) ++ node = "node" ++ self.env_assist.assert_raise_library_error( ++ lambda: self.lib_action(self.env_assist.get_env(), "A", node) ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ report_codes.NODE_NOT_FOUND, node=node, searched_types=[] ++ ) ++ ] ++ ) ++ + def test_all_options(self): +- self.config.runner.cib.load(resources=resources_promotable) ++ self.config.runner.cib.load( ++ resources=resources_promotable, nodes=nodes_section ++ ) + self.config_pcmk_action( + resource="A-clone", + master=True, +@@ -274,7 +313,9 @@ class MoveBanWaitMixin: + def setUp(self): + self.timeout = 10 + self.env_assist, self.config = get_env_tools(self) +- self.config.runner.cib.load(resources=resources_primitive) ++ self.config.runner.cib.load( ++ resources=resources_primitive, nodes=nodes_section ++ ) + + @mock.patch.object( + settings, +diff --git a/pcs_test/tools/command_env/config_runner_pcmk.py b/pcs_test/tools/command_env/config_runner_pcmk.py +index e276e03b..213941b8 100644 +--- a/pcs_test/tools/command_env/config_runner_pcmk.py ++++ b/pcs_test/tools/command_env/config_runner_pcmk.py +@@ -706,6 +706,7 @@ class PcmkShortcuts: + stdout="", + stderr="", + returncode=0, ++ env=None, + ): + """ + Create a call for crm_resource --clear +@@ -722,6 +723,7 @@ class PcmkShortcuts: + string stdout -- crm_resource's stdout + string stderr -- crm_resource's stderr + int returncode -- crm_resource's returncode ++ dict env -- CommandRunner environment variables + """ + # arguments are used via locals() + # pylint: disable=unused-argument +diff --git a/pcs_test/tools/command_env/mock_runner.py b/pcs_test/tools/command_env/mock_runner.py +index f7871fc2..8520ce02 100644 +--- a/pcs_test/tools/command_env/mock_runner.py ++++ b/pcs_test/tools/command_env/mock_runner.py +@@ -143,6 +143,6 @@ class Runner: + env.update(env_extend) + if env != call.env: + raise self.__call_queue.error_with_context( +- f"ENV doesn't match. Expected: {call.env}; Real: {env}" ++ f"Command #{i}: ENV doesn't match. 
Expected: {call.env}; Real: {env}" + ) + return call.stdout, call.stderr, call.returncode +diff --git a/pcs_test/tools/fixture_cib.py b/pcs_test/tools/fixture_cib.py +index 602491c8..bf02bacc 100644 +--- a/pcs_test/tools/fixture_cib.py ++++ b/pcs_test/tools/fixture_cib.py +@@ -310,6 +310,7 @@ MODIFIER_GENERATORS = { + "replace": replace_all, + "append": append_all, + "resources": lambda xml: replace_all({"./configuration/resources": xml}), ++ "nodes": lambda xml: replace_all({"./configuration/nodes": xml}), + "constraints": lambda xml: replace_all( + {"./configuration/constraints": xml} + ), +-- +2.31.1 + diff --git a/SOURCES/bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch b/SOURCES/bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch deleted file mode 100644 index 60b7502..0000000 --- a/SOURCES/bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch +++ /dev/null @@ -1,787 +0,0 @@ -From cf68ded959ad03244c94de308b79fc1af806a474 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Wed, 15 Sep 2021 07:55:50 +0200 -Subject: [PATCH 1/2] fix unfencing in `pcs stonith update-scsi-devices` - -* do not unfence newly added devices on fenced cluster nodes ---- - pcs/common/reports/codes.py | 6 ++ - pcs/common/reports/messages.py | 41 +++++++ - pcs/lib/commands/scsi.py | 55 +++++++++- - pcs/lib/commands/stonith.py | 26 +++-- - pcs/lib/communication/scsi.py | 40 ++++--- - .../tier0/common/reports/test_messages.py | 24 +++++ - pcs_test/tier0/lib/commands/test_scsi.py | 101 ++++++++++++++++-- - .../test_stonith_update_scsi_devices.py | 87 ++++++++++++--- - .../tools/command_env/config_http_scsi.py | 16 ++- - .../tools/command_env/config_runner_scsi.py | 36 ++++++- - pcsd/api_v1.rb | 2 +- - pcsd/capabilities.xml | 8 +- - 12 files changed, 387 insertions(+), 55 deletions(-) - -diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py -index bbd61500..4bee0bac 100644 ---- a/pcs/common/reports/codes.py -+++ b/pcs/common/reports/codes.py -@@ -468,6 +468,12 @@ STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT = M( - "STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT" - ) - STONITH_UNFENCING_FAILED = M("STONITH_UNFENCING_FAILED") -+STONITH_UNFENCING_DEVICE_STATUS_FAILED = M( -+ "STONITH_UNFENCING_DEVICE_STATUS_FAILED" -+) -+STONITH_UNFENCING_SKIPPED_DEVICES_FENCED = M( -+ "STONITH_UNFENCING_SKIPPED_DEVICES_FENCED" -+) - STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM = M( - "STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM" - ) -diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py -index f9688437..be8dd154 100644 ---- a/pcs/common/reports/messages.py -+++ b/pcs/common/reports/messages.py -@@ -2782,6 +2782,47 @@ class StonithUnfencingFailed(ReportItemMessage): - return f"Unfencing failed:\n{self.reason}" - - -+@dataclass(frozen=True) -+class StonithUnfencingDeviceStatusFailed(ReportItemMessage): -+ """ -+ Unfencing failed on a cluster node. -+ """ -+ -+ device: str -+ reason: str -+ -+ _code = codes.STONITH_UNFENCING_DEVICE_STATUS_FAILED -+ -+ @property -+ def message(self) -> str: -+ return ( -+ "Unfencing failed, unable to check status of device " -+ f"'{self.device}': {self.reason}" -+ ) -+ -+ -+@dataclass(frozen=True) -+class StonithUnfencingSkippedDevicesFenced(ReportItemMessage): -+ """ -+ Unfencing skipped on a cluster node, because fenced devices were found on -+ the node. 
-+ """ -+ -+ devices: List[str] -+ -+ _code = codes.STONITH_UNFENCING_SKIPPED_DEVICES_FENCED -+ -+ @property -+ def message(self) -> str: -+ return ( -+ "Unfencing skipped, {device_pl} {devices} {is_pl} fenced" -+ ).format( -+ device_pl=format_plural(self.devices, "device"), -+ devices=format_list(self.devices), -+ is_pl=format_plural(self.devices, "is", "are"), -+ ) -+ -+ - @dataclass(frozen=True) - class StonithRestartlessUpdateUnableToPerform(ReportItemMessage): - """ -diff --git a/pcs/lib/commands/scsi.py b/pcs/lib/commands/scsi.py -index 31a3ef2d..ff20a563 100644 ---- a/pcs/lib/commands/scsi.py -+++ b/pcs/lib/commands/scsi.py -@@ -8,20 +8,65 @@ from pcs.lib.env import LibraryEnvironment - from pcs.lib.errors import LibraryError - - --def unfence_node(env: LibraryEnvironment, node: str, devices: Iterable[str]): -+def unfence_node( -+ env: LibraryEnvironment, -+ node: str, -+ original_devices: Iterable[str], -+ updated_devices: Iterable[str], -+) -> None: - """ -- Unfence scsi devices on a node by calling fence_scsi agent script. -+ Unfence scsi devices on a node by calling fence_scsi agent script. Only -+ newly added devices will be unfenced (set(updated_devices) - -+ set(original_devices)). Before unfencing, original devices are be checked -+ if any of them are not fenced. If there is a fenced device, unfencing will -+ be skipped. - - env -- provides communication with externals - node -- node name on wich is unfencing performed -- devices -- scsi devices to be unfenced -+ original_devices -- list of devices defined before update -+ updated_devices -- list of devices defined after update - """ -+ devices_to_unfence = set(updated_devices) - set(original_devices) -+ if not devices_to_unfence: -+ return -+ fence_scsi_bin = os.path.join(settings.fence_agent_binaries, "fence_scsi") -+ fenced_devices = [] -+ for device in original_devices: -+ stdout, stderr, return_code = env.cmd_runner().run( -+ [ -+ fence_scsi_bin, -+ "--action=status", -+ f"--devices={device}", -+ f"--plug={node}", -+ ] -+ ) -+ if return_code == 2: -+ fenced_devices.append(device) -+ elif return_code != 0: -+ raise LibraryError( -+ reports.ReportItem.error( -+ reports.messages.StonithUnfencingDeviceStatusFailed( -+ device, join_multilines([stderr, stdout]) -+ ) -+ ) -+ ) -+ if fenced_devices: -+ # At least one of existing devices is off, which means the node has -+ # been fenced and new devices should not be unfenced. -+ env.report_processor.report( -+ reports.ReportItem.info( -+ reports.messages.StonithUnfencingSkippedDevicesFenced( -+ fenced_devices -+ ) -+ ) -+ ) -+ return - stdout, stderr, return_code = env.cmd_runner().run( - [ -- os.path.join(settings.fence_agent_binaries, "fence_scsi"), -+ fence_scsi_bin, - "--action=on", - "--devices", -- ",".join(sorted(devices)), -+ ",".join(sorted(devices_to_unfence)), - f"--plug={node}", - ], - ) -diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py -index 6f26e7d3..0dcf44f2 100644 ---- a/pcs/lib/commands/stonith.py -+++ b/pcs/lib/commands/stonith.py -@@ -453,7 +453,8 @@ def _update_scsi_devices_get_element_and_devices( - - def _unfencing_scsi_devices( - env: LibraryEnvironment, -- device_list: Iterable[str], -+ original_devices: Iterable[str], -+ updated_devices: Iterable[str], - force_flags: Container[reports.types.ForceCode] = (), - ) -> None: - """ -@@ -461,9 +462,13 @@ def _unfencing_scsi_devices( - to pcsd and corosync is running. 
- - env -- provides all for communication with externals -- device_list -- devices to be unfenced -+ original_devices -- devices before update -+ updated_devices -- devices after update - force_flags -- list of flags codes - """ -+ devices_to_unfence = set(updated_devices) - set(original_devices) -+ if not devices_to_unfence: -+ return - cluster_nodes_names, nodes_report_list = get_existing_nodes_names( - env.get_corosync_conf(), - error_on_missing_name=True, -@@ -487,7 +492,11 @@ def _unfencing_scsi_devices( - online_corosync_target_list = run_and_raise( - env.get_node_communicator(), com_cmd - ) -- com_cmd = Unfence(env.report_processor, sorted(device_list)) -+ com_cmd = Unfence( -+ env.report_processor, -+ original_devices=sorted(original_devices), -+ updated_devices=sorted(updated_devices), -+ ) - com_cmd.set_targets(online_corosync_target_list) - run_and_raise(env.get_node_communicator(), com_cmd) - -@@ -531,9 +540,9 @@ def update_scsi_devices( - IdProvider(stonith_el), - set_device_list, - ) -- devices_for_unfencing = set(set_device_list).difference(current_device_list) -- if devices_for_unfencing: -- _unfencing_scsi_devices(env, devices_for_unfencing, force_flags) -+ _unfencing_scsi_devices( -+ env, current_device_list, set_device_list, force_flags -+ ) - env.push_cib() - - -@@ -585,6 +594,7 @@ def update_scsi_devices_add_remove( - IdProvider(stonith_el), - updated_device_set, - ) -- if add_device_list: -- _unfencing_scsi_devices(env, add_device_list, force_flags) -+ _unfencing_scsi_devices( -+ env, current_device_list, updated_device_set, force_flags -+ ) - env.push_cib() -diff --git a/pcs/lib/communication/scsi.py b/pcs/lib/communication/scsi.py -index 7b272017..250d67aa 100644 ---- a/pcs/lib/communication/scsi.py -+++ b/pcs/lib/communication/scsi.py -@@ -1,4 +1,5 @@ - import json -+from typing import Iterable - - from dacite import DaciteError - -@@ -26,9 +27,15 @@ class Unfence( - MarkSuccessfulMixin, - RunRemotelyBase, - ): -- def __init__(self, report_processor, devices): -+ def __init__( -+ self, -+ report_processor: reports.ReportProcessor, -+ original_devices: Iterable[str], -+ updated_devices: Iterable[str], -+ ) -> None: - super().__init__(report_processor) -- self._devices = devices -+ self._original_devices = original_devices -+ self._updated_devices = updated_devices - - def _get_request_data(self): - return None -@@ -38,9 +45,13 @@ class Unfence( - Request( - target, - RequestData( -- "api/v1/scsi-unfence-node/v1", -+ "api/v1/scsi-unfence-node/v2", - data=json.dumps( -- {"devices": self._devices, "node": target.label} -+ dict( -+ node=target.label, -+ original_devices=self._original_devices, -+ updated_devices=self._updated_devices, -+ ) - ), - ), - ) -@@ -48,7 +59,9 @@ class Unfence( - ] - - def _process_response(self, response): -- report_item = response_to_report_item(response) -+ report_item = response_to_report_item( -+ response, report_pcsd_too_old_on_404=True -+ ) - if report_item: - self._report(report_item) - return -@@ -57,15 +70,14 @@ class Unfence( - result = from_dict( - InternalCommunicationResultDto, json.loads(response.data) - ) -- if result.status != const.COM_STATUS_SUCCESS: -- context = reports.ReportItemContext(node_label) -- self._report_list( -- [ -- reports.report_dto_to_item(report, context) -- for report in result.report_list -- ] -- ) -- else: -+ context = reports.ReportItemContext(node_label) -+ self._report_list( -+ [ -+ reports.report_dto_to_item(report, context) -+ for report in result.report_list -+ ] -+ ) -+ if result.status == 
const.COM_STATUS_SUCCESS: - self._on_success() - - except (json.JSONDecodeError, DaciteError): -diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py -index b0826cfd..05c3f619 100644 ---- a/pcs_test/tier0/common/reports/test_messages.py -+++ b/pcs_test/tier0/common/reports/test_messages.py -@@ -1904,6 +1904,30 @@ class StonithUnfencingFailed(NameBuildTest): - ) - - -+class StonithUnfencingDeviceStatusFailed(NameBuildTest): -+ def test_build_message(self): -+ self.assert_message_from_report( -+ "Unfencing failed, unable to check status of device 'dev1': reason", -+ reports.StonithUnfencingDeviceStatusFailed("dev1", "reason"), -+ ) -+ -+ -+class StonithUnfencingSkippedDevicesFenced(NameBuildTest): -+ def test_one_device(self): -+ self.assert_message_from_report( -+ "Unfencing skipped, device 'dev1' is fenced", -+ reports.StonithUnfencingSkippedDevicesFenced(["dev1"]), -+ ) -+ -+ def test_multiple_devices(self): -+ self.assert_message_from_report( -+ "Unfencing skipped, devices 'dev1', 'dev2', 'dev3' are fenced", -+ reports.StonithUnfencingSkippedDevicesFenced( -+ ["dev2", "dev1", "dev3"] -+ ), -+ ) -+ -+ - class StonithRestartlessUpdateUnableToPerform(NameBuildTest): - def test_build_message(self): - self.assert_message_from_report( -diff --git a/pcs_test/tier0/lib/commands/test_scsi.py b/pcs_test/tier0/lib/commands/test_scsi.py -index de75743f..8ef9836a 100644 ---- a/pcs_test/tier0/lib/commands/test_scsi.py -+++ b/pcs_test/tier0/lib/commands/test_scsi.py -@@ -10,26 +10,113 @@ from pcs.lib.commands import scsi - class TestUnfenceNode(TestCase): - def setUp(self): - self.env_assist, self.config = get_env_tools(self) -+ self.old_devices = ["device1", "device3"] -+ self.new_devices = ["device3", "device0", "device2"] -+ self.added_devices = set(self.new_devices) - set(self.old_devices) -+ self.node = "node1" - -- def test_success(self): -- self.config.runner.scsi.unfence_node("node1", ["/dev/sda", "/dev/sdb"]) -+ def test_success_devices_to_unfence(self): -+ for old_dev in self.old_devices: -+ self.config.runner.scsi.get_status( -+ self.node, old_dev, name=f"runner.scsi.is_fenced.{old_dev}" -+ ) -+ self.config.runner.scsi.unfence_node(self.node, self.added_devices) - scsi.unfence_node( -- self.env_assist.get_env(), "node1", ["/dev/sdb", "/dev/sda"] -+ self.env_assist.get_env(), -+ self.node, -+ self.old_devices, -+ self.new_devices, - ) - self.env_assist.assert_reports([]) - -- def test_failure(self): -+ def test_success_no_devices_to_unfence(self): -+ scsi.unfence_node( -+ self.env_assist.get_env(), -+ self.node, -+ {"device1", "device2", "device3"}, -+ {"device3"}, -+ ) -+ self.env_assist.assert_reports([]) -+ -+ def test_unfencing_failure(self): -+ err_msg = "stderr" -+ for old_dev in self.old_devices: -+ self.config.runner.scsi.get_status( -+ self.node, old_dev, name=f"runner.scsi.is_fenced.{old_dev}" -+ ) - self.config.runner.scsi.unfence_node( -- "node1", ["/dev/sda", "/dev/sdb"], stderr="stderr", return_code=1 -+ self.node, self.added_devices, stderr=err_msg, return_code=1 - ) - self.env_assist.assert_raise_library_error( - lambda: scsi.unfence_node( -- self.env_assist.get_env(), "node1", ["/dev/sdb", "/dev/sda"] -+ self.env_assist.get_env(), -+ self.node, -+ self.old_devices, -+ self.new_devices, - ), - [ - fixture.error( -- report_codes.STONITH_UNFENCING_FAILED, reason="stderr" -+ report_codes.STONITH_UNFENCING_FAILED, reason=err_msg - ) - ], - expected_in_processor=False, - ) -+ -+ def test_device_status_failed(self): -+ err_msg = 
"stderr" -+ new_devices = ["device1", "device2", "device3", "device4"] -+ old_devices = new_devices[:-1] -+ ok_devices = new_devices[0:2] -+ err_device = new_devices[2] -+ for dev in ok_devices: -+ self.config.runner.scsi.get_status( -+ self.node, dev, name=f"runner.scsi.is_fenced.{dev}" -+ ) -+ self.config.runner.scsi.get_status( -+ self.node, -+ err_device, -+ name=f"runner.scsi.is_fenced.{err_device}", -+ stderr=err_msg, -+ return_code=1, -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: scsi.unfence_node( -+ self.env_assist.get_env(), -+ self.node, -+ old_devices, -+ new_devices, -+ ), -+ [ -+ fixture.error( -+ report_codes.STONITH_UNFENCING_DEVICE_STATUS_FAILED, -+ device=err_device, -+ reason=err_msg, -+ ) -+ ], -+ expected_in_processor=False, -+ ) -+ -+ def test_unfencing_skipped_devices_are_fenced(self): -+ stdout_off = "Status: OFF" -+ for old_dev in self.old_devices: -+ self.config.runner.scsi.get_status( -+ self.node, -+ old_dev, -+ name=f"runner.scsi.is_fenced.{old_dev}", -+ stdout=stdout_off, -+ return_code=2, -+ ) -+ scsi.unfence_node( -+ self.env_assist.get_env(), -+ self.node, -+ self.old_devices, -+ self.new_devices, -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.info( -+ report_codes.STONITH_UNFENCING_SKIPPED_DEVICES_FENCED, -+ devices=sorted(self.old_devices), -+ ) -+ ] -+ ) -diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py -index 6ff6b99a..ed8f5d4f 100644 ---- a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py -+++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py -@@ -1,3 +1,4 @@ -+# pylint: disable=too-many-lines - import json - from unittest import mock, TestCase - -@@ -297,7 +298,9 @@ class UpdateScsiDevicesMixin: - node_labels=self.existing_nodes - ) - self.config.http.scsi.unfence_node( -- unfence, node_labels=self.existing_nodes -+ original_devices=devices_before, -+ updated_devices=devices_updated, -+ node_labels=self.existing_nodes, - ) - self.config.env.push_cib( - resources=fixture_scsi( -@@ -449,14 +452,14 @@ class UpdateScsiDevicesFailuresMixin: - node_labels=self.existing_nodes - ) - self.config.http.scsi.unfence_node( -- DEVICES_2, - communication_list=[ - dict( - label=self.existing_nodes[0], - raw_data=json.dumps( - dict( -- devices=[DEV_2], - node=self.existing_nodes[0], -+ original_devices=DEVICES_1, -+ updated_devices=DEVICES_2, - ) - ), - was_connected=False, -@@ -466,8 +469,9 @@ class UpdateScsiDevicesFailuresMixin: - label=self.existing_nodes[1], - raw_data=json.dumps( - dict( -- devices=[DEV_2], - node=self.existing_nodes[1], -+ original_devices=DEVICES_1, -+ updated_devices=DEVICES_2, - ) - ), - output=json.dumps( -@@ -491,8 +495,9 @@ class UpdateScsiDevicesFailuresMixin: - label=self.existing_nodes[2], - raw_data=json.dumps( - dict( -- devices=[DEV_2], - node=self.existing_nodes[2], -+ original_devices=DEVICES_1, -+ updated_devices=DEVICES_2, - ) - ), - ), -@@ -504,7 +509,7 @@ class UpdateScsiDevicesFailuresMixin: - fixture.error( - reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, - node=self.existing_nodes[0], -- command="api/v1/scsi-unfence-node/v1", -+ command="api/v1/scsi-unfence-node/v2", - reason="errA", - ), - fixture.error( -@@ -517,20 +522,76 @@ class UpdateScsiDevicesFailuresMixin: - ] - ) - -+ def test_unfence_failure_unknown_command(self): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ node_labels=self.existing_nodes -+ ) -+ 
communication_list = [ -+ dict( -+ label=node, -+ raw_data=json.dumps( -+ dict( -+ node=node, -+ original_devices=DEVICES_1, -+ updated_devices=DEVICES_2, -+ ) -+ ), -+ ) -+ for node in self.existing_nodes[0:2] -+ ] -+ communication_list.append( -+ dict( -+ label=self.existing_nodes[2], -+ response_code=404, -+ raw_data=json.dumps( -+ dict( -+ node=self.existing_nodes[2], -+ original_devices=DEVICES_1, -+ updated_devices=DEVICES_2, -+ ) -+ ), -+ output=json.dumps( -+ dto.to_dict( -+ communication.dto.InternalCommunicationResultDto( -+ status=communication.const.COM_STATUS_UNKNOWN_CMD, -+ status_msg=( -+ "Unknown command '/api/v1/scsi-unfence-node/v2'" -+ ), -+ report_list=[], -+ data=None, -+ ) -+ ) -+ ), -+ ), -+ ) -+ self.config.http.scsi.unfence_node( -+ communication_list=communication_list -+ ) -+ self.env_assist.assert_raise_library_error(self.command()) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.PCSD_VERSION_TOO_OLD, -+ node=self.existing_nodes[2], -+ ), -+ ] -+ ) -+ - def test_unfence_failure_agent_script_failed(self): - self._unfence_failure_common_calls() - self.config.http.corosync.get_corosync_online_targets( - node_labels=self.existing_nodes - ) - self.config.http.scsi.unfence_node( -- DEVICES_2, - communication_list=[ - dict( - label=self.existing_nodes[0], - raw_data=json.dumps( - dict( -- devices=[DEV_2], - node=self.existing_nodes[0], -+ original_devices=DEVICES_1, -+ updated_devices=DEVICES_2, - ) - ), - ), -@@ -538,8 +599,9 @@ class UpdateScsiDevicesFailuresMixin: - label=self.existing_nodes[1], - raw_data=json.dumps( - dict( -- devices=[DEV_2], - node=self.existing_nodes[1], -+ original_devices=DEVICES_1, -+ updated_devices=DEVICES_2, - ) - ), - output=json.dumps( -@@ -563,8 +625,9 @@ class UpdateScsiDevicesFailuresMixin: - label=self.existing_nodes[2], - raw_data=json.dumps( - dict( -- devices=[DEV_2], - node=self.existing_nodes[2], -+ original_devices=DEVICES_1, -+ updated_devices=DEVICES_2, - ) - ), - ), -@@ -639,14 +702,14 @@ class UpdateScsiDevicesFailuresMixin: - ] - ) - self.config.http.scsi.unfence_node( -- DEVICES_2, - communication_list=[ - dict( - label=self.existing_nodes[0], - raw_data=json.dumps( - dict( -- devices=[DEV_2], - node=self.existing_nodes[0], -+ original_devices=DEVICES_1, -+ updated_devices=DEVICES_2, - ) - ), - ), -diff --git a/pcs_test/tools/command_env/config_http_scsi.py b/pcs_test/tools/command_env/config_http_scsi.py -index 0e9f63af..7150eef9 100644 ---- a/pcs_test/tools/command_env/config_http_scsi.py -+++ b/pcs_test/tools/command_env/config_http_scsi.py -@@ -14,7 +14,8 @@ class ScsiShortcuts: - - def unfence_node( - self, -- devices, -+ original_devices=(), -+ updated_devices=(), - node_labels=None, - communication_list=None, - name="http.scsi.unfence_node", -@@ -22,7 +23,8 @@ class ScsiShortcuts: - """ - Create a calls for node unfencing - -- list devices -- list of scsi devices -+ list original_devices -- list of scsi devices before an update -+ list updated_devices -- list of scsi devices after an update - list node_labels -- create success responses from these nodes - list communication_list -- use these custom responses - string name -- the key of this call -@@ -39,7 +41,13 @@ class ScsiShortcuts: - communication_list = [ - dict( - label=node, -- raw_data=json.dumps(dict(devices=devices, node=node)), -+ raw_data=json.dumps( -+ dict( -+ node=node, -+ original_devices=original_devices, -+ updated_devices=updated_devices, -+ ) -+ ), - ) - for node in node_labels - ] -@@ -47,7 +55,7 @@ class 
ScsiShortcuts: - self.__calls, - name, - communication_list, -- action="api/v1/scsi-unfence-node/v1", -+ action="api/v1/scsi-unfence-node/v2", - output=json.dumps( - to_dict( - communication.dto.InternalCommunicationResultDto( -diff --git a/pcs_test/tools/command_env/config_runner_scsi.py b/pcs_test/tools/command_env/config_runner_scsi.py -index 4b671bb7..3cee13d6 100644 ---- a/pcs_test/tools/command_env/config_runner_scsi.py -+++ b/pcs_test/tools/command_env/config_runner_scsi.py -@@ -35,7 +35,41 @@ class ScsiShortcuts: - os.path.join(settings.fence_agent_binaries, "fence_scsi"), - "--action=on", - "--devices", -- ",".join(devices), -+ ",".join(sorted(devices)), -+ f"--plug={node}", -+ ], -+ stdout=stdout, -+ stderr=stderr, -+ returncode=return_code, -+ ), -+ ) -+ -+ def get_status( -+ self, -+ node, -+ device, -+ stdout="", -+ stderr="", -+ return_code=0, -+ name="runner.scsi.is_fenced", -+ ): -+ """ -+ Create a call for getting scsi status -+ -+ string node -- a node from which is unfencing performed -+ str device -- a device to check -+ string stdout -- stdout from fence_scsi agent script -+ string stderr -- stderr from fence_scsi agent script -+ int return_code -- return code of the fence_scsi agent script -+ string name -- the key of this call -+ """ -+ self.__calls.place( -+ name, -+ RunnerCall( -+ [ -+ os.path.join(settings.fence_agent_binaries, "fence_scsi"), -+ "--action=status", -+ f"--devices={device}", - f"--plug={node}", - ], - stdout=stdout, -diff --git a/pcsd/api_v1.rb b/pcsd/api_v1.rb -index 7edeeabf..e55c2be7 100644 ---- a/pcsd/api_v1.rb -+++ b/pcsd/api_v1.rb -@@ -291,7 +291,7 @@ def route_api_v1(auth_user, params, request) - :only_superuser => false, - :permissions => Permissions::WRITE, - }, -- 'scsi-unfence-node/v1' => { -+ 'scsi-unfence-node/v2' => { - :cmd => 'scsi.unfence_node', - :only_superuser => false, - :permissions => Permissions::WRITE, -diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml -index 58ebcf0f..3954aa5d 100644 ---- a/pcsd/capabilities.xml -+++ b/pcsd/capabilities.xml -@@ -1892,11 +1892,13 @@ - pcs commands: stonith update-scsi-devices - - -- -+ - -- Unfence scsi devices on a cluster node. -+ Unfence scsi devices on a cluster node. In comparison with v1, only -+ newly added devices are unfenced. In case any existing device is -+ fenced, unfencing will be skipped. 
- -- daemon urls: /api/v1/scsi-unfence-node/v1 -+ daemon urls: /api/v1/scsi-unfence-node/v2 - - - --- -2.31.1 - diff --git a/SOURCES/bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch b/SOURCES/bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch deleted file mode 100644 index cdad5a1..0000000 --- a/SOURCES/bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch +++ /dev/null @@ -1,3629 +0,0 @@ -From d20c356298eacec1a71a85c29f7d1f8b63fd8cb7 Mon Sep 17 00:00:00 2001 -From: Miroslav Lisik -Date: Fri, 6 Aug 2021 17:35:03 +0200 -Subject: [PATCH 1/2] add add/remove syntax for command `pcs stonith - update-scsi-devices` - ---- - CHANGELOG.md | 9 - - pcs/cli/common/lib_wrapper.py | 1 + - pcs/common/reports/codes.py | 39 + - pcs/common/reports/const.py | 4 + - pcs/common/reports/messages.py | 289 ++++ - pcs/common/reports/types.py | 2 + - pcs/common/str_tools.py | 26 +- - pcs/lib/commands/stonith.py | 307 +++- - pcs/pcs.8.in | 4 +- - pcs/stonith.py | 43 +- - pcs/usage.py | 13 +- - pcs_test/Makefile.am | 1 + - pcs_test/tier0/cli/test_stonith.py | 169 +- - .../tier0/common/reports/test_messages.py | 185 +++ - pcs_test/tier0/common/test_str_tools.py | 63 +- - pcs_test/tier0/lib/cib/test_stonith.py | 135 +- - .../test_stonith_update_scsi_devices.py | 1439 ++++++++++------- - pcsd/capabilities.xml | 8 + - 18 files changed, 2041 insertions(+), 696 deletions(-) - -diff --git a/CHANGELOG.md b/CHANGELOG.md -index c15546ba..f768cc36 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -1,14 +1,5 @@ - # Change Log - --## [Unreleased] -- --### Fixed --- Fixed an error when creating a resource which defines 'depth' attribute for -- its operations ([rhbz#1998454]) -- --[rhbz#1998454]: https://bugzilla.redhat.com/show_bug.cgi?id=1998454 -- -- - ## [0.10.10] - 2021-08-19 - - ### Added -diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py -index 06410b41..2bf83485 100644 ---- a/pcs/cli/common/lib_wrapper.py -+++ b/pcs/cli/common/lib_wrapper.py -@@ -436,6 +436,7 @@ def load_module(env, middleware_factory, name): - "history_cleanup": stonith.history_cleanup, - "history_update": stonith.history_update, - "update_scsi_devices": stonith.update_scsi_devices, -+ "update_scsi_devices_add_remove": stonith.update_scsi_devices_add_remove, - }, - ) - -diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py -index 32898154..bbd61500 100644 ---- a/pcs/common/reports/codes.py -+++ b/pcs/common/reports/codes.py -@@ -12,6 +12,29 @@ SKIP_OFFLINE_NODES = F("SKIP_OFFLINE_NODES") - # messages - - -+ADD_REMOVE_ITEMS_NOT_SPECIFIED = M("ADD_REMOVE_ITEMS_NOT_SPECIFIED") -+ADD_REMOVE_ITEMS_DUPLICATION = M("ADD_REMOVE_ITEMS_DUPLICATION") -+ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER = M( -+ "ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER" -+) -+ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER = M( -+ "ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER" -+) -+ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME = M( -+ "ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME" -+) -+ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER = M( -+ "ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER" -+) -+ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER = M( -+ "ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER" -+) -+ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF = M( -+ "ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF" -+) -+ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD = M( -+ 
"ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD" -+) - AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE = M("AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE") - AGENT_NAME_GUESS_FOUND_NONE = M("AGENT_NAME_GUESS_FOUND_NONE") - AGENT_NAME_GUESSED = M("AGENT_NAME_GUESSED") -@@ -44,17 +67,23 @@ CANNOT_BAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE = M( - CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED = M( - "CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED" - ) -+# TODO: remove, use ADD_REMOVE reports - CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP = M( - "CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP" - ) -+# TODO: remove, use ADD_REMOVE reports - CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP = M( - "CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP" - ) -+# TODO: remove, use ADD_REMOVE reports - CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP = M( - "CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP" - ) -+# TODO: remove, use ADD_REMOVE reports - CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE = M("CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE") -+# TODO: remove, use ADD_REMOVE reports - CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF = M("CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF") -+# TODO: remove, use ADD_REMOVE reports - CANNOT_GROUP_RESOURCE_NO_RESOURCES = M("CANNOT_GROUP_RESOURCE_NO_RESOURCES") - CANNOT_GROUP_RESOURCE_WRONG_TYPE = M("CANNOT_GROUP_RESOURCE_WRONG_TYPE") - CANNOT_LEAVE_GROUP_EMPTY_AFTER_MOVE = M("CANNOT_LEAVE_GROUP_EMPTY_AFTER_MOVE") -@@ -448,13 +477,17 @@ SERVICE_COMMAND_ON_NODE_ERROR = M("SERVICE_COMMAND_ON_NODE_ERROR") - SERVICE_COMMAND_ON_NODE_SUCCESS = M("SERVICE_COMMAND_ON_NODE_SUCCESS") - SERVICE_UNABLE_TO_DETECT_INIT_SYSTEM = M("SERVICE_UNABLE_TO_DETECT_INIT_SYSTEM") - SYSTEM_WILL_RESET = M("SYSTEM_WILL_RESET") -+# TODO: remove, use ADD_REMOVE reports - TAG_ADD_REMOVE_IDS_DUPLICATION = M("TAG_ADD_REMOVE_IDS_DUPLICATION") -+# TODO: remove, use ADD_REMOVE reports - TAG_ADJACENT_REFERENCE_ID_NOT_IN_THE_TAG = M( - "TAG_ADJACENT_REFERENCE_ID_NOT_IN_THE_TAG" - ) -+# TODO: remove, use ADD_REMOVE reports - TAG_CANNOT_ADD_AND_REMOVE_IDS_AT_THE_SAME_TIME = M( - "TAG_CANNOT_ADD_AND_REMOVE_IDS_AT_THE_SAME_TIME" - ) -+# TODO: remove, use ADD_REMOVE reports - TAG_CANNOT_ADD_REFERENCE_IDS_ALREADY_IN_THE_TAG = M( - "TAG_CANNOT_ADD_REFERENCE_IDS_ALREADY_IN_THE_TAG" - ) -@@ -462,8 +495,11 @@ TAG_CANNOT_CONTAIN_ITSELF = M("TAG_CANNOT_CONTAIN_ITSELF") - TAG_CANNOT_CREATE_EMPTY_TAG_NO_IDS_SPECIFIED = M( - "TAG_CANNOT_CREATE_EMPTY_TAG_NO_IDS_SPECIFIED" - ) -+# TODO: remove, use ADD_REMOVE reports - TAG_CANNOT_PUT_ID_NEXT_TO_ITSELF = M("TAG_CANNOT_PUT_ID_NEXT_TO_ITSELF") -+# TODO: remove, use ADD_REMOVE reports - TAG_CANNOT_REMOVE_ADJACENT_ID = M("TAG_CANNOT_REMOVE_ADJACENT_ID") -+# TODO: remove, use ADD_REMOVE reports - TAG_CANNOT_REMOVE_REFERENCES_WITHOUT_REMOVING_TAG = M( - "TAG_CANNOT_REMOVE_REFERENCES_WITHOUT_REMOVING_TAG" - ) -@@ -473,12 +509,15 @@ TAG_CANNOT_REMOVE_TAG_REFERENCED_IN_CONSTRAINTS = M( - TAG_CANNOT_REMOVE_TAGS_NO_TAGS_SPECIFIED = M( - "TAG_CANNOT_REMOVE_TAGS_NO_TAGS_SPECIFIED" - ) -+# TODO: remove, use ADD_REMOVE reports - TAG_CANNOT_SPECIFY_ADJACENT_ID_WITHOUT_IDS_TO_ADD = M( - "TAG_CANNOT_SPECIFY_ADJACENT_ID_WITHOUT_IDS_TO_ADD" - ) -+# TODO: remove, use ADD_REMOVE reports - TAG_CANNOT_UPDATE_TAG_NO_IDS_SPECIFIED = M( - "TAG_CANNOT_UPDATE_TAG_NO_IDS_SPECIFIED" - ) -+# TODO: remove, use ADD_REMOVE reports - TAG_IDS_NOT_IN_THE_TAG = M("TAG_IDS_NOT_IN_THE_TAG") - TMP_FILE_WRITE = M("TMP_FILE_WRITE") - UNABLE_TO_CONNECT_TO_ANY_REMAINING_NODE = M( -diff --git a/pcs/common/reports/const.py 
b/pcs/common/reports/const.py -index c551338e..88725eb3 100644 ---- a/pcs/common/reports/const.py -+++ b/pcs/common/reports/const.py -@@ -1,4 +1,6 @@ - from .types import ( -+ AddRemoveContainerType, -+ AddRemoveItemType, - BoothConfigUsedWhere, - DefaultAddressSource, - FenceHistoryCommandType, -@@ -9,6 +11,8 @@ from .types import ( - ) - - -+ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE = AddRemoveContainerType("stonith") -+ADD_REMOVE_ITEM_TYPE_DEVICE = AddRemoveItemType("device") - BOOTH_CONFIG_USED_IN_CLUSTER_RESOURCE = BoothConfigUsedWhere( - "in a cluster resource" - ) -diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py -index a1c5db11..f9688437 100644 ---- a/pcs/common/reports/messages.py -+++ b/pcs/common/reports/messages.py -@@ -24,6 +24,7 @@ from pcs.common.str_tools import ( - format_list_custom_last_separator, - format_optional, - format_plural, -+ get_plural, - indent, - is_iterable_not_str, - ) -@@ -95,6 +96,14 @@ def _key_numeric(item: str) -> Tuple[int, str]: - return (int(item), item) if item.isdigit() else (-1, item) - - -+_add_remove_container_translation = { -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE: "stonith resource", -+} -+ -+_add_remove_item_translation = { -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE: "device", -+} -+ - _file_role_translation = { - file_type_codes.BOOTH_CONFIG: "Booth configuration", - file_type_codes.BOOTH_KEY: "Booth key", -@@ -129,6 +138,16 @@ _type_articles = { - } - - -+def _add_remove_container_str( -+ container: types.AddRemoveContainerType, -+) -> str: -+ return _add_remove_container_translation.get(container, container) -+ -+ -+def _add_remove_item_str(item: types.AddRemoveItemType) -> str: -+ return _add_remove_item_translation.get(item, item) -+ -+ - def _format_file_role(role: file_type_codes.FileTypeCode) -> str: - return _file_role_translation.get(role, role) - -@@ -2528,6 +2547,7 @@ class ResourceBundleAlreadyContainsAResource(ReportItemMessage): - ) - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class CannotGroupResourceAdjacentResourceForNewGroup(ReportItemMessage): - """ -@@ -2551,6 +2571,7 @@ class CannotGroupResourceAdjacentResourceForNewGroup(ReportItemMessage): - ) - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class CannotGroupResourceAdjacentResourceNotInGroup(ReportItemMessage): - """ -@@ -2573,6 +2594,7 @@ class CannotGroupResourceAdjacentResourceNotInGroup(ReportItemMessage): - ) - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class CannotGroupResourceAlreadyInTheGroup(ReportItemMessage): - """ -@@ -2593,6 +2615,7 @@ class CannotGroupResourceAlreadyInTheGroup(ReportItemMessage): - return f"{resources} already {exist} in '{self.group_id}'" - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class CannotGroupResourceMoreThanOnce(ReportItemMessage): - """ -@@ -2610,6 +2633,7 @@ class CannotGroupResourceMoreThanOnce(ReportItemMessage): - return f"Resources specified more than once: {resources}" - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class CannotGroupResourceNoResources(ReportItemMessage): - """ -@@ -2623,6 +2647,7 @@ class CannotGroupResourceNoResources(ReportItemMessage): - return "No resources to add" - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class CannotGroupResourceNextToItself(ReportItemMessage): - """ -@@ -6482,6 +6507,7 @@ class BoothTicketOperationFailed(ReportItemMessage): - ) - - -+# TODO: remove, use ADD_REMOVE reports - 
@dataclass(frozen=True) - class TagAddRemoveIdsDuplication(ReportItemMessage): - """ -@@ -6500,6 +6526,7 @@ class TagAddRemoveIdsDuplication(ReportItemMessage): - return f"Ids to {action} must be unique, duplicate ids: {duplicate_ids}" - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class TagAdjacentReferenceIdNotInTheTag(ReportItemMessage): - """ -@@ -6522,6 +6549,7 @@ class TagAdjacentReferenceIdNotInTheTag(ReportItemMessage): - ) - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class TagCannotAddAndRemoveIdsAtTheSameTime(ReportItemMessage): - """ -@@ -6540,6 +6568,7 @@ class TagCannotAddAndRemoveIdsAtTheSameTime(ReportItemMessage): - return f"Ids cannot be added and removed at the same time: {idref_list}" - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class TagCannotAddReferenceIdsAlreadyInTheTag(ReportItemMessage): - """ -@@ -6591,6 +6620,7 @@ class TagCannotCreateEmptyTagNoIdsSpecified(ReportItemMessage): - return "Cannot create empty tag, no resource ids specified" - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class TagCannotPutIdNextToItself(ReportItemMessage): - """ -@@ -6607,6 +6637,7 @@ class TagCannotPutIdNextToItself(ReportItemMessage): - return f"Cannot put id '{self.adjacent_id}' next to itself." - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class TagCannotRemoveAdjacentId(ReportItemMessage): - """ -@@ -6626,6 +6657,7 @@ class TagCannotRemoveAdjacentId(ReportItemMessage): - ) - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class TagCannotRemoveReferencesWithoutRemovingTag(ReportItemMessage): - """ -@@ -6678,6 +6710,7 @@ class TagCannotRemoveTagsNoTagsSpecified(ReportItemMessage): - return "Cannot remove tags, no tags to remove specified" - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(ReportItemMessage): - """ -@@ -6697,6 +6730,7 @@ class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(ReportItemMessage): - ) - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class TagCannotUpdateTagNoIdsSpecified(ReportItemMessage): - """ -@@ -6710,6 +6744,7 @@ class TagCannotUpdateTagNoIdsSpecified(ReportItemMessage): - return "Cannot update tag, no ids to be added or removed specified" - - -+# TODO: remove, use ADD_REMOVE reports - @dataclass(frozen=True) - class TagIdsNotInTheTag(ReportItemMessage): - """ -@@ -6850,3 +6885,257 @@ class CibNvsetAmbiguousProvideNvsetId(ReportItemMessage): - @property - def message(self) -> str: - return "Several options sets exist, please specify an option set ID" -+ -+ -+@dataclass(frozen=True) -+class AddRemoveItemsNotSpecified(ReportItemMessage): -+ """ -+ Cannot modify container, no add or remove items specified. 
-+ -+ container_type -- type of item container -+ item_type -- type of item in a container -+ container_id -- id of a container -+ """ -+ -+ container_type: types.AddRemoveContainerType -+ item_type: types.AddRemoveItemType -+ container_id: str -+ _code = codes.ADD_REMOVE_ITEMS_NOT_SPECIFIED -+ -+ @property -+ def message(self) -> str: -+ container = _add_remove_container_str(self.container_type) -+ items = get_plural(_add_remove_item_str(self.item_type)) -+ return ( -+ f"Cannot modify {container} '{self.container_id}', no {items} to " -+ "add or remove specified" -+ ) -+ -+ -+@dataclass(frozen=True) -+class AddRemoveItemsDuplication(ReportItemMessage): -+ """ -+ Duplicate items were found in add/remove item lists. -+ -+ container_type -- type of item container -+ item_type -- type of item in a container -+ container_id -- id of a container -+ duplicate_items_list -- list of duplicate items -+ """ -+ -+ container_type: types.AddRemoveContainerType -+ item_type: types.AddRemoveItemType -+ container_id: str -+ duplicate_items_list: List[str] -+ _code = codes.ADD_REMOVE_ITEMS_DUPLICATION -+ -+ @property -+ def message(self) -> str: -+ items = get_plural(_add_remove_item_str(self.item_type)) -+ duplicate_items = format_list(self.duplicate_items_list) -+ return ( -+ f"{items.capitalize()} to add or remove must be unique, duplicate " -+ f"{items}: {duplicate_items}" -+ ) -+ -+ -+@dataclass(frozen=True) -+class AddRemoveCannotAddItemsAlreadyInTheContainer(ReportItemMessage): -+ """ -+ Cannot add items already existing in the container. -+ -+ container_type -- type of item container -+ item_type -- type of item in a container -+ container_id -- id of a container -+ item_list -- list of items already in the container -+ """ -+ -+ container_type: types.AddRemoveContainerType -+ item_type: types.AddRemoveItemType -+ container_id: str -+ item_list: List[str] -+ _code = codes.ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER -+ -+ @property -+ def message(self) -> str: -+ items = format_plural( -+ self.item_list, _add_remove_item_str(self.item_type) -+ ) -+ item_list = format_list(self.item_list) -+ they = format_plural(self.item_list, "it") -+ are = format_plural(self.item_list, "is") -+ container = _add_remove_container_str(self.container_type) -+ return ( -+ f"Cannot add {items} {item_list}, {they} {are} already present in " -+ f"{container} '{self.container_id}'" -+ ) -+ -+ -+@dataclass(frozen=True) -+class AddRemoveCannotRemoveItemsNotInTheContainer(ReportItemMessage): -+ """ -+ Cannot remove items not existing in the container. 
-+ -+ container_type -- type of item container -+ item_type -- type of item in a container -+ container_id -- id of a container -+ item_list -- list of items not in the container -+ """ -+ -+ container_type: types.AddRemoveContainerType -+ item_type: types.AddRemoveItemType -+ container_id: str -+ item_list: List[str] -+ _code = codes.ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER -+ -+ @property -+ def message(self) -> str: -+ items = format_plural( -+ self.item_list, _add_remove_item_str(self.item_type) -+ ) -+ item_list = format_list(self.item_list) -+ they = format_plural(self.item_list, "it") -+ are = format_plural(self.item_list, "is") -+ container = _add_remove_container_str(self.container_type) -+ items = format_plural( -+ self.item_list, _add_remove_item_str(self.item_type) -+ ) -+ return ( -+ f"Cannot remove {items} {item_list}, {they} {are} not present in " -+ f"{container} '{self.container_id}'" -+ ) -+ -+ -+@dataclass(frozen=True) -+class AddRemoveCannotAddAndRemoveItemsAtTheSameTime(ReportItemMessage): -+ """ -+ Cannot add and remove items at the same time. Avoid operation without an -+ effect. -+ -+ container_type -- type of item container -+ item_type -- type of item in a container -+ container_id -- id of a container -+ item_list -- common items from add and remove item lists -+ """ -+ -+ container_type: types.AddRemoveContainerType -+ item_type: types.AddRemoveItemType -+ container_id: str -+ item_list: List[str] -+ _code = codes.ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME -+ -+ @property -+ def message(self) -> str: -+ items = format_plural( -+ self.item_list, _add_remove_item_str(self.item_type) -+ ) -+ item_list = format_list(self.item_list) -+ return ( -+ f"{items.capitalize()} cannot be added and removed at the same " -+ f"time: {item_list}" -+ ) -+ -+ -+@dataclass(frozen=True) -+class AddRemoveCannotRemoveAllItemsFromTheContainer(ReportItemMessage): -+ """ -+ Cannot remove all items from a container. -+ -+ container_type -- type of item container -+ item_type -- type of item in a container -+ container_id -- id of a container -+ item_list -- common items from add and remove item lists -+ """ -+ -+ container_type: types.AddRemoveContainerType -+ item_type: types.AddRemoveItemType -+ container_id: str -+ item_list: List[str] -+ _code = codes.ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER -+ -+ @property -+ def message(self) -> str: -+ container = _add_remove_container_str(self.container_type) -+ items = get_plural(_add_remove_item_str(self.item_type)) -+ return ( -+ f"Cannot remove all {items} from {container} '{self.container_id}'" -+ ) -+ -+ -+@dataclass(frozen=True) -+class AddRemoveAdjacentItemNotInTheContainer(ReportItemMessage): -+ """ -+ Cannot put items next to an adjacent item in the container, because the -+ adjacent item does not exist in the container. 
-+ -+ container_type -- type of item container -+ item_type -- type of item in a container -+ container_id -- id of a container -+ adjacent_item_id -- id of an adjacent item -+ """ -+ -+ container_type: types.AddRemoveContainerType -+ item_type: types.AddRemoveItemType -+ container_id: str -+ adjacent_item_id: str -+ _code = codes.ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER -+ -+ @property -+ def message(self) -> str: -+ container = _add_remove_container_str(self.container_type) -+ item = _add_remove_item_str(self.item_type) -+ items = get_plural(item) -+ return ( -+ f"There is no {item} '{self.adjacent_item_id}' in the " -+ f"{container} '{self.container_id}', cannot add {items} next to it" -+ ) -+ -+ -+@dataclass(frozen=True) -+class AddRemoveCannotPutItemNextToItself(ReportItemMessage): -+ """ -+ Cannot put an item into a container next to itself. -+ -+ container_type -- type of item container -+ item_type -- type of item in a container -+ container_id -- id of a container -+ adjacent_item_id -- id of an adjacent item -+ """ -+ -+ container_type: types.AddRemoveContainerType -+ item_type: types.AddRemoveItemType -+ container_id: str -+ adjacent_item_id: str -+ _code = codes.ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF -+ -+ @property -+ def message(self) -> str: -+ item = _add_remove_item_str(self.item_type) -+ return f"Cannot put {item} '{self.adjacent_item_id}' next to itself" -+ -+ -+@dataclass(frozen=True) -+class AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd(ReportItemMessage): -+ """ -+ Cannot specify adjacent item without items to add. -+ -+ container_type -- type of item container -+ item_type -- type of item in a container -+ container_id -- id of a container -+ adjacent_item_id -- id of an adjacent item -+ """ -+ -+ container_type: types.AddRemoveContainerType -+ item_type: types.AddRemoveItemType -+ container_id: str -+ adjacent_item_id: str -+ _code = codes.ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD -+ -+ @property -+ def message(self) -> str: -+ item = _add_remove_item_str(self.item_type) -+ items = get_plural(item) -+ return ( -+ f"Cannot specify adjacent {item} '{self.adjacent_item_id}' without " -+ f"{items} to add" -+ ) -diff --git a/pcs/common/reports/types.py b/pcs/common/reports/types.py -index fa7fdf4d..610c16f1 100644 ---- a/pcs/common/reports/types.py -+++ b/pcs/common/reports/types.py -@@ -1,5 +1,7 @@ - from typing import NewType - -+AddRemoveContainerType = NewType("AddRemoveContainerType", str) -+AddRemoveItemType = NewType("AddRemoveItemType", str) - BoothConfigUsedWhere = NewType("BoothConfigUsedWhere", str) - DefaultAddressSource = NewType("DefaultAddressSource", str) - FenceHistoryCommandType = NewType("FenceHistoryCommandType", str) -diff --git a/pcs/common/str_tools.py b/pcs/common/str_tools.py -index 98fe5f50..b8dccc0c 100644 ---- a/pcs/common/str_tools.py -+++ b/pcs/common/str_tools.py -@@ -131,6 +131,23 @@ def _add_s(word): - return word + "s" - - -+def get_plural(singular: str) -> str: -+ """ -+ Take singular word form and return plural. 
-+ -+ singular -- singular word (like: is, do, node) -+ """ -+ common_plurals = { -+ "is": "are", -+ "has": "have", -+ "does": "do", -+ "it": "they", -+ } -+ if singular in common_plurals: -+ return common_plurals[singular] -+ return _add_s(singular) -+ -+ - def format_plural( - depends_on: Union[int, Iterable[Any]], - singular: str, -@@ -145,18 +162,11 @@ def format_plural( - singular -- singular word (like: is, do, node) - plural -- optional irregular plural form - """ -- common_plurals = { -- "is": "are", -- "has": "have", -- "does": "do", -- } - if not _is_multiple(depends_on): - return singular - if plural: - return plural -- if singular in common_plurals: -- return common_plurals[singular] -- return _add_s(singular) -+ return get_plural(singular) - - - T = TypeVar("T") -diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py -index 036e3fa5..6f26e7d3 100644 ---- a/pcs/lib/commands/stonith.py -+++ b/pcs/lib/commands/stonith.py -@@ -1,9 +1,15 @@ --from typing import Container, Iterable, Optional -+from collections import Counter -+from typing import Container, Iterable, List, Optional, Set, Tuple -+ -+from lxml.etree import _Element - - from pcs.common import reports -+from pcs.common.reports import ReportItemList -+from pcs.common.reports import ReportProcessor - from pcs.common.reports.item import ReportItem - from pcs.lib.cib import resource - from pcs.lib.cib import stonith -+from pcs.lib.cib.nvpair import INSTANCE_ATTRIBUTES_TAG, get_value - from pcs.lib.cib.resource.common import are_meta_disabled - from pcs.lib.cib.tools import IdProvider - from pcs.lib.commands.resource import ( -@@ -20,6 +26,7 @@ from pcs.lib.communication.tools import ( - ) - from pcs.lib.env import LibraryEnvironment - from pcs.lib.errors import LibraryError -+from pcs.lib.external import CommandRunner - from pcs.lib.node import get_existing_nodes_names - from pcs.lib.pacemaker.live import ( - FenceHistoryCommandErrorException, -@@ -268,55 +275,195 @@ def history_update(env: LibraryEnvironment): - ) from e - - --def update_scsi_devices( -- env: LibraryEnvironment, -- stonith_id: str, -- set_device_list: Iterable[str], -- force_flags: Container[reports.types.ForceCode] = (), --) -> None: -+def _validate_add_remove_items( -+ add_item_list: Iterable[str], -+ remove_item_list: Iterable[str], -+ current_item_list: Iterable[str], -+ container_type: reports.types.AddRemoveContainerType, -+ item_type: reports.types.AddRemoveItemType, -+ container_id: str, -+ adjacent_item_id: Optional[str] = None, -+ container_can_be_empty: bool = False, -+) -> ReportItemList: - """ -- Update scsi fencing devices without restart and affecting other resources. -+ Validate if items can be added or removed to or from a container. 
- -- env -- provides all for communication with externals -- stonith_id -- id of stonith resource -- set_device_list -- paths to the scsi devices that would be set for stonith -- resource -- force_flags -- list of flags codes -+ add_item_list -- items to be added -+ remove_item_list -- items to be removed -+ current_item_list -- items currently in the container -+ container_type -- container type -+ item_type -- item type -+ container_id -- id of the container -+ adjacent_item_id -- an adjacent item in the container -+ container_can_be_empty -- flag to decide if container can be left empty - """ -- if not is_getting_resource_digest_supported(env.cmd_runner()): -- raise LibraryError( -+ # pylint: disable=too-many-locals -+ report_list: ReportItemList = [] -+ if not add_item_list and not remove_item_list: -+ report_list.append( - ReportItem.error( -- reports.messages.StonithRestartlessUpdateOfScsiDevicesNotSupported() -+ reports.messages.AddRemoveItemsNotSpecified( -+ container_type, item_type, container_id -+ ) - ) - ) -- cib = env.get_cib() -- if not set_device_list: -- env.report_processor.report( -+ -+ def _get_duplicate_items(item_list: Iterable[str]) -> Set[str]: -+ return {item for item, count in Counter(item_list).items() if count > 1} -+ -+ duplicate_items_list = _get_duplicate_items( -+ add_item_list -+ ) | _get_duplicate_items(remove_item_list) -+ if duplicate_items_list: -+ report_list.append( - ReportItem.error( -- reports.messages.InvalidOptionValue( -- "devices", "", None, cannot_be_empty=True -+ reports.messages.AddRemoveItemsDuplication( -+ container_type, -+ item_type, -+ container_id, -+ sorted(duplicate_items_list), -+ ) -+ ) -+ ) -+ already_present = set(add_item_list).intersection(current_item_list) -+ # report only if an adjacent id is not defined, because we want to allow -+ # to move items when adjacent_item_id is specified -+ if adjacent_item_id is None and already_present: -+ report_list.append( -+ ReportItem.error( -+ reports.messages.AddRemoveCannotAddItemsAlreadyInTheContainer( -+ container_type, -+ item_type, -+ container_id, -+ sorted(already_present), -+ ) -+ ) -+ ) -+ missing_items = set(remove_item_list).difference(current_item_list) -+ if missing_items: -+ report_list.append( -+ ReportItem.error( -+ reports.messages.AddRemoveCannotRemoveItemsNotInTheContainer( -+ container_type, -+ item_type, -+ container_id, -+ sorted(missing_items), - ) - ) - ) -+ common_items = set(add_item_list) & set(remove_item_list) -+ if common_items: -+ report_list.append( -+ ReportItem.error( -+ reports.messages.AddRemoveCannotAddAndRemoveItemsAtTheSameTime( -+ container_type, -+ item_type, -+ container_id, -+ sorted(common_items), -+ ) -+ ) -+ ) -+ if not container_can_be_empty and not add_item_list: -+ remaining_items = set(current_item_list).difference(remove_item_list) -+ if not remaining_items: -+ report_list.append( -+ ReportItem.error( -+ reports.messages.AddRemoveCannotRemoveAllItemsFromTheContainer( -+ container_type, -+ item_type, -+ container_id, -+ list(current_item_list), -+ ) -+ ) -+ ) -+ if adjacent_item_id: -+ if adjacent_item_id not in current_item_list: -+ report_list.append( -+ ReportItem.error( -+ reports.messages.AddRemoveAdjacentItemNotInTheContainer( -+ container_type, -+ item_type, -+ container_id, -+ adjacent_item_id, -+ ) -+ ) -+ ) -+ if adjacent_item_id in add_item_list: -+ report_list.append( -+ ReportItem.error( -+ reports.messages.AddRemoveCannotPutItemNextToItself( -+ container_type, -+ item_type, -+ container_id, -+ adjacent_item_id, -+ ) -+ ) -+ ) 
-+ if not add_item_list: -+ report_list.append( -+ ReportItem.error( -+ reports.messages.AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd( -+ container_type, -+ item_type, -+ container_id, -+ adjacent_item_id, -+ ) -+ ) -+ ) -+ return report_list -+ -+ -+def _update_scsi_devices_get_element_and_devices( -+ runner: CommandRunner, -+ report_processor: ReportProcessor, -+ cib: _Element, -+ stonith_id: str, -+) -> Tuple[_Element, List[str]]: -+ """ -+ Do checks and return stonith element and list of current scsi devices. -+ Raise LibraryError if checks fail. -+ -+ runner -- command runner instance -+ report_processor -- tool for warning/info/error reporting -+ cib -- cib element -+ stonith_id -- id of stonith resource -+ """ -+ if not is_getting_resource_digest_supported(runner): -+ raise LibraryError( -+ ReportItem.error( -+ reports.messages.StonithRestartlessUpdateOfScsiDevicesNotSupported() -+ ) -+ ) - ( - stonith_el, - report_list, - ) = stonith.validate_stonith_restartless_update(cib, stonith_id) -- if env.report_processor.report_list(report_list).has_errors: -+ if report_processor.report_list(report_list).has_errors: - raise LibraryError() -- # for mypy, this should not happen because exeption would be raised -+ # for mypy, this should not happen because exception would be raised - if stonith_el is None: - raise AssertionError("stonith element is None") -- -- stonith.update_scsi_devices_without_restart( -- env.cmd_runner(), -- env.get_cluster_state(), -- stonith_el, -- IdProvider(cib), -- set_device_list, -+ current_device_list = get_value( -+ INSTANCE_ATTRIBUTES_TAG, stonith_el, "devices" - ) -+ if current_device_list is None: -+ raise AssertionError("current_device_list is None") -+ return stonith_el, current_device_list.split(",") -+ -+ -+def _unfencing_scsi_devices( -+ env: LibraryEnvironment, -+ device_list: Iterable[str], -+ force_flags: Container[reports.types.ForceCode] = (), -+) -> None: -+ """ -+ Unfence scsi devices provided in device_list if it is possible to connect -+ to pcsd and corosync is running. - -- # Unfencing -+ env -- provides all for communication with externals -+ device_list -- devices to be unfenced -+ force_flags -- list of flags codes -+ """ - cluster_nodes_names, nodes_report_list = get_existing_nodes_names( - env.get_corosync_conf(), - error_on_missing_name=True, -@@ -340,8 +487,104 @@ def update_scsi_devices( - online_corosync_target_list = run_and_raise( - env.get_node_communicator(), com_cmd - ) -- com_cmd = Unfence(env.report_processor, sorted(set_device_list)) -+ com_cmd = Unfence(env.report_processor, sorted(device_list)) - com_cmd.set_targets(online_corosync_target_list) - run_and_raise(env.get_node_communicator(), com_cmd) - -+ -+def update_scsi_devices( -+ env: LibraryEnvironment, -+ stonith_id: str, -+ set_device_list: Iterable[str], -+ force_flags: Container[reports.types.ForceCode] = (), -+) -> None: -+ """ -+ Update scsi fencing devices without restart and affecting other resources. 
-+ -+ env -- provides all for communication with externals -+ stonith_id -- id of stonith resource -+ set_device_list -- paths to the scsi devices that would be set for stonith -+ resource -+ force_flags -- list of flags codes -+ """ -+ if not set_device_list: -+ env.report_processor.report( -+ ReportItem.error( -+ reports.messages.InvalidOptionValue( -+ "devices", "", None, cannot_be_empty=True -+ ) -+ ) -+ ) -+ runner = env.cmd_runner() -+ ( -+ stonith_el, -+ current_device_list, -+ ) = _update_scsi_devices_get_element_and_devices( -+ runner, env.report_processor, env.get_cib(), stonith_id -+ ) -+ if env.report_processor.has_errors: -+ raise LibraryError() -+ stonith.update_scsi_devices_without_restart( -+ runner, -+ env.get_cluster_state(), -+ stonith_el, -+ IdProvider(stonith_el), -+ set_device_list, -+ ) -+ devices_for_unfencing = set(set_device_list).difference(current_device_list) -+ if devices_for_unfencing: -+ _unfencing_scsi_devices(env, devices_for_unfencing, force_flags) -+ env.push_cib() -+ -+ -+def update_scsi_devices_add_remove( -+ env: LibraryEnvironment, -+ stonith_id: str, -+ add_device_list: Iterable[str], -+ remove_device_list: Iterable[str], -+ force_flags: Container[reports.types.ForceCode] = (), -+) -> None: -+ """ -+ Update scsi fencing devices without restart and affecting other resources. -+ -+ env -- provides all for communication with externals -+ stonith_id -- id of stonith resource -+ add_device_list -- paths to the scsi devices that would be added to the -+ stonith resource -+ remove_device_list -- paths to the scsi devices that would be removed from -+ the stonith resource -+ force_flags -- list of flags codes -+ """ -+ runner = env.cmd_runner() -+ ( -+ stonith_el, -+ current_device_list, -+ ) = _update_scsi_devices_get_element_and_devices( -+ runner, env.report_processor, env.get_cib(), stonith_id -+ ) -+ if env.report_processor.report_list( -+ _validate_add_remove_items( -+ add_device_list, -+ remove_device_list, -+ current_device_list, -+ reports.const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ reports.const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ stonith_el.get("id", ""), -+ ) -+ ).has_errors: -+ raise LibraryError() -+ updated_device_set = ( -+ set(current_device_list) -+ .union(add_device_list) -+ .difference(remove_device_list) -+ ) -+ stonith.update_scsi_devices_without_restart( -+ env.cmd_runner(), -+ env.get_cluster_state(), -+ stonith_el, -+ IdProvider(stonith_el), -+ updated_device_set, -+ ) -+ if add_device_list: -+ _unfencing_scsi_devices(env, add_device_list, force_flags) - env.push_cib() -diff --git a/pcs/pcs.8.in b/pcs/pcs.8.in -index ac093d69..1695d75c 100644 ---- a/pcs/pcs.8.in -+++ b/pcs/pcs.8.in -@@ -664,8 +664,8 @@ pcs stonith create MyFence fence_virt 'pcmk_host_map=n1:p1;n2:p2,p3' - update [stonith device options] - Add/Change options to specified stonith id. - .TP --update\-scsi\-devices set [...] --Update scsi fencing devices without affecting other resources. Stonith resource must be running on one cluster node. Each device will be unfenced on each cluster node running cluster. Supported fence agents: fence_scsi. -+update\-scsi\-devices (set [...]) | (add [...] delete|remove [...] ) -+Update scsi fencing devices without affecting other resources. You must specify either list of set devices or at least one device for add or delete/remove devices. Stonith resource must be running on one cluster node. Each device will be unfenced on each cluster node running cluster. Supported fence agents: fence_scsi. 
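For orientation while reading the library changes above: update_scsi_devices_add_remove builds the new device set as the current devices plus the added ones minus the removed ones, and unfences only the devices being added, while the plain set form unfences only devices that were not configured before. A minimal standalone sketch of that arithmetic follows; it is illustrative only, not part of the patch, and the helper names are invented.

    def sketch_new_device_set(current, add, remove):
        # mirrors update_scsi_devices_add_remove: keep the current devices,
        # union in the added ones, then drop the removed ones
        return set(current).union(add).difference(remove)


    def sketch_devices_to_unfence(current, add=None, set_list=None):
        # the add/remove form unfences exactly the added devices;
        # the set form unfences only devices not previously configured
        if add is not None:
            return set(add)
        return set(set_list).difference(current)


    assert sketch_new_device_set(["/dev/sda"], ["/dev/sdb"], []) == {
        "/dev/sda",
        "/dev/sdb",
    }
    assert sketch_devices_to_unfence(
        ["/dev/sda"], set_list=["/dev/sda", "/dev/sdb"]
    ) == {"/dev/sdb"}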
- .TP - delete - Remove stonith id from configuration. -diff --git a/pcs/stonith.py b/pcs/stonith.py -index c7eb14de..6ed8b751 100644 ---- a/pcs/stonith.py -+++ b/pcs/stonith.py -@@ -894,24 +894,43 @@ def stonith_update_scsi_devices(lib, argv, modifiers): - * --skip-offline - skip unreachable nodes - """ - modifiers.ensure_only_supported("--request-timeout", "--skip-offline") -+ force_flags = [] -+ if modifiers.get("--skip-offline"): -+ force_flags.append(reports.codes.SKIP_OFFLINE_NODES) -+ - if len(argv) < 2: - raise CmdLineInputError() - stonith_id = argv[0] - parsed_args = parse_args.group_by_keywords( - argv[1:], -- ["set"], -+ ["set", "add", "remove", "delete"], - keyword_repeat_allowed=False, - only_found_keywords=True, - ) -- set_args = parsed_args["set"] if "set" in parsed_args else [] -- if not set_args: -- raise CmdLineInputError( -- show_both_usage_and_message=True, -- hint="You must specify set devices to be updated", -- ) -- force_flags = [] -- if modifiers.get("--skip-offline"): -- force_flags.append(reports.codes.SKIP_OFFLINE_NODES) -- lib.stonith.update_scsi_devices( -- stonith_id, set_args, force_flags=force_flags -+ cmd_exception = CmdLineInputError( -+ show_both_usage_and_message=True, -+ hint=( -+ "You must specify either list of set devices or at least one device" -+ " for add or delete/remove devices" -+ ), - ) -+ if "set" in parsed_args and {"add", "remove", "delete"} & set( -+ parsed_args.keys() -+ ): -+ raise cmd_exception -+ if "set" in parsed_args: -+ if not parsed_args["set"]: -+ raise cmd_exception -+ lib.stonith.update_scsi_devices( -+ stonith_id, parsed_args["set"], force_flags=force_flags -+ ) -+ else: -+ for key in ("add", "remove", "delete"): -+ if key in parsed_args and not parsed_args[key]: -+ raise cmd_exception -+ lib.stonith.update_scsi_devices_add_remove( -+ stonith_id, -+ parsed_args.get("add", []), -+ parsed_args.get("delete", []) + parsed_args.get("remove", []), -+ force_flags=force_flags, -+ ) -diff --git a/pcs/usage.py b/pcs/usage.py -index 38e21ed9..66e097f1 100644 ---- a/pcs/usage.py -+++ b/pcs/usage.py -@@ -1289,11 +1289,14 @@ Commands: - update [stonith device options] - Add/Change options to specified stonith id. - -- update-scsi-devices set [...] -- Update scsi fencing devices without affecting other resources. Stonith -- resource must be running on one cluster node. Each device will be -- unfenced on each cluster node running cluster. Supported fence agents: -- fence_scsi. -+ update-scsi-devices (set [...]) -+ | (add [...] delete|remove -+ [device-path>...]) -+ Update scsi fencing devices without affecting other resources. You must -+ specify either list of set devices or at least one device for add or -+ delete/remove devices. Stonith resource must be running on one cluster -+ node. Each device will be unfenced on each cluster node running -+ cluster. Supported fence agents: fence_scsi. - - delete - Remove stonith id from configuration. 
-diff --git a/pcs_test/Makefile.am b/pcs_test/Makefile.am -index b4df00e2..c7346f96 100644 ---- a/pcs_test/Makefile.am -+++ b/pcs_test/Makefile.am -@@ -236,6 +236,7 @@ EXTRA_DIST = \ - tier0/lib/commands/test_stonith_agent.py \ - tier0/lib/commands/test_stonith_history.py \ - tier0/lib/commands/test_stonith.py \ -+ tier0/lib/commands/test_stonith_update_scsi_devices.py \ - tier0/lib/commands/test_ticket.py \ - tier0/lib/communication/__init__.py \ - tier0/lib/communication/test_booth.py \ -diff --git a/pcs_test/tier0/cli/test_stonith.py b/pcs_test/tier0/cli/test_stonith.py -index 5bc18f3c..a54b442e 100644 ---- a/pcs_test/tier0/cli/test_stonith.py -+++ b/pcs_test/tier0/cli/test_stonith.py -@@ -149,15 +149,41 @@ class SbdDeviceSetup(TestCase): - - - class StonithUpdateScsiDevices(TestCase): -+ # pylint: disable=too-many-public-methods - def setUp(self): - self.lib = mock.Mock(spec_set=["stonith"]) -- self.stonith = mock.Mock(spec_set=["update_scsi_devices"]) -+ self.stonith = mock.Mock( -+ spec_set=["update_scsi_devices", "update_scsi_devices_add_remove"] -+ ) - self.lib.stonith = self.stonith - - def assert_called_with(self, stonith_id, set_devices, force_flags): - self.stonith.update_scsi_devices.assert_called_once_with( - stonith_id, set_devices, force_flags=force_flags - ) -+ self.stonith.update_scsi_devices_add_remove.assert_not_called() -+ -+ def assert_add_remove_called_with( -+ self, stonith_id, add_devices, remove_devices, force_flags -+ ): -+ self.stonith.update_scsi_devices_add_remove.assert_called_once_with( -+ stonith_id, add_devices, remove_devices, force_flags=force_flags -+ ) -+ self.stonith.update_scsi_devices.assert_not_called() -+ -+ def assert_bad_syntax_cli_exception(self, args): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self.call_cmd(args) -+ self.assertEqual(cm.exception.message, None) -+ self.assertEqual( -+ cm.exception.hint, -+ ( -+ "You must specify either list of set devices or at least one " -+ "device for add or delete/remove devices" -+ ), -+ ) -+ self.stonith.update_scsi_devices.assert_not_called() -+ self.stonith.update_scsi_devices_add_remove.assert_not_called() - - def call_cmd(self, argv, modifiers=None): - stonith.stonith_update_scsi_devices( -@@ -174,44 +200,141 @@ class StonithUpdateScsiDevices(TestCase): - self.call_cmd(["stonith-id"]) - self.assertEqual(cm.exception.message, None) - -- def test_not_set_keyword(self): -+ def test_unknown_keyword(self): - with self.assertRaises(CmdLineInputError) as cm: - self.call_cmd(["stonith-id", "unset"]) - self.assertEqual(cm.exception.message, None) - -- def test_only_set_keyword(self): -- with self.assertRaises(CmdLineInputError) as cm: -- self.call_cmd(["stonith-id", "set"]) -- self.assertEqual(cm.exception.message, None) -- self.assertEqual( -- cm.exception.hint, "You must specify set devices to be updated" -- ) -- -- def test_one_device(self): -- self.call_cmd(["stonith-id", "set", "device1"]) -- self.assert_called_with("stonith-id", ["device1"], []) -- -- def test_more_devices(self): -- self.call_cmd(["stonith-id", "set", "device1", "device2"]) -- self.assert_called_with("stonith-id", ["device1", "device2"], []) -- - def test_supported_options(self): - self.call_cmd( -- ["stonith-id", "set", "device1", "device2"], -+ ["stonith-id", "set", "d1", "d2"], - {"skip-offline": True, "request-timeout": 60}, - ) - self.assert_called_with( - "stonith-id", -- ["device1", "device2"], -+ ["d1", "d2"], - [reports.codes.SKIP_OFFLINE_NODES], - ) - - def test_unsupported_options(self): - with 
self.assertRaises(CmdLineInputError) as cm: -- self.call_cmd( -- ["stonith-id", "set", "device1", "device2"], {"force": True} -- ) -+ self.call_cmd(["stonith-id", "set", "d1", "d2"], {"force": True}) - self.assertEqual( - cm.exception.message, - "Specified option '--force' is not supported in this command", - ) -+ -+ def test_only_set_keyword(self): -+ self.assert_bad_syntax_cli_exception(["stonith-id", "set"]) -+ -+ def test_only_add_keyword(self): -+ self.assert_bad_syntax_cli_exception(["stonith-id", "add"]) -+ -+ def test_only_remove_keyword(self): -+ self.assert_bad_syntax_cli_exception(["stonith-id", "remove"]) -+ -+ def test_only_delete_keyword(self): -+ self.assert_bad_syntax_cli_exception(["stonith-id", "delete"]) -+ -+ def test_add_and_empty_remove(self): -+ self.assert_bad_syntax_cli_exception( -+ ["stonith-id", "add", "d1", "remove"] -+ ) -+ -+ def test_add_and_empty_delete(self): -+ self.assert_bad_syntax_cli_exception( -+ ["stonith-id", "add", "d1", "delete"] -+ ) -+ -+ def test_empty_add_and_remove(self): -+ self.assert_bad_syntax_cli_exception( -+ ["stonith-id", "add", "remove", "d1"] -+ ) -+ -+ def test_empty_add_and_delete(self): -+ self.assert_bad_syntax_cli_exception( -+ ["stonith-id", "add", "delete", "d1"] -+ ) -+ -+ def test_empty_remove_and_delete(self): -+ self.assert_bad_syntax_cli_exception( -+ ["stonith-id", "remove", "delete", "d1"] -+ ) -+ -+ def test_empty_delete_and_remove(self): -+ self.assert_bad_syntax_cli_exception( -+ ["stonith-id", "delete", "remove", "d1"] -+ ) -+ -+ def test_empty_add_empty_remove_empty_delete(self): -+ self.assert_bad_syntax_cli_exception( -+ ["stonith-id", "add", "delete", "remove"] -+ ) -+ -+ def test_set_add_remove_delete_devices(self): -+ self.assert_bad_syntax_cli_exception( -+ [ -+ "stonith-id", -+ "set", -+ "add", -+ "d2", -+ "remove", -+ "d3", -+ "delete", -+ "d4", -+ ] -+ ) -+ -+ def test_set_devices(self): -+ self.call_cmd(["stonith-id", "set", "d1", "d2"]) -+ self.assert_called_with("stonith-id", ["d1", "d2"], []) -+ -+ def test_add_devices(self): -+ self.call_cmd(["stonith-id", "add", "d1", "d2"]) -+ self.assert_add_remove_called_with("stonith-id", ["d1", "d2"], [], []) -+ -+ def test_remove_devices(self): -+ self.call_cmd(["stonith-id", "remove", "d1", "d2"]) -+ self.assert_add_remove_called_with("stonith-id", [], ["d1", "d2"], []) -+ -+ def test_delete_devices(self): -+ self.call_cmd(["stonith-id", "delete", "d1", "d2"]) -+ self.assert_add_remove_called_with("stonith-id", [], ["d1", "d2"], []) -+ -+ def test_add_remove_devices(self): -+ self.call_cmd(["stonith-id", "add", "d1", "d2", "remove", "d3", "d4"]) -+ self.assert_add_remove_called_with( -+ "stonith-id", ["d1", "d2"], ["d3", "d4"], [] -+ ) -+ -+ def test_add_delete_devices(self): -+ self.call_cmd(["stonith-id", "add", "d1", "d2", "delete", "d3", "d4"]) -+ self.assert_add_remove_called_with( -+ "stonith-id", ["d1", "d2"], ["d3", "d4"], [] -+ ) -+ -+ def test_add_delete_remove_devices(self): -+ self.call_cmd( -+ [ -+ "stonith-id", -+ "add", -+ "d1", -+ "d2", -+ "delete", -+ "d3", -+ "d4", -+ "remove", -+ "d5", -+ ] -+ ) -+ self.assert_add_remove_called_with( -+ "stonith-id", ["d1", "d2"], ["d3", "d4", "d5"], [] -+ ) -+ -+ def test_remove_delete_devices(self): -+ self.call_cmd( -+ ["stonith-id", "remove", "d2", "d1", "delete", "d4", "d3"] -+ ) -+ self.assert_add_remove_called_with( -+ "stonith-id", [], ["d4", "d3", "d2", "d1"], [] -+ ) -diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py -index 
0cb97138..b0826cfd 100644 ---- a/pcs_test/tier0/common/reports/test_messages.py -+++ b/pcs_test/tier0/common/reports/test_messages.py -@@ -1761,6 +1761,7 @@ class ResourceBundleAlreadyContainsAResource(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class CannotGroupResourceAdjacentResourceForNewGroup(NameBuildTest): - def test_success(self): - self.assert_message_from_report( -@@ -1772,6 +1773,7 @@ class CannotGroupResourceAdjacentResourceForNewGroup(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class CannotGroupResourceAdjacentResourceNotInGroup(NameBuildTest): - def test_success(self): - self.assert_message_from_report( -@@ -1783,6 +1785,7 @@ class CannotGroupResourceAdjacentResourceNotInGroup(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class CannotGroupResourceAlreadyInTheGroup(NameBuildTest): - def test_single_resource(self): - self.assert_message_from_report( -@@ -1797,6 +1800,7 @@ class CannotGroupResourceAlreadyInTheGroup(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class CannotGroupResourceMoreThanOnce(NameBuildTest): - def test_single_resource(self): - self.assert_message_from_report( -@@ -1811,6 +1815,7 @@ class CannotGroupResourceMoreThanOnce(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class CannotGroupResourceNoResources(NameBuildTest): - def test_success(self): - self.assert_message_from_report( -@@ -1818,6 +1823,7 @@ class CannotGroupResourceNoResources(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class CannotGroupResourceNextToItself(NameBuildTest): - def test_success(self): - self.assert_message_from_report( -@@ -4836,6 +4842,7 @@ class BoothTicketOperationFailed(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class TagAddRemoveIdsDuplication(NameBuildTest): - def test_message_add(self): - self.assert_message_from_report( -@@ -4855,6 +4862,7 @@ class TagAddRemoveIdsDuplication(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class TagAdjacentReferenceIdNotInTheTag(NameBuildTest): - def test_messag(self): - self.assert_message_from_report( -@@ -4866,6 +4874,7 @@ class TagAdjacentReferenceIdNotInTheTag(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class TagCannotAddAndRemoveIdsAtTheSameTime(NameBuildTest): - def test_message_one_item(self): - self.assert_message_from_report( -@@ -4885,6 +4894,7 @@ class TagCannotAddAndRemoveIdsAtTheSameTime(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class TagCannotAddReferenceIdsAlreadyInTheTag(NameBuildTest): - def test_message_singular(self): - self.assert_message_from_report( -@@ -4920,6 +4930,7 @@ class TagCannotCreateEmptyTagNoIdsSpecified(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class TagCannotPutIdNextToItself(NameBuildTest): - def test_message(self): - self.assert_message_from_report( -@@ -4928,6 +4939,7 @@ class TagCannotPutIdNextToItself(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class TagCannotRemoveAdjacentId(NameBuildTest): - def test_message(self): - self.assert_message_from_report( -@@ -4936,6 +4948,7 @@ class TagCannotRemoveAdjacentId(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class TagCannotRemoveReferencesWithoutRemovingTag(NameBuildTest): - def test_message(self): - self.assert_message_from_report( -@@ -4974,6 +4987,7 @@ class TagCannotRemoveTagsNoTagsSpecified(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class 
TagCannotSpecifyAdjacentIdWithoutIdsToAdd(NameBuildTest): - def test_message(self): - self.assert_message_from_report( -@@ -4982,6 +4996,7 @@ class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class TagCannotUpdateTagNoIdsSpecified(NameBuildTest): - def test_message(self): - self.assert_message_from_report( -@@ -4990,6 +5005,7 @@ class TagCannotUpdateTagNoIdsSpecified(NameBuildTest): - ) - - -+# TODO: remove, use ADD_REMOVE reports - class TagIdsNotInTheTag(NameBuildTest): - def test_message_singular(self): - self.assert_message_from_report( -@@ -5080,3 +5096,172 @@ class CibNvsetAmbiguousProvideNvsetId(NameBuildTest): - const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE - ), - ) -+ -+ -+class AddRemoveItemsNotSpecified(NameBuildTest): -+ def test_message(self): -+ self.assert_message_from_report( -+ ( -+ "Cannot modify stonith resource 'container-id', no devices to " -+ "add or remove specified" -+ ), -+ reports.AddRemoveItemsNotSpecified( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ ), -+ ) -+ -+ -+class AddRemoveItemsDuplication(NameBuildTest): -+ def test_message(self): -+ self.assert_message_from_report( -+ ( -+ "Devices to add or remove must be unique, duplicate devices: " -+ "'dup1', 'dup2'" -+ ), -+ reports.AddRemoveItemsDuplication( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ ["dup2", "dup1"], -+ ), -+ ) -+ -+ -+class AddRemoveCannotAddItemsAlreadyInTheContainer(NameBuildTest): -+ def test_message_plural(self): -+ self.assert_message_from_report( -+ "Cannot add devices 'i1', 'i2', they are already present in stonith" -+ " resource 'container-id'", -+ reports.AddRemoveCannotAddItemsAlreadyInTheContainer( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ ["i2", "i1"], -+ ), -+ ) -+ -+ def test_message_singular(self): -+ self.assert_message_from_report( -+ "Cannot add device 'i1', it is already present in stonith resource " -+ "'container-id'", -+ reports.AddRemoveCannotAddItemsAlreadyInTheContainer( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ ["i1"], -+ ), -+ ) -+ -+ -+class AddRemoveCannotRemoveItemsNotInTheContainer(NameBuildTest): -+ def test_message_plural(self): -+ self.assert_message_from_report( -+ ( -+ "Cannot remove devices 'i1', 'i2', they are not present in " -+ "stonith resource 'container-id'" -+ ), -+ reports.AddRemoveCannotRemoveItemsNotInTheContainer( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ ["i2", "i1"], -+ ), -+ ) -+ -+ def test_message_singular(self): -+ self.assert_message_from_report( -+ ( -+ "Cannot remove device 'i1', it is not present in " -+ "stonith resource 'container-id'" -+ ), -+ reports.AddRemoveCannotRemoveItemsNotInTheContainer( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ ["i1"], -+ ), -+ ) -+ -+ -+class AddRemoveCannotAddAndRemoveItemsAtTheSameTime(NameBuildTest): -+ def test_message_plural(self): -+ self.assert_message_from_report( -+ "Devices cannot be added and removed at the same time: 'i1', 'i2'", -+ reports.AddRemoveCannotAddAndRemoveItemsAtTheSameTime( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ ["i2", "i1"], -+ ), -+ 
) -+ -+ def test_message_singular(self): -+ self.assert_message_from_report( -+ "Device cannot be added and removed at the same time: 'i1'", -+ reports.AddRemoveCannotAddAndRemoveItemsAtTheSameTime( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ ["i1"], -+ ), -+ ) -+ -+ -+class AddRemoveCannotRemoveAllItemsFromTheContainer(NameBuildTest): -+ def test_message(self): -+ self.assert_message_from_report( -+ "Cannot remove all devices from stonith resource 'container-id'", -+ reports.AddRemoveCannotRemoveAllItemsFromTheContainer( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ ["i1", "i2"], -+ ), -+ ) -+ -+ -+class AddRemoveAdjacentItemNotInTheContainer(NameBuildTest): -+ def test_message(self): -+ self.assert_message_from_report( -+ ( -+ "There is no device 'adjacent-item-id' in the stonith resource " -+ "'container-id', cannot add devices next to it" -+ ), -+ reports.AddRemoveAdjacentItemNotInTheContainer( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ "adjacent-item-id", -+ ), -+ ) -+ -+ -+class AddRemoveCannotPutItemNextToItself(NameBuildTest): -+ def test_message(self): -+ self.assert_message_from_report( -+ "Cannot put device 'adjacent-item-id' next to itself", -+ reports.AddRemoveCannotPutItemNextToItself( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ "adjacent-item-id", -+ ), -+ ) -+ -+ -+class AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd(NameBuildTest): -+ def test_message(self): -+ self.assert_message_from_report( -+ ( -+ "Cannot specify adjacent device 'adjacent-item-id' without " -+ "devices to add" -+ ), -+ reports.AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd( -+ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ const.ADD_REMOVE_ITEM_TYPE_DEVICE, -+ "container-id", -+ "adjacent-item-id", -+ ), -+ ) -diff --git a/pcs_test/tier0/common/test_str_tools.py b/pcs_test/tier0/common/test_str_tools.py -index 97c1d223..b0028a88 100644 ---- a/pcs_test/tier0/common/test_str_tools.py -+++ b/pcs_test/tier0/common/test_str_tools.py -@@ -1,5 +1,5 @@ - # pylint: disable=protected-access --from unittest import TestCase, mock -+from unittest import TestCase - - from pcs.common import str_tools as tools - -@@ -124,73 +124,48 @@ class AddSTest(TestCase): - self.assertEqual(tools._add_s("church"), "churches") - - --@mock.patch("pcs.common.str_tools._add_s") --@mock.patch("pcs.common.str_tools._is_multiple") -+class GetPluralTest(TestCase): -+ def test_common_plural(self): -+ self.assertEqual("are", tools.get_plural("is")) -+ -+ def test_add_s(self): -+ self.assertEqual("pieces", tools.get_plural("piece")) -+ -+ - class FormatPluralTest(TestCase): -- def test_is_sg(self, mock_is_multiple, mock_add_s): -- mock_is_multiple.return_value = False -+ def test_is_sg(self): - self.assertEqual("is", tools.format_plural(1, "is")) -- mock_add_s.assert_not_called() -- mock_is_multiple.assert_called_once_with(1) - -- def test_is_pl(self, mock_is_multiple, mock_add_s): -- mock_is_multiple.return_value = True -+ def test_is_pl(self): - self.assertEqual("are", tools.format_plural(2, "is")) -- mock_add_s.assert_not_called() -- mock_is_multiple.assert_called_once_with(2) - -- def test_do_sg(self, mock_is_multiple, mock_add_s): -- mock_is_multiple.return_value = False -+ def test_do_sg(self): - self.assertEqual("does", tools.format_plural("he", 
"does")) -- mock_add_s.assert_not_called() -- mock_is_multiple.assert_called_once_with("he") - -- def test_do_pl(self, mock_is_multiple, mock_add_s): -- mock_is_multiple.return_value = True -+ def test_do_pl(self): - self.assertEqual("do", tools.format_plural(["he", "she"], "does")) -- mock_add_s.assert_not_called() -- mock_is_multiple.assert_called_once_with(["he", "she"]) - -- def test_have_sg(self, mock_is_multiple, mock_add_s): -- mock_is_multiple.return_value = False -+ def test_have_sg(self): - self.assertEqual("has", tools.format_plural("he", "has")) -- mock_add_s.assert_not_called() -- mock_is_multiple.assert_called_once_with("he") - -- def test_have_pl(self, mock_is_multiple, mock_add_s): -- mock_is_multiple.return_value = True -+ def test_have_pl(self): - self.assertEqual("have", tools.format_plural(["he", "she"], "has")) -- mock_add_s.assert_not_called() -- mock_is_multiple.assert_called_once_with(["he", "she"]) - -- def test_plural_sg(self, mock_is_multiple, mock_add_s): -- mock_is_multiple.return_value = False -+ def test_plural_sg(self): - self.assertEqual( - "singular", tools.format_plural(1, "singular", "plural") - ) -- mock_add_s.assert_not_called() -- mock_is_multiple.assert_called_once_with(1) - -- def test_plural_pl(self, mock_is_multiple, mock_add_s): -- mock_is_multiple.return_value = True -+ def test_plural_pl(self): - self.assertEqual( - "plural", tools.format_plural(10, "singular", "plural") - ) -- mock_add_s.assert_not_called() -- mock_is_multiple.assert_called_once_with(10) - -- def test_regular_sg(self, mock_is_multiple, mock_add_s): -- mock_is_multiple.return_value = False -+ def test_regular_sg(self): - self.assertEqual("greeting", tools.format_plural(1, "greeting")) -- mock_add_s.assert_not_called() -- mock_is_multiple.assert_called_once_with(1) - -- def test_regular_pl(self, mock_is_multiple, mock_add_s): -- mock_add_s.return_value = "greetings" -- mock_is_multiple.return_value = True -+ def test_regular_pl(self): - self.assertEqual("greetings", tools.format_plural(10, "greeting")) -- mock_add_s.assert_called_once_with("greeting") -- mock_is_multiple.assert_called_once_with(10) - - - class FormatList(TestCase): -diff --git a/pcs_test/tier0/lib/cib/test_stonith.py b/pcs_test/tier0/lib/cib/test_stonith.py -index ef7571ce..df059121 100644 ---- a/pcs_test/tier0/lib/cib/test_stonith.py -+++ b/pcs_test/tier0/lib/cib/test_stonith.py -@@ -2,8 +2,12 @@ from unittest import TestCase - - from lxml import etree - -+from pcs.common import reports - from pcs.lib.cib import stonith - -+from pcs_test.tools import fixture -+from pcs_test.tools.assertions import assert_report_item_list_equal -+ - - class IsStonithEnabled(TestCase): - def test_not_set(self): -@@ -149,8 +153,129 @@ class GetMisconfiguredResources(TestCase): - ) - - --class ValidateStonithDeviceExistsAndSupported(TestCase): -- """ -- tested in: -- pcs_test.tier0.lib.commands.test_stonith_update_scsi_devices.TestUpdateScsiDevicesFailures -- """ -+class ValidateStonithRestartlessUpdate(TestCase): -+ RESOURCES = etree.fromstring( -+ """ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ """ -+ ) -+ -+ def assert_unsupported_stonith_agent(self, resource_id, resource_type): -+ stonith_el, report_list = stonith.validate_stonith_restartless_update( -+ self.RESOURCES, resource_id -+ ) -+ self.assertEqual( -+ stonith_el, -+ self.RESOURCES.find(f".//primitive[@id='{resource_id}']"), -+ ) -+ assert_report_item_list_equal( -+ report_list, -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT, 
-+ resource_id=resource_id, -+ resource_type=resource_type, -+ supported_stonith_types=["fence_scsi"], -+ ) -+ ], -+ ) -+ -+ def assert_no_devices(self, resource_id): -+ stonith_el, report_list = stonith.validate_stonith_restartless_update( -+ self.RESOURCES, resource_id -+ ) -+ self.assertEqual( -+ stonith_el, -+ self.RESOURCES.find(f".//primitive[@id='{resource_id}']"), -+ ) -+ assert_report_item_list_equal( -+ report_list, -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason=( -+ "no devices option configured for stonith device " -+ f"'{resource_id}'" -+ ), -+ reason_type="other", -+ ) -+ ], -+ ) -+ -+ def test_supported(self): -+ stonith_el, report_list = stonith.validate_stonith_restartless_update( -+ self.RESOURCES, "supported" -+ ) -+ self.assertEqual( -+ stonith_el, self.RESOURCES.find(".//primitive[@id='supported']") -+ ) -+ assert_report_item_list_equal(report_list, []) -+ -+ def test_nonexistent_id(self): -+ stonith_el, report_list = stonith.validate_stonith_restartless_update( -+ self.RESOURCES, "non-existent" -+ ) -+ self.assertEqual(stonith_el, None) -+ assert_report_item_list_equal( -+ report_list, -+ [ -+ fixture.error( -+ reports.codes.ID_NOT_FOUND, -+ id="non-existent", -+ expected_types=["primitive"], -+ context_type="resources", -+ context_id="", -+ ) -+ ], -+ ) -+ -+ def test_not_a_resource_id(self): -+ stonith_el, report_list = stonith.validate_stonith_restartless_update( -+ self.RESOURCES, "empty-instance_attributes-devices" -+ ) -+ self.assertEqual(stonith_el, None) -+ assert_report_item_list_equal( -+ report_list, -+ [ -+ fixture.error( -+ reports.codes.ID_BELONGS_TO_UNEXPECTED_TYPE, -+ id="empty-instance_attributes-devices", -+ expected_types=["primitive"], -+ current_type="nvpair", -+ ) -+ ], -+ ) -+ -+ def test_devices_empty(self): -+ self.assert_no_devices("empty") -+ -+ def test_missing_devices_attr(self): -+ self.assert_no_devices("no-devices") -+ -+ def test_unsupported_class(self): -+ self.assert_unsupported_stonith_agent("cp-01", "Dummy") -+ -+ def test_unsupported_provider(self): -+ self.assert_unsupported_stonith_agent( -+ "unsupported_provider", "fence_scsi" -+ ) -+ -+ def test_unsupported_type(self): -+ self.assert_unsupported_stonith_agent("unsupported_type", "fence_xvm") -diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py -index 3bc51325..6ff6b99a 100644 ---- a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py -+++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py -@@ -3,6 +3,7 @@ from unittest import mock, TestCase - - - from pcs_test.tools import fixture -+from pcs_test.tools.assertions import assert_report_item_list_equal - from pcs_test.tools.command_env import get_env_tools - from pcs_test.tools.misc import get_test_resource as rc - -@@ -13,6 +14,10 @@ from pcs.common import ( - reports, - ) - from pcs.common.interface import dto -+from pcs.common.reports.const import ( -+ ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ ADD_REMOVE_ITEM_TYPE_DEVICE, -+) - from pcs.common.tools import timeout_to_seconds - - from .cluster.common import ( -@@ -28,6 +33,10 @@ DEFAULT_DIGEST = _DIGEST + "0" - ALL_DIGEST = _DIGEST + "1" - NONPRIVATE_DIGEST = _DIGEST + "2" - NONRELOADABLE_DIGEST = _DIGEST + "3" -+DEV_1 = "/dev/sda" -+DEV_2 = "/dev/sdb" -+DEV_3 = "/dev/sdc" -+DEV_4 = "/dev/sdd" - DEVICES_1 = ("/dev/sda",) - DEVICES_2 = ("/dev/sda", "/dev/sdb") - DEVICES_3 = ("/dev/sda", "/dev/sdb", "/dev/sdc") 
-@@ -197,13 +206,9 @@ FIXTURE_CRM_MON_RES_STOPPED = f""" - """ - - --@mock.patch.object( -- settings, -- "pacemaker_api_result_schema", -- rc("pcmk_api_rng/api-result.rng"), --) --class UpdateScsiDevices(TestCase): -+class UpdateScsiDevicesMixin: - def setUp(self): -+ # pylint: disable=invalid-name - self.env_assist, self.config = get_env_tools(self) - - self.existing_nodes = ["node1", "node2", "node3"] -@@ -217,14 +222,18 @@ class UpdateScsiDevices(TestCase): - self, - devices_before=DEVICES_1, - devices_updated=DEVICES_2, -+ devices_add=(), -+ devices_remove=(), -+ unfence=None, - resource_ops=DEFAULT_OPS, - lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS, - lrm_start_ops=DEFAULT_LRM_START_OPS, - lrm_monitor_ops_updated=DEFAULT_LRM_MONITOR_OPS_UPDATED, - lrm_start_ops_updated=DEFAULT_LRM_START_OPS_UPDATED, - ): -+ # pylint: disable=too-many-arguments - # pylint: disable=too-many-locals -- self.config.runner.pcmk.is_resource_digests_supported() -+ devices_value = ",".join(sorted(devices_updated)) - self.config.runner.cib.load( - resources=fixture_scsi( - devices=devices_before, resource_ops=resource_ops -@@ -235,16 +244,17 @@ class UpdateScsiDevices(TestCase): - lrm_monitor_ops=lrm_monitor_ops, - ), - ) -+ self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.pcmk.load_state( - resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES - ) -- devices_opt = "devices={}".format(",".join(devices_updated)) -+ devices_opt = "devices={}".format(devices_value) - self.config.runner.pcmk.resource_digests( - SCSI_STONITH_ID, - SCSI_NODE, - name="start.op.digests", - stdout=fixture_digests_xml( -- SCSI_STONITH_ID, SCSI_NODE, devices=",".join(devices_updated) -+ SCSI_STONITH_ID, SCSI_NODE, devices=devices_value - ), - args=[devices_opt], - ) -@@ -272,22 +282,23 @@ class UpdateScsiDevices(TestCase): - stdout=fixture_digests_xml( - SCSI_STONITH_ID, - SCSI_NODE, -- devices=",".join(devices_updated), -+ devices=devices_value, - ), - args=args, - ) -- self.config.corosync_conf.load_content( -- corosync_conf_fixture( -- self.existing_corosync_nodes, -- get_two_node(len(self.existing_corosync_nodes)), -+ if unfence: -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture( -+ self.existing_corosync_nodes, -+ get_two_node(len(self.existing_corosync_nodes)), -+ ) -+ ) -+ self.config.http.corosync.get_corosync_online_targets( -+ node_labels=self.existing_nodes -+ ) -+ self.config.http.scsi.unfence_node( -+ unfence, node_labels=self.existing_nodes - ) -- ) -- self.config.http.corosync.get_corosync_online_targets( -- node_labels=self.existing_nodes -- ) -- self.config.http.scsi.unfence_node( -- devices_updated, node_labels=self.existing_nodes -- ) - self.config.env.push_cib( - resources=fixture_scsi( - devices=devices_updated, resource_ops=resource_ops -@@ -298,113 +309,25 @@ class UpdateScsiDevices(TestCase): - lrm_monitor_ops=lrm_monitor_ops_updated, - ), - ) -- stonith.update_scsi_devices( -- self.env_assist.get_env(), SCSI_STONITH_ID, devices_updated -- ) -+ if devices_add or devices_remove: -+ stonith.update_scsi_devices_add_remove( -+ self.env_assist.get_env(), -+ SCSI_STONITH_ID, -+ devices_add, -+ devices_remove, -+ ) -+ else: -+ stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, devices_updated -+ ) - self.env_assist.assert_reports([]) - -- def test_update_1_to_1_devices(self): -- self.assert_command_success( -- devices_before=DEVICES_1, devices_updated=DEVICES_1 -- ) -- -- def test_update_2_to_2_devices(self): -- self.assert_command_success( 
-- devices_before=DEVICES_1, devices_updated=DEVICES_1 -- ) -- -- def test_update_1_to_2_devices(self): -- self.assert_command_success() -- -- def test_update_1_to_3_devices(self): -- self.assert_command_success( -- devices_before=DEVICES_1, devices_updated=DEVICES_3 -- ) -- -- def test_update_3_to_1_devices(self): -- self.assert_command_success( -- devices_before=DEVICES_3, devices_updated=DEVICES_1 -- ) -- -- def test_update_3_to_2_devices(self): -- self.assert_command_success( -- devices_before=DEVICES_3, devices_updated=DEVICES_2 -- ) -- -- def test_default_monitor(self): -- self.assert_command_success() -- -- def test_no_monitor_ops(self): -- self.assert_command_success( -- resource_ops=(), lrm_monitor_ops=(), lrm_monitor_ops_updated=() -- ) -- -- def test_1_monitor_with_timeout(self): -- self.assert_command_success( -- resource_ops=(("monitor", "30s", "10s", None),), -- lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),), -- lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),), -- ) -- -- def test_2_monitor_ops_with_timeouts(self): -- self.assert_command_success( -- resource_ops=( -- ("monitor", "30s", "10s", None), -- ("monitor", "40s", "20s", None), -- ), -- lrm_monitor_ops=( -- ("30000", DEFAULT_DIGEST, None, None), -- ("40000", DEFAULT_DIGEST, None, None), -- ), -- lrm_monitor_ops_updated=( -- ("30000", ALL_DIGEST, None, None), -- ("40000", ALL_DIGEST, None, None), -- ), -- ) -- -- def test_2_monitor_ops_with_one_timeout(self): -- self.assert_command_success( -- resource_ops=( -- ("monitor", "30s", "10s", None), -- ("monitor", "60s", None, None), -- ), -- lrm_monitor_ops=( -- ("30000", DEFAULT_DIGEST, None, None), -- ("60000", DEFAULT_DIGEST, None, None), -- ), -- lrm_monitor_ops_updated=( -- ("30000", ALL_DIGEST, None, None), -- ("60000", ALL_DIGEST, None, None), -- ), -- ) -- -- def test_various_start_ops_one_lrm_start_op(self): -- self.assert_command_success( -- resource_ops=( -- ("monitor", "60s", None, None), -- ("start", "0s", "40s", None), -- ("start", "0s", "30s", "1"), -- ("start", "10s", "5s", None), -- ("start", "20s", None, None), -- ), -- ) -- -- def test_1_nonrecurring_start_op_with_timeout(self): -- self.assert_command_success( -- resource_ops=( -- ("monitor", "60s", None, None), -- ("start", "0s", "40s", None), -- ), -- ) - -+class UpdateScsiDevicesFailuresMixin: -+ def command(self, force_flags=()): -+ raise NotImplementedError - --@mock.patch.object( -- settings, -- "pacemaker_api_result_schema", -- rc("pcmk_api_rng/api-result.rng"), --) --class TestUpdateScsiDevicesFailures(TestCase): -- # pylint: disable=too-many-public-methods -+ # pylint: disable=invalid-name - def setUp(self): - self.env_assist, self.config = get_env_tools(self) - -@@ -416,13 +339,12 @@ class TestUpdateScsiDevicesFailures(TestCase): - self.config.env.set_known_nodes(self.existing_nodes) - - def test_pcmk_doesnt_support_digests(self): -+ self.config.runner.cib.load(resources=fixture_scsi()) - self.config.runner.pcmk.is_resource_digests_supported( - is_supported=False - ) - self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), SCSI_STONITH_ID, () -- ), -+ self.command(), - [ - fixture.error( - reports.codes.STONITH_RESTARTLESS_UPDATE_OF_SCSI_DEVICES_NOT_SUPPORTED, -@@ -431,134 +353,557 @@ class TestUpdateScsiDevicesFailures(TestCase): - expected_in_processor=False, - ) - -- def test_devices_cannot_be_empty(self): -- self.config.runner.pcmk.is_resource_digests_supported() -- 
self.config.runner.cib.load(resources=fixture_scsi()) -- self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), SCSI_STONITH_ID, () -- ) -- ) -- self.env_assist.assert_reports( -- [ -- fixture.error( -- reports.codes.INVALID_OPTION_VALUE, -- option_name="devices", -- option_value="", -- allowed_values=None, -- cannot_be_empty=True, -- forbidden_characters=None, -- ) -- ] -+ def test_node_missing_name_and_missing_auth_token(self): -+ self.config.runner.cib.load( -+ resources=fixture_scsi(), -+ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), - ) -- -- def test_nonexistant_id(self): - self.config.runner.pcmk.is_resource_digests_supported() -- self.config.runner.cib.load(resources=fixture_scsi()) -- self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), "non-existent-id", DEVICES_2 -- ) -- ) -- self.env_assist.assert_reports( -- [ -- fixture.error( -- reports.codes.ID_NOT_FOUND, -- id="non-existent-id", -- expected_types=["primitive"], -- context_type="cib", -- context_id="", -- ) -- ] -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES - ) -- -- def test_not_a_resource_id(self): -- self.config.runner.pcmk.is_resource_digests_supported() -- self.config.runner.cib.load(resources=fixture_scsi()) -- self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), -- f"{SCSI_STONITH_ID}-instance_attributes-devices", -- DEVICES_2, -- ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="start.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) -+ ), -+ args=["devices={}".format(",".join(DEVICES_2))], - ) -- self.env_assist.assert_reports( -- [ -- fixture.error( -- reports.codes.ID_BELONGS_TO_UNEXPECTED_TYPE, -- id=f"{SCSI_STONITH_ID}-instance_attributes-devices", -- expected_types=["primitive"], -- current_type="nvpair", -- ) -- ] -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="monitor.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) -+ ), -+ args=[ -+ "devices={}".format(",".join(DEVICES_2)), -+ "CRM_meta_interval=60000", -+ ], - ) -- -- def test_not_supported_resource_type(self): -- self.config.runner.pcmk.is_resource_digests_supported() -- self.config.runner.cib.load(resources=fixture_scsi()) -- self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), "dummy", DEVICES_2 -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture( -+ self.existing_corosync_nodes -+ + [[("ring0_addr", "custom_node"), ("nodeid", "5")]], - ) - ) -+ self.config.env.set_known_nodes(self.existing_nodes[:-1]) -+ self.env_assist.assert_raise_library_error(self.command()) - self.env_assist.assert_reports( - [ - fixture.error( -- reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT, -- resource_id="dummy", -- resource_type="Dummy", -- supported_stonith_types=["fence_scsi"], -- ) -- ] -- ) -- -- def test_devices_option_missing(self): -- self.config.runner.pcmk.is_resource_digests_supported() -- self.config.runner.cib.load(resources=fixture_scsi(devices=None)) -- self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -- ) -- ) -- self.env_assist.assert_reports( -- [ -+ 
reports.codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, -+ fatal=True, -+ ), - fixture.error( -- reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -- reason=( -- "no devices option configured for stonith device " -- f"'{SCSI_STONITH_ID}'" -- ), -- reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -- ) -+ reports.codes.HOST_NOT_FOUND, -+ host_list=[self.existing_nodes[-1]], -+ ), - ] - ) - -- def test_devices_option_empty(self): -- self.config.runner.pcmk.is_resource_digests_supported() -- self.config.runner.cib.load(resources=fixture_scsi(devices="")) -- self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -- ) -- ) -- self.env_assist.assert_reports( -- [ -- fixture.error( -- reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -- reason=( -- "no devices option configured for stonith device " -- f"'{SCSI_STONITH_ID}'" -- ), -- reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -- ) -- ] -+ def _unfence_failure_common_calls(self): -+ devices = ",".join(DEVICES_2) -+ self.config.runner.cib.load( -+ resources=fixture_scsi(), -+ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), - ) -- -- def test_stonith_resource_is_not_running(self): - self.config.runner.pcmk.is_resource_digests_supported() -- self.config.runner.cib.load(resources=fixture_scsi()) - self.config.runner.pcmk.load_state( -- resources=FIXTURE_CRM_MON_RES_STOPPED, nodes=FIXTURE_CRM_MON_NODES -+ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="start.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ devices=devices, -+ ), -+ args=[f"devices={devices}"], -+ ) -+ self.config.runner.pcmk.resource_digests( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ name="monitor.op.digests", -+ stdout=fixture_digests_xml( -+ SCSI_STONITH_ID, -+ SCSI_NODE, -+ devices=devices, -+ ), -+ args=[ -+ f"devices={devices}", -+ "CRM_meta_interval=60000", -+ ], -+ ) -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture(self.existing_corosync_nodes) -+ ) -+ -+ def test_unfence_failure_unable_to_connect(self): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ node_labels=self.existing_nodes -+ ) -+ self.config.http.scsi.unfence_node( -+ DEVICES_2, -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ raw_data=json.dumps( -+ dict( -+ devices=[DEV_2], -+ node=self.existing_nodes[0], -+ ) -+ ), -+ was_connected=False, -+ error_msg="errA", -+ ), -+ dict( -+ label=self.existing_nodes[1], -+ raw_data=json.dumps( -+ dict( -+ devices=[DEV_2], -+ node=self.existing_nodes[1], -+ ) -+ ), -+ output=json.dumps( -+ dto.to_dict( -+ communication.dto.InternalCommunicationResultDto( -+ status=communication.const.COM_STATUS_ERROR, -+ status_msg="error", -+ report_list=[ -+ reports.ReportItem.error( -+ reports.messages.StonithUnfencingFailed( -+ "errB" -+ ) -+ ).to_dto() -+ ], -+ data=None, -+ ) -+ ) -+ ), -+ ), -+ dict( -+ label=self.existing_nodes[2], -+ raw_data=json.dumps( -+ dict( -+ devices=[DEV_2], -+ node=self.existing_nodes[2], -+ ) -+ ), -+ ), -+ ], -+ ) -+ self.env_assist.assert_raise_library_error(self.command()) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ node=self.existing_nodes[0], -+ 
command="api/v1/scsi-unfence-node/v1", -+ reason="errA", -+ ), -+ fixture.error( -+ reports.codes.STONITH_UNFENCING_FAILED, -+ reason="errB", -+ context=reports.dto.ReportItemContextDto( -+ node=self.existing_nodes[1], -+ ), -+ ), -+ ] -+ ) -+ -+ def test_unfence_failure_agent_script_failed(self): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ node_labels=self.existing_nodes -+ ) -+ self.config.http.scsi.unfence_node( -+ DEVICES_2, -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ raw_data=json.dumps( -+ dict( -+ devices=[DEV_2], -+ node=self.existing_nodes[0], -+ ) -+ ), -+ ), -+ dict( -+ label=self.existing_nodes[1], -+ raw_data=json.dumps( -+ dict( -+ devices=[DEV_2], -+ node=self.existing_nodes[1], -+ ) -+ ), -+ output=json.dumps( -+ dto.to_dict( -+ communication.dto.InternalCommunicationResultDto( -+ status=communication.const.COM_STATUS_ERROR, -+ status_msg="error", -+ report_list=[ -+ reports.ReportItem.error( -+ reports.messages.StonithUnfencingFailed( -+ "errB" -+ ) -+ ).to_dto() -+ ], -+ data=None, -+ ) -+ ) -+ ), -+ ), -+ dict( -+ label=self.existing_nodes[2], -+ raw_data=json.dumps( -+ dict( -+ devices=[DEV_2], -+ node=self.existing_nodes[2], -+ ) -+ ), -+ ), -+ ], -+ ) -+ self.env_assist.assert_raise_library_error(self.command()) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.STONITH_UNFENCING_FAILED, -+ reason="errB", -+ context=reports.dto.ReportItemContextDto( -+ node=self.existing_nodes[1], -+ ), -+ ), -+ ] -+ ) -+ -+ def test_corosync_targets_unable_to_connect(self): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ output='{"corosync":true}', -+ ), -+ ] -+ + [ -+ dict( -+ label=node, -+ was_connected=False, -+ errno=7, -+ error_msg="an error", -+ ) -+ for node in self.existing_nodes[1:] -+ ] -+ ) -+ self.env_assist.assert_raise_library_error(self.command()) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ force_code=reports.codes.SKIP_OFFLINE_NODES, -+ node=node, -+ command="remote/status", -+ reason="an error", -+ ) -+ for node in self.existing_nodes[1:] -+ ] -+ ) -+ -+ def test_corosync_targets_skip_offline_unfence_node_running_corosync( -+ self, -+ ): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ output='{"corosync":true}', -+ ), -+ dict( -+ label=self.existing_nodes[1], -+ output='{"corosync":false}', -+ ), -+ dict( -+ label=self.existing_nodes[2], -+ was_connected=False, -+ errno=7, -+ error_msg="an error", -+ ), -+ ] -+ ) -+ self.config.http.scsi.unfence_node( -+ DEVICES_2, -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ raw_data=json.dumps( -+ dict( -+ devices=[DEV_2], -+ node=self.existing_nodes[0], -+ ) -+ ), -+ ), -+ ], -+ ) -+ self.config.env.push_cib( -+ resources=fixture_scsi(devices=DEVICES_2), -+ status=_fixture_status_lrm_ops( -+ SCSI_STONITH_ID, -+ lrm_start_ops=DEFAULT_LRM_START_OPS_UPDATED, -+ lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS_UPDATED, -+ ), -+ ) -+ self.command(force_flags=[reports.codes.SKIP_OFFLINE_NODES])() -+ self.env_assist.assert_reports( -+ [ -+ fixture.warn( -+ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ node=self.existing_nodes[2], -+ command="remote/status", -+ reason="an error", -+ ), -+ ] -+ 
) -+ -+ def test_corosync_targets_unable_to_perform_unfencing_operation( -+ self, -+ ): -+ self._unfence_failure_common_calls() -+ self.config.http.corosync.get_corosync_online_targets( -+ communication_list=[ -+ dict( -+ label=self.existing_nodes[0], -+ was_connected=False, -+ errno=7, -+ error_msg="an error", -+ ), -+ dict( -+ label=self.existing_nodes[1], -+ was_connected=False, -+ errno=7, -+ error_msg="an error", -+ ), -+ dict( -+ label=self.existing_nodes[2], -+ output='{"corosync":false}', -+ ), -+ ] -+ ) -+ self.config.http.scsi.unfence_node([DEV_2], communication_list=[]) -+ self.env_assist.assert_raise_library_error( -+ self.command(force_flags=[reports.codes.SKIP_OFFLINE_NODES]) -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.warn( -+ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ node=node, -+ command="remote/status", -+ reason="an error", -+ ) -+ for node in self.existing_nodes[0:2] -+ ] -+ + [ -+ fixture.error( -+ reports.codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE, -+ ), -+ ] -+ ) -+ -+ -+@mock.patch.object( -+ settings, -+ "pacemaker_api_result_schema", -+ rc("pcmk_api_rng/api-result.rng"), -+) -+class UpdateScsiDevices(UpdateScsiDevicesMixin, TestCase): -+ def test_update_1_to_1_devices(self): -+ self.assert_command_success( -+ devices_before=DEVICES_1, -+ devices_updated=DEVICES_1, -+ ) -+ -+ def test_update_2_to_2_devices(self): -+ self.assert_command_success( -+ devices_before=DEVICES_2, -+ devices_updated=DEVICES_2, -+ ) -+ -+ def test_update_1_to_2_devices(self): -+ self.assert_command_success(unfence=[DEV_2]) -+ -+ def test_update_1_to_3_devices(self): -+ self.assert_command_success( -+ devices_before=DEVICES_1, -+ devices_updated=DEVICES_3, -+ unfence=[DEV_2, DEV_3], -+ ) -+ -+ def test_update_3_to_1_devices(self): -+ self.assert_command_success( -+ devices_before=DEVICES_3, -+ devices_updated=DEVICES_1, -+ ) -+ -+ def test_update_3_to_2_devices(self): -+ self.assert_command_success( -+ devices_before=DEVICES_3, -+ devices_updated=DEVICES_2, -+ ) -+ -+ def test_update_add_2_to_2_remove_1(self): -+ self.assert_command_success( -+ devices_before=[DEV_1, DEV_2], -+ devices_updated=[DEV_2, DEV_3, DEV_4], -+ unfence=[DEV_3, DEV_4], -+ ) -+ -+ def test_default_monitor(self): -+ self.assert_command_success(unfence=[DEV_2]) -+ -+ def test_no_monitor_ops(self): -+ self.assert_command_success( -+ unfence=[DEV_2], -+ resource_ops=(), -+ lrm_monitor_ops=(), -+ lrm_monitor_ops_updated=(), -+ ) -+ -+ def test_1_monitor_with_timeout(self): -+ self.assert_command_success( -+ unfence=[DEV_2], -+ resource_ops=(("monitor", "30s", "10s", None),), -+ lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),), -+ lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),), -+ ) -+ -+ def test_2_monitor_ops_with_timeouts(self): -+ self.assert_command_success( -+ unfence=[DEV_2], -+ resource_ops=( -+ ("monitor", "30s", "10s", None), -+ ("monitor", "40s", "20s", None), -+ ), -+ lrm_monitor_ops=( -+ ("30000", DEFAULT_DIGEST, None, None), -+ ("40000", DEFAULT_DIGEST, None, None), -+ ), -+ lrm_monitor_ops_updated=( -+ ("30000", ALL_DIGEST, None, None), -+ ("40000", ALL_DIGEST, None, None), -+ ), -+ ) -+ -+ def test_2_monitor_ops_with_one_timeout(self): -+ self.assert_command_success( -+ unfence=[DEV_2], -+ resource_ops=( -+ ("monitor", "30s", "10s", None), -+ ("monitor", "60s", None, None), -+ ), -+ lrm_monitor_ops=( -+ ("30000", DEFAULT_DIGEST, None, None), -+ ("60000", DEFAULT_DIGEST, None, None), -+ ), -+ lrm_monitor_ops_updated=( -+ ("30000", ALL_DIGEST, None, None), 
-+ ("60000", ALL_DIGEST, None, None), -+ ), -+ ) -+ -+ def test_various_start_ops_one_lrm_start_op(self): -+ self.assert_command_success( -+ unfence=[DEV_2], -+ resource_ops=( -+ ("monitor", "60s", None, None), -+ ("start", "0s", "40s", None), -+ ("start", "0s", "30s", "1"), -+ ("start", "10s", "5s", None), -+ ("start", "20s", None, None), -+ ), -+ ) -+ -+ def test_1_nonrecurring_start_op_with_timeout(self): -+ self.assert_command_success( -+ unfence=[DEV_2], -+ resource_ops=( -+ ("monitor", "60s", None, None), -+ ("start", "0s", "40s", None), -+ ), -+ ) -+ -+ -+@mock.patch.object( -+ settings, -+ "pacemaker_api_result_schema", -+ rc("pcmk_api_rng/api-result.rng"), -+) -+class TestUpdateScsiDevicesFailures(UpdateScsiDevicesFailuresMixin, TestCase): -+ def command(self, force_flags=()): -+ return lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), -+ SCSI_STONITH_ID, -+ DEVICES_2, -+ force_flags=force_flags, -+ ) -+ -+ def test_devices_cannot_be_empty(self): -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), SCSI_STONITH_ID, () -+ ) -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.INVALID_OPTION_VALUE, -+ option_name="devices", -+ option_value="", -+ allowed_values=None, -+ cannot_be_empty=True, -+ forbidden_characters=None, -+ ) -+ ] -+ ) -+ -+ def test_nonexistant_id(self): -+ """ -+ lower level tested in -+ pcs_test.tier0.lib.cib.test_stonith.ValidateStonithRestartlessUpdate -+ """ -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices( -+ self.env_assist.get_env(), "non-existent-id", DEVICES_2 -+ ) -+ ) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ reports.codes.ID_NOT_FOUND, -+ id="non-existent-id", -+ expected_types=["primitive"], -+ context_type="cib", -+ context_id="", -+ ) -+ ] -+ ) -+ -+ def test_stonith_resource_is_not_running(self): -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_STOPPED, nodes=FIXTURE_CRM_MON_NODES - ) - self.env_assist.assert_raise_library_error( - lambda: stonith.update_scsi_devices( -@@ -575,8 +920,8 @@ class TestUpdateScsiDevicesFailures(TestCase): - ) - - def test_stonith_resource_is_running_on_more_than_one_node(self): -- self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.cib.load(resources=fixture_scsi()) -+ self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.pcmk.load_state( - resources=FIXTURE_CRM_MON_RES_RUNNING_2, nodes=FIXTURE_CRM_MON_NODES - ) -@@ -599,7 +944,6 @@ class TestUpdateScsiDevicesFailures(TestCase): - - def test_lrm_op_missing_digest_attributes(self): - devices = ",".join(DEVICES_2) -- self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.cib.load( - resources=fixture_scsi(), - status=_fixture_status_lrm_ops_base( -@@ -607,6 +951,7 @@ class TestUpdateScsiDevicesFailures(TestCase): - f'', - ), - ) -+ self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.pcmk.load_state( - resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES - ) -@@ -637,7 +982,6 @@ class TestUpdateScsiDevicesFailures(TestCase): - - def 
test_crm_resource_digests_missing(self): - devices = ",".join(DEVICES_2) -- self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.cib.load( - resources=fixture_scsi(), - status=_fixture_status_lrm_ops_base( -@@ -648,6 +992,7 @@ class TestUpdateScsiDevicesFailures(TestCase): - ), - ), - ) -+ self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.pcmk.load_state( - resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES - ) -@@ -680,11 +1025,11 @@ class TestUpdateScsiDevicesFailures(TestCase): - ) - - def test_no_lrm_start_op(self): -- self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.cib.load( - resources=fixture_scsi(), - status=_fixture_status_lrm_ops(SCSI_STONITH_ID, lrm_start_ops=()), - ) -+ self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.pcmk.load_state( - resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES - ) -@@ -705,7 +1050,6 @@ class TestUpdateScsiDevicesFailures(TestCase): - ) - - def test_monitor_ops_and_lrm_monitor_ops_do_not_match(self): -- self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.cib.load( - resources=fixture_scsi( - resource_ops=( -@@ -716,6 +1060,7 @@ class TestUpdateScsiDevicesFailures(TestCase): - ), - status=_fixture_status_lrm_ops(SCSI_STONITH_ID), - ) -+ self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.pcmk.load_state( - resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES - ) -@@ -746,13 +1091,13 @@ class TestUpdateScsiDevicesFailures(TestCase): - ) - - def test_lrm_monitor_ops_not_found(self): -- self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.cib.load( - resources=fixture_scsi( - resource_ops=(("monitor", "30s", None, None),) - ), - status=_fixture_status_lrm_ops(SCSI_STONITH_ID), - ) -+ self.config.runner.pcmk.is_resource_digests_supported() - self.config.runner.pcmk.load_state( - resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES - ) -@@ -783,371 +1128,353 @@ class TestUpdateScsiDevicesFailures(TestCase): - expected_in_processor=False, - ) - -- def test_node_missing_name_and_missing_auth_token(self): -- self.config.runner.pcmk.is_resource_digests_supported() -- self.config.runner.cib.load( -- resources=fixture_scsi(), -- status=_fixture_status_lrm_ops(SCSI_STONITH_ID), -- ) -- self.config.runner.pcmk.load_state( -- resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -- ) -- self.config.runner.pcmk.resource_digests( -- SCSI_STONITH_ID, -- SCSI_NODE, -- name="start.op.digests", -- stdout=fixture_digests_xml( -- SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) -- ), -- args=["devices={}".format(",".join(DEVICES_2))], -- ) -- self.config.runner.pcmk.resource_digests( -- SCSI_STONITH_ID, -- SCSI_NODE, -- name="monitor.op.digests", -- stdout=fixture_digests_xml( -- SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) -- ), -- args=[ -- "devices={}".format(",".join(DEVICES_2)), -- "CRM_meta_interval=60000", -- ], -- ) -- self.config.corosync_conf.load_content( -- corosync_conf_fixture( -- self.existing_corosync_nodes -- + [[("ring0_addr", "custom_node"), ("nodeid", "5")]], -- ) -- ) -- self.config.env.set_known_nodes(self.existing_nodes[:-1]) -- self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -- ), -+ -+@mock.patch.object( -+ settings, -+ "pacemaker_api_result_schema", -+ 
rc("pcmk_api_rng/api-result.rng"), -+) -+class UpdateScsiDevicesAddRemove(UpdateScsiDevicesMixin, TestCase): -+ def test_add_1_to_1(self): -+ self.assert_command_success( -+ devices_before=[DEV_1], -+ devices_updated=[DEV_1, DEV_2], -+ devices_add=[DEV_2], -+ devices_remove=[], -+ unfence=[DEV_2], - ) -- self.env_assist.assert_reports( -- [ -- fixture.error( -- reports.codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, -- fatal=True, -- ), -- fixture.error( -- reports.codes.HOST_NOT_FOUND, -- host_list=[self.existing_nodes[-1]], -- ), -- ] -+ -+ def test_add_2_to_1(self): -+ self.assert_command_success( -+ devices_before=[DEV_1], -+ devices_updated=[DEV_1, DEV_2, DEV_3], -+ devices_add=[DEV_2, DEV_3], -+ devices_remove=[], -+ unfence=[DEV_2, DEV_3], - ) - -- def _unfence_failure_common_calls(self): -- devices = ",".join(DEVICES_2) -- self.config.runner.pcmk.is_resource_digests_supported() -- self.config.runner.cib.load( -- resources=fixture_scsi(), -- status=_fixture_status_lrm_ops(SCSI_STONITH_ID), -+ def test_add_2_to_2_and_remove_1(self): -+ self.assert_command_success( -+ devices_before=[DEV_1, DEV_2], -+ devices_updated=[DEV_2, DEV_3, DEV_4], -+ devices_add=[DEV_3, DEV_4], -+ devices_remove=[DEV_1], -+ unfence=[DEV_3, DEV_4], - ) -- self.config.runner.pcmk.load_state( -- resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES -+ -+ def test_remove_1_from_2(self): -+ self.assert_command_success( -+ devices_before=[DEV_1, DEV_2], -+ devices_updated=[DEV_2], -+ devices_add=[], -+ devices_remove=[DEV_1], - ) -- self.config.runner.pcmk.resource_digests( -- SCSI_STONITH_ID, -- SCSI_NODE, -- name="start.op.digests", -- stdout=fixture_digests_xml( -- SCSI_STONITH_ID, -- SCSI_NODE, -- devices=devices, -- ), -- args=[f"devices={devices}"], -+ -+ def test_remove_2_from_3(self): -+ self.assert_command_success( -+ devices_before=[DEV_1, DEV_2, DEV_3], -+ devices_updated=[DEV_3], -+ devices_add=[], -+ devices_remove=[DEV_2, DEV_1], - ) -- self.config.runner.pcmk.resource_digests( -- SCSI_STONITH_ID, -- SCSI_NODE, -- name="monitor.op.digests", -- stdout=fixture_digests_xml( -- SCSI_STONITH_ID, -- SCSI_NODE, -- devices=devices, -- ), -- args=[ -- f"devices={devices}", -- "CRM_meta_interval=60000", -- ], -+ -+ def test_remove_2_from_3_add_1(self): -+ self.assert_command_success( -+ devices_before=[DEV_1, DEV_2, DEV_3], -+ devices_updated=[DEV_3, DEV_4], -+ devices_add=[DEV_4], -+ devices_remove=[DEV_2, DEV_1], -+ unfence=[DEV_4], - ) -- self.config.corosync_conf.load_content( -- corosync_conf_fixture(self.existing_corosync_nodes) -+ -+ def test_add_1_remove_1(self): -+ self.assert_command_success( -+ devices_before=[DEV_1, DEV_2], -+ devices_updated=[DEV_2, DEV_3], -+ devices_add=[DEV_3], -+ devices_remove=[DEV_1], -+ unfence=[DEV_3], - ) - -- def test_unfence_failure_unable_to_connect(self): -- self._unfence_failure_common_calls() -- self.config.http.corosync.get_corosync_online_targets( -- node_labels=self.existing_nodes -+ def test_add_2_remove_2(self): -+ self.assert_command_success( -+ devices_before=[DEV_1, DEV_2], -+ devices_updated=[DEV_3, DEV_4], -+ devices_add=[DEV_3, DEV_4], -+ devices_remove=[DEV_1, DEV_2], -+ unfence=[DEV_3, DEV_4], - ) -- self.config.http.scsi.unfence_node( -- DEVICES_2, -- communication_list=[ -- dict( -- label=self.existing_nodes[0], -- raw_data=json.dumps( -- dict(devices=DEVICES_2, node=self.existing_nodes[0]) -- ), -- was_connected=False, -- error_msg="errA", -- ), -- dict( -- label=self.existing_nodes[1], -- raw_data=json.dumps( -- dict(devices=DEVICES_2, 
node=self.existing_nodes[1]) -- ), -- output=json.dumps( -- dto.to_dict( -- communication.dto.InternalCommunicationResultDto( -- status=communication.const.COM_STATUS_ERROR, -- status_msg="error", -- report_list=[ -- reports.ReportItem.error( -- reports.messages.StonithUnfencingFailed( -- "errB" -- ) -- ).to_dto() -- ], -- data=None, -- ) -- ) -- ), -- ), -- dict( -- label=self.existing_nodes[2], -- raw_data=json.dumps( -- dict(devices=DEVICES_2, node=self.existing_nodes[2]) -- ), -- ), -- ], -+ -+ -+@mock.patch.object( -+ settings, -+ "pacemaker_api_result_schema", -+ rc("pcmk_api_rng/api-result.rng"), -+) -+class TestUpdateScsiDevicesAddRemoveFailures( -+ UpdateScsiDevicesFailuresMixin, TestCase -+): -+ def command(self, force_flags=()): -+ return lambda: stonith.update_scsi_devices_add_remove( -+ self.env_assist.get_env(), -+ SCSI_STONITH_ID, -+ [DEV_2], -+ [], -+ force_flags=force_flags, - ) -+ -+ def test_add_remove_are_empty(self): -+ """ -+ lower level tested in -+ pcs_test/tier0/lib/test_validate.ValidateAddRemoveItems -+ """ -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.config.runner.pcmk.is_resource_digests_supported() - self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -- ), -+ lambda: stonith.update_scsi_devices_add_remove( -+ self.env_assist.get_env(), SCSI_STONITH_ID, (), () -+ ) - ) - self.env_assist.assert_reports( - [ - fixture.error( -- reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -- node=self.existing_nodes[0], -- command="api/v1/scsi-unfence-node/v1", -- reason="errA", -- ), -+ reports.codes.ADD_REMOVE_ITEMS_NOT_SPECIFIED, -+ container_type=reports.const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, -+ item_type="device", -+ container_id=SCSI_STONITH_ID, -+ ) -+ ] -+ ) -+ -+ def test_not_supported_resource_type(self): -+ """ -+ lower level tested in -+ pcs_test.tier0.lib.cib.test_stonith.ValidateStonithRestartlessUpdate -+ """ -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices_add_remove( -+ self.env_assist.get_env(), "dummy", [DEV_2], [DEV_1] -+ ) -+ ) -+ self.env_assist.assert_reports( -+ [ - fixture.error( -- reports.codes.STONITH_UNFENCING_FAILED, -- reason="errB", -- context=reports.dto.ReportItemContextDto( -- node=self.existing_nodes[1], -- ), -- ), -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT, -+ resource_id="dummy", -+ resource_type="Dummy", -+ supported_stonith_types=["fence_scsi"], -+ ) - ] - ) - -- def test_unfence_failure_agent_script_failed(self): -- self._unfence_failure_common_calls() -- self.config.http.corosync.get_corosync_online_targets( -- node_labels=self.existing_nodes -+ def test_stonith_resource_is_running_on_more_than_one_node(self): -+ self.config.runner.cib.load(resources=fixture_scsi()) -+ self.config.runner.pcmk.is_resource_digests_supported() -+ self.config.runner.pcmk.load_state( -+ resources=FIXTURE_CRM_MON_RES_RUNNING_2, nodes=FIXTURE_CRM_MON_NODES - ) -- self.config.http.scsi.unfence_node( -- DEVICES_2, -- communication_list=[ -- dict( -- label=self.existing_nodes[0], -- raw_data=json.dumps( -- dict(devices=DEVICES_2, node=self.existing_nodes[0]) -- ), -- ), -- dict( -- label=self.existing_nodes[1], -- raw_data=json.dumps( -- dict(devices=DEVICES_2, node=self.existing_nodes[1]) -- ), -- output=json.dumps( -- dto.to_dict( -- 
communication.dto.InternalCommunicationResultDto( -- status=communication.const.COM_STATUS_ERROR, -- status_msg="error", -- report_list=[ -- reports.ReportItem.error( -- reports.messages.StonithUnfencingFailed( -- "errB" -- ) -- ).to_dto() -- ], -- data=None, -- ) -- ) -- ), -- ), -- dict( -- label=self.existing_nodes[2], -- raw_data=json.dumps( -- dict(devices=DEVICES_2, node=self.existing_nodes[2]) -+ self.env_assist.assert_raise_library_error( -+ lambda: stonith.update_scsi_devices_add_remove( -+ self.env_assist.get_env(), SCSI_STONITH_ID, [DEV_2], [] -+ ), -+ [ -+ fixture.error( -+ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, -+ reason=( -+ f"resource '{SCSI_STONITH_ID}' is running on more than " -+ "1 node" - ), -- ), -+ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, -+ ) - ], -+ expected_in_processor=False, - ) -- self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -- ), -+ -+ -+class ValidateAddRemoveItems(TestCase): -+ CONTAINER_TYPE = ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE -+ ITEM_TYPE = ADD_REMOVE_ITEM_TYPE_DEVICE -+ CONTAINER_ID = "container_id" -+ -+ def _validate( -+ self, add, remove, current=None, adjacent=None, can_be_empty=False -+ ): -+ # pylint: disable=protected-access -+ return stonith._validate_add_remove_items( -+ add, -+ remove, -+ current, -+ self.CONTAINER_TYPE, -+ self.ITEM_TYPE, -+ self.CONTAINER_ID, -+ adjacent, -+ can_be_empty, - ) -- self.env_assist.assert_reports( -+ -+ def test_success_add_and_remove(self): -+ assert_report_item_list_equal( -+ self._validate(["a1"], ["c3"], ["b2", "c3"]), [] -+ ) -+ -+ def test_success_add_only(self): -+ assert_report_item_list_equal(self._validate(["b2"], [], ["a1"]), []) -+ -+ def test_success_remove_only(self): -+ assert_report_item_list_equal( -+ self._validate([], ["b2"], ["a1", "b2"]), [] -+ ) -+ -+ def test_add_remove_items_not_specified(self): -+ assert_report_item_list_equal( -+ self._validate([], [], ["a1", "b2", "c3"]), - [ - fixture.error( -- reports.codes.STONITH_UNFENCING_FAILED, -- reason="errB", -- context=reports.dto.ReportItemContextDto( -- node=self.existing_nodes[1], -- ), -- ), -- ] -+ reports.codes.ADD_REMOVE_ITEMS_NOT_SPECIFIED, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ ) -+ ], - ) - -- def test_corosync_targets_unable_to_connect(self): -- self._unfence_failure_common_calls() -- self.config.http.corosync.get_corosync_online_targets( -- communication_list=[ -- dict( -- label=self.existing_nodes[0], -- output='{"corosync":true}', -- ), -- ] -- + [ -- dict( -- label=node, -- was_connected=False, -- errno=7, -- error_msg="an error", -+ def test_add_remove_items_duplications(self): -+ assert_report_item_list_equal( -+ self._validate(["b2", "b2"], ["a1", "a1"], ["a1", "c3"]), -+ [ -+ fixture.error( -+ reports.codes.ADD_REMOVE_ITEMS_DUPLICATION, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ duplicate_items_list=["a1", "b2"], - ) -- for node in self.existing_nodes[1:] -- ] -+ ], - ) -- self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 -- ), -+ -+ def test_add_items_already_in_container(self): -+ assert_report_item_list_equal( -+ self._validate(["a1", "b2"], [], ["a1", "b2", "c3"]), -+ [ -+ fixture.error( -+ 
reports.codes.ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ item_list=["a1", "b2"], -+ ), -+ ], - ) -- self.env_assist.assert_reports( -+ -+ def test_remove_items_not_in_container(self): -+ assert_report_item_list_equal( -+ self._validate([], ["a1", "b2"], ["c3"]), - [ - fixture.error( -- reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -- force_code=reports.codes.SKIP_OFFLINE_NODES, -- node=node, -- command="remote/status", -- reason="an error", -+ reports.codes.ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ item_list=["a1", "b2"], - ) -- for node in self.existing_nodes[1:] -- ] -+ ], - ) - -- def test_corosync_targets_skip_offline_unfence_node_running_corosync( -- self, -- ): -- self._unfence_failure_common_calls() -- self.config.http.corosync.get_corosync_online_targets( -- communication_list=[ -- dict( -- label=self.existing_nodes[0], -- output='{"corosync":true}', -+ def test_add_remove_items_at_the_same_time(self): -+ assert_report_item_list_equal( -+ self._validate( -+ ["a1", "a1", "b2", "b2"], ["b2", "b2", "a1", "a1"], ["c3"] -+ ), -+ [ -+ fixture.error( -+ reports.codes.ADD_REMOVE_ITEMS_DUPLICATION, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ duplicate_items_list=["a1", "b2"], - ), -- dict( -- label=self.existing_nodes[1], -- output='{"corosync":false}', -+ fixture.error( -+ reports.codes.ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ item_list=["a1", "b2"], - ), -- dict( -- label=self.existing_nodes[2], -- was_connected=False, -- errno=7, -- error_msg="an error", -+ fixture.error( -+ reports.codes.ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ item_list=["a1", "b2"], - ), -- ] -+ ], - ) -- self.config.http.scsi.unfence_node( -- DEVICES_2, -- communication_list=[ -- dict( -- label=self.existing_nodes[0], -- raw_data=json.dumps( -- dict(devices=DEVICES_2, node=self.existing_nodes[0]) -- ), -+ -+ def test_remove_all_items(self): -+ assert_report_item_list_equal( -+ self._validate([], ["a1", "b2"], ["a1", "b2"]), -+ [ -+ fixture.error( -+ reports.codes.ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ item_list=["a1", "b2"], - ), - ], - ) -- self.config.env.push_cib( -- resources=fixture_scsi(devices=DEVICES_2), -- status=_fixture_status_lrm_ops( -- SCSI_STONITH_ID, -- lrm_start_ops=DEFAULT_LRM_START_OPS_UPDATED, -- lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS_UPDATED, -- ), -+ -+ def test_remove_all_items_can_be_empty(self): -+ assert_report_item_list_equal( -+ self._validate([], ["a1", "b2"], ["a1", "b2"], can_be_empty=True), -+ [], - ) -- stonith.update_scsi_devices( -- self.env_assist.get_env(), -- SCSI_STONITH_ID, -- DEVICES_2, -- force_flags=[reports.codes.SKIP_OFFLINE_NODES], -+ -+ def test_remove_all_items_and_add_new_one(self): -+ assert_report_item_list_equal( -+ self._validate(["c3"], ["a1", "b2"], ["a1", "b2"]), -+ [], - ) -- self.env_assist.assert_reports( -+ -+ def test_missing_adjacent_item(self): -+ assert_report_item_list_equal( -+ 
self._validate(["a1", "b2"], [], ["c3"], adjacent="d4"), - [ -- fixture.warn( -- reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -- node=self.existing_nodes[2], -- command="remote/status", -- reason="an error", -+ fixture.error( -+ reports.codes.ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ adjacent_item_id="d4", - ), -- ] -+ ], - ) - -- def test_corosync_targets_unable_to_perform_unfencing_operation( -- self, -- ): -- self._unfence_failure_common_calls() -- self.config.http.corosync.get_corosync_online_targets( -- communication_list=[ -- dict( -- label=self.existing_nodes[0], -- was_connected=False, -- errno=7, -- error_msg="an error", -- ), -- dict( -- label=self.existing_nodes[1], -- was_connected=False, -- errno=7, -- error_msg="an error", -- ), -- dict( -- label=self.existing_nodes[2], -- output='{"corosync":false}', -+ def test_adjacent_item_in_add_list(self): -+ assert_report_item_list_equal( -+ self._validate(["a1", "b2"], [], ["a1"], adjacent="a1"), -+ [ -+ fixture.error( -+ reports.codes.ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ adjacent_item_id="a1", - ), -- ] -- ) -- self.config.http.scsi.unfence_node(DEVICES_2, communication_list=[]) -- self.env_assist.assert_raise_library_error( -- lambda: stonith.update_scsi_devices( -- self.env_assist.get_env(), -- SCSI_STONITH_ID, -- DEVICES_2, -- force_flags=[reports.codes.SKIP_OFFLINE_NODES], -- ), -+ ], - ) -- self.env_assist.assert_reports( -+ -+ def test_adjacent_item_without_add_list(self): -+ assert_report_item_list_equal( -+ self._validate([], ["b2"], ["a1", "b2"], adjacent="a1"), - [ -- fixture.warn( -- reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -- node=node, -- command="remote/status", -- reason="an error", -- ) -- for node in self.existing_nodes[0:2] -- ] -- + [ - fixture.error( -- reports.codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE, -+ reports.codes.ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD, -+ container_type=self.CONTAINER_TYPE, -+ item_type=self.ITEM_TYPE, -+ container_id=self.CONTAINER_ID, -+ adjacent_item_id="a1", - ), -- ] -+ ], - ) -diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml -index 745b05ad..58ebcf0f 100644 ---- a/pcsd/capabilities.xml -+++ b/pcsd/capabilities.xml -@@ -1884,6 +1884,14 @@ - pcs commands: stonith update-scsi-devices - - -+ -+ -+ Update scsi fencing devices without affecting other resources using -+ add/remove cli syntax. -+ -+ pcs commands: stonith update-scsi-devices -+ -+ - - - Unfence scsi devices on a cluster node. 
--- -2.31.1 - diff --git a/SOURCES/bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch b/SOURCES/bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch deleted file mode 100644 index 4616131..0000000 --- a/SOURCES/bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 189c73e31f5033413fc4483e40d0bfc78d77f962 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Fri, 27 Aug 2021 12:05:18 +0200 -Subject: [PATCH 1/2] fix creating resources with depth operation attribute - ---- - CHANGELOG.md | 9 +++++++++ - pcs/lib/cib/resource/operations.py | 2 +- - 2 files changed, 10 insertions(+), 1 deletion(-) - -diff --git a/CHANGELOG.md b/CHANGELOG.md -index f768cc36..c15546ba 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -1,5 +1,14 @@ - # Change Log - -+## [Unreleased] -+ -+### Fixed -+- Fixed an error when creating a resource which defines 'depth' attribute for -+ its operations ([rhbz#1998454]) -+ -+[rhbz#1998454]: https://bugzilla.redhat.com/show_bug.cgi?id=1998454 -+ -+ - ## [0.10.10] - 2021-08-19 - - ### Added -diff --git a/pcs/lib/cib/resource/operations.py b/pcs/lib/cib/resource/operations.py -index 390db71a..44b2e7dd 100644 ---- a/pcs/lib/cib/resource/operations.py -+++ b/pcs/lib/cib/resource/operations.py -@@ -197,7 +197,7 @@ def _action_dto_to_dict( - ) -> Dict[str, str]: - result = dict( - filter( -- lambda item: item[0] != "deph" and item[1] not in (None, ""), -+ lambda item: item[0] != "depth" and item[1] not in (None, ""), - to_dict(dto).items(), - ) - ) --- -2.31.1 - diff --git a/SOURCES/bz2042433-01-fix-creating-empty-cib.patch b/SOURCES/bz2022463-01-fix-creating-empty-cib.patch similarity index 96% rename from SOURCES/bz2042433-01-fix-creating-empty-cib.patch rename to SOURCES/bz2022463-01-fix-creating-empty-cib.patch index df3f45e..1437dd1 100644 --- a/SOURCES/bz2042433-01-fix-creating-empty-cib.patch +++ b/SOURCES/bz2022463-01-fix-creating-empty-cib.patch @@ -1,7 +1,7 @@ -From eae00a30e6eb682e60ec1ace4ec6633591254e15 Mon Sep 17 00:00:00 2001 +From f0342f110bdb4a7421532b85ca0f49070c7e5c1e Mon Sep 17 00:00:00 2001 From: Tomas Jelinek Date: Thu, 13 Jan 2022 17:32:38 +0100 -Subject: [PATCH] fix creating empty cib +Subject: [PATCH 4/5] fix creating empty cib --- pcs/utils.py | 21 +++++++++++---------- diff --git a/SOURCES/bz2028902-01-fix-enabling-corosync-qdevice.patch b/SOURCES/bz2028902-01-fix-enabling-corosync-qdevice.patch new file mode 100644 index 0000000..e45d0b9 --- /dev/null +++ b/SOURCES/bz2028902-01-fix-enabling-corosync-qdevice.patch @@ -0,0 +1,25 @@ +From 6b4b0c0026e5077044e4e908d093cb613ae2e94e Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Mon, 6 Dec 2021 16:06:31 +0100 +Subject: [PATCH 1/3] fix enabling corosync-qdevice + +--- + pcsd/remote.rb | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index c49db116..3574d665 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -2515,7 +2515,7 @@ def qdevice_client_enable(param, request, auth_user) + unless allowed_for_local_cluster(auth_user, Permissions::WRITE) + return 403, 'Permission denied' + end +- if not ServiceChecker.new('corosync', enabled: true).is_enabled?('corosync') ++ if not ServiceChecker.new(['corosync'], enabled: true).is_enabled?('corosync') + return pcsd_success('corosync is not enabled, skipping') + elsif enable_service('corosync-qdevice') + return pcsd_success('corosync-qdevice enabled') +-- +2.31.1 + diff --git 
a/SOURCES/bz2032997-01-skip-checking-of-scsi-devices-to-be-removed.patch b/SOURCES/bz2032997-01-skip-checking-of-scsi-devices-to-be-removed.patch new file mode 100644 index 0000000..e11b09e --- /dev/null +++ b/SOURCES/bz2032997-01-skip-checking-of-scsi-devices-to-be-removed.patch @@ -0,0 +1,86 @@ +From 082bded126151e4f4b4667a1d8337db741828da6 Mon Sep 17 00:00:00 2001 +From: Miroslav Lisik +Date: Thu, 16 Dec 2021 14:12:58 +0100 +Subject: [PATCH 1/5] skip checking of scsi devices to be removed before + unfencing to be added devices + +--- + pcs/lib/commands/scsi.py | 3 ++- + pcs_test/tier0/lib/commands/test_scsi.py | 21 +++++++++++++++++---- + 2 files changed, 19 insertions(+), 5 deletions(-) + +diff --git a/pcs/lib/commands/scsi.py b/pcs/lib/commands/scsi.py +index ff20a563..ab732805 100644 +--- a/pcs/lib/commands/scsi.py ++++ b/pcs/lib/commands/scsi.py +@@ -31,7 +31,8 @@ def unfence_node( + return + fence_scsi_bin = os.path.join(settings.fence_agent_binaries, "fence_scsi") + fenced_devices = [] +- for device in original_devices: ++ # do not check devices being removed ++ for device in sorted(set(original_devices) & set(updated_devices)): + stdout, stderr, return_code = env.cmd_runner().run( + [ + fence_scsi_bin, +diff --git a/pcs_test/tier0/lib/commands/test_scsi.py b/pcs_test/tier0/lib/commands/test_scsi.py +index 8ef9836a..bc2357a9 100644 +--- a/pcs_test/tier0/lib/commands/test_scsi.py ++++ b/pcs_test/tier0/lib/commands/test_scsi.py +@@ -13,10 +13,13 @@ class TestUnfenceNode(TestCase): + self.old_devices = ["device1", "device3"] + self.new_devices = ["device3", "device0", "device2"] + self.added_devices = set(self.new_devices) - set(self.old_devices) ++ self.check_devices = sorted( ++ set(self.old_devices) & set(self.new_devices) ++ ) + self.node = "node1" + + def test_success_devices_to_unfence(self): +- for old_dev in self.old_devices: ++ for old_dev in self.check_devices: + self.config.runner.scsi.get_status( + self.node, old_dev, name=f"runner.scsi.is_fenced.{old_dev}" + ) +@@ -38,9 +41,19 @@ class TestUnfenceNode(TestCase): + ) + self.env_assist.assert_reports([]) + ++ def test_success_replace_unavailable_device(self): ++ self.config.runner.scsi.unfence_node(self.node, {"device2"}) ++ scsi.unfence_node( ++ self.env_assist.get_env(), ++ self.node, ++ {"device1"}, ++ {"device2"}, ++ ) ++ self.env_assist.assert_reports([]) ++ + def test_unfencing_failure(self): + err_msg = "stderr" +- for old_dev in self.old_devices: ++ for old_dev in self.check_devices: + self.config.runner.scsi.get_status( + self.node, old_dev, name=f"runner.scsi.is_fenced.{old_dev}" + ) +@@ -98,7 +111,7 @@ class TestUnfenceNode(TestCase): + + def test_unfencing_skipped_devices_are_fenced(self): + stdout_off = "Status: OFF" +- for old_dev in self.old_devices: ++ for old_dev in self.check_devices: + self.config.runner.scsi.get_status( + self.node, + old_dev, +@@ -116,7 +129,7 @@ class TestUnfenceNode(TestCase): + [ + fixture.info( + report_codes.STONITH_UNFENCING_SKIPPED_DEVICES_FENCED, +- devices=sorted(self.old_devices), ++ devices=sorted(self.check_devices), + ) + ] + ) +-- +2.31.1 + diff --git a/SOURCES/bz2036633-01-Make-ocf-linbit-drbd-agent-pass-OCF-validation.patch b/SOURCES/bz2036633-01-Make-ocf-linbit-drbd-agent-pass-OCF-validation.patch new file mode 100644 index 0000000..455dcda --- /dev/null +++ b/SOURCES/bz2036633-01-Make-ocf-linbit-drbd-agent-pass-OCF-validation.patch @@ -0,0 +1,41 @@ +From 46b079a93d1817f9c1d6a7403c70b30f59d19c20 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Tue, 4 Jan 2022 
12:56:56 +0100 +Subject: [PATCH 2/5] Make ocf:linbit:drbd agent pass OCF validation + +--- + data/ocf-1.0.rng | 18 ++++++++---------- + 1 file changed, 8 insertions(+), 10 deletions(-) + +diff --git a/data/ocf-1.0.rng b/data/ocf-1.0.rng +index 36ba4611..1e14a83b 100644 +--- a/data/ocf-1.0.rng ++++ b/data/ocf-1.0.rng +@@ -169,16 +169,14 @@ RNGs. Thank you. + + + +- +- +- boolean +- string +- integer +- second +- int +- time +- +- ++ ++ + +