import pcs-0.10.6-4.el8

This commit is contained in:
CentOS Sources 2020-11-03 06:56:01 -05:00 committed by Andrew Lukoshko
parent 3043f1d354
commit 8bef2a30ed
22 changed files with 9656 additions and 9185 deletions

29
.gitignore vendored
View File

@@ -1,20 +1,23 @@
SOURCES/HAM-logo.png
SOURCES/backports-3.11.4.gem
SOURCES/backports-3.17.2.gem
SOURCES/dacite-1.5.0.tar.gz
SOURCES/daemons-1.3.1.gem
SOURCES/ethon-0.11.0.gem
SOURCES/dataclasses-0.6.tar.gz
SOURCES/ethon-0.12.0.gem
SOURCES/eventmachine-1.2.7.gem
SOURCES/ffi-1.9.25.gem
SOURCES/ffi-1.13.1.gem
SOURCES/json-2.3.0.gem
SOURCES/mustermann-1.0.3.gem
SOURCES/mustermann-1.1.1.gem
SOURCES/open4-1.3.4-1.gem
SOURCES/pcs-0.10.4.tar.gz
SOURCES/pcs-web-ui-0.1.2.tar.gz
SOURCES/pcs-web-ui-node-modules-0.1.2.tar.xz
SOURCES/pcs-0.10.6.tar.gz
SOURCES/pcs-web-ui-0.1.4.tar.gz
SOURCES/pcs-web-ui-node-modules-0.1.3.tar.xz
SOURCES/pyagentx-0.4.pcs.2.tar.gz
SOURCES/rack-2.0.6.gem
SOURCES/rack-protection-2.0.4.gem
SOURCES/rack-test-1.0.0.gem
SOURCES/sinatra-2.0.4.gem
SOURCES/rack-2.2.3.gem
SOURCES/rack-protection-2.0.8.1.gem
SOURCES/rack-test-1.1.0.gem
SOURCES/ruby2_keywords-0.0.2.gem
SOURCES/sinatra-2.0.8.1.gem
SOURCES/thin-1.7.2.gem
SOURCES/tilt-2.0.9.gem
SOURCES/tornado-6.0.3.tar.gz
SOURCES/tilt-2.0.10.gem
SOURCES/tornado-6.0.4.tar.gz

View File

@@ -1,20 +1,23 @@
679a4ce22a33ffd4d704261a17c00cff98d9499a SOURCES/HAM-logo.png
edf08f3a0d9e202048857d78ddda44e59294084c SOURCES/backports-3.11.4.gem
28b63a742124da6c9575a1c5e7d7331ef93600b2 SOURCES/backports-3.17.2.gem
c14ee49221d8e1b09364b5f248bc3da12484f675 SOURCES/dacite-1.5.0.tar.gz
e28c1e78d1a6e34e80f4933b494f1e0501939dd3 SOURCES/daemons-1.3.1.gem
3c921ceeb2847be8cfa25704be74923e233786bd SOURCES/ethon-0.11.0.gem
81079b734108084eea0ae1c05a1cab0e806a3a1d SOURCES/dataclasses-0.6.tar.gz
921ef1be44583a7644ee7f20fe5f26f21d018a04 SOURCES/ethon-0.12.0.gem
7a5b2896e210fac9759c786ee4510f265f75b481 SOURCES/eventmachine-1.2.7.gem
86fa011857f977254ccf39f507587310f9ade768 SOURCES/ffi-1.9.25.gem
cfa25e7a3760c3ec16723cb8263d9b7a52d0eadf SOURCES/ffi-1.13.1.gem
0230e8c5a37f1543982e5b04be503dd5f9004b47 SOURCES/json-2.3.0.gem
2d090e7d3cd2a35efeaeacf006100fb83b828686 SOURCES/mustermann-1.0.3.gem
50a4e37904485810cb05e27d75c9783e5a8f3402 SOURCES/mustermann-1.1.1.gem
41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem
d2b649f271580b18d39efffa93f62b55291ef55d SOURCES/pcs-0.10.4.tar.gz
8ac1291ce8f56073b74149ac56acc094337a3298 SOURCES/pcs-web-ui-0.1.2.tar.gz
52599fe9c17bda8cc0cad1acf830a9114b8b6db6 SOURCES/pcs-web-ui-node-modules-0.1.2.tar.xz
73fafb4228326c14a799f0cccbcb734ab7ba2bfa SOURCES/pcs-0.10.6.tar.gz
d67de4d5cefd9ba3cde45c7ec4a5d1e9b1e6032a SOURCES/pcs-web-ui-0.1.4.tar.gz
3e09042e3dc32c992451ba4c0454f2879f0d3f40 SOURCES/pcs-web-ui-node-modules-0.1.3.tar.xz
3176b2f2b332c2b6bf79fe882e83feecf3d3f011 SOURCES/pyagentx-0.4.pcs.2.tar.gz
b15267e1f94e69238a00a6f1bd48fb7683c03a78 SOURCES/rack-2.0.6.gem
c1376e5678322b401d988d261762a78bf2cf3361 SOURCES/rack-protection-2.0.4.gem
4c99cf0a82372a1bc5968c1551d9e606b68b4879 SOURCES/rack-test-1.0.0.gem
1c85f05c874bc8c0bf9c40291ea2d430090cdfd9 SOURCES/sinatra-2.0.4.gem
345b7169d4d2d62176a225510399963bad62b68f SOURCES/rack-2.2.3.gem
1f046e23baca8beece3b38c60382f44aa2b2cb41 SOURCES/rack-protection-2.0.8.1.gem
b80bc5ca38a885e747271675ba91dd3d02136bf1 SOURCES/rack-test-1.1.0.gem
0be571aacb5d6a212a30af3f322a7000d8af1ef9 SOURCES/ruby2_keywords-0.0.2.gem
04cca7a5d9d641fe076e4e24dc5b6ff31922f4c3 SOURCES/sinatra-2.0.8.1.gem
41395e86322ffd31f3a7aef1f697bda3e1e2d6b9 SOURCES/thin-1.7.2.gem
55a75a80e29731d072fe44dfaf865479b65c27fd SOURCES/tilt-2.0.9.gem
126c66189fc5b26a39c9b54eb17254652cca8b27 SOURCES/tornado-6.0.3.tar.gz
d265c822a6b228392d899e9eb5114613d65e6967 SOURCES/tilt-2.0.10.gem
e177f2a092dc5f23b0b3078e40adf52e17a9f8a6 SOURCES/tornado-6.0.4.tar.gz

File diff suppressed because it is too large Load Diff

View File

@@ -1,130 +0,0 @@
From 8058591d0d79942bf6c61f105a180592bac7cf69 Mon Sep 17 00:00:00 2001
From: Ondrej Mular <omular@redhat.com>
Date: Thu, 28 Nov 2019 16:57:24 +0100
Subject: [PATCH 2/3] fix error msg when cluster is not set up
---
CHANGELOG.md | 4 +++
pcs/cluster.py | 3 +++
pcs/lib/commands/qdevice.py | 2 ++
pcs_test/tier0/lib/commands/test_qdevice.py | 27 +++++++++++++++++++--
4 files changed, 34 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 889436c3..5a7ec377 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,7 +6,11 @@
- It is possible to configure a disaster-recovery site and display its status
([rhbz#1676431])
+### Fixed
+- Error messages in cases when cluster is not set up ([rhbz#1743731])
+
[rhbz#1676431]: https://bugzilla.redhat.com/show_bug.cgi?id=1676431
+[rhbz#1743731]: https://bugzilla.redhat.com/show_bug.cgi?id=1743731
## [0.10.4] - 2019-11-28
diff --git a/pcs/cluster.py b/pcs/cluster.py
index 9473675f..0e9b3365 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -190,6 +190,9 @@ def start_cluster(argv):
wait_for_nodes_started(nodes, wait_timeout)
return
+ if not utils.hasCorosyncConf():
+ utils.err("cluster is not currently configured on this node")
+
print("Starting Cluster...")
service_list = ["corosync"]
if utils.need_to_handle_qdevice_service():
diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py
index 3d7af234..41f7c296 100644
--- a/pcs/lib/commands/qdevice.py
+++ b/pcs/lib/commands/qdevice.py
@@ -81,6 +81,8 @@ def qdevice_start(lib_env, model):
start qdevice now on local host
"""
_check_model(model)
+ if not qdevice_net.qdevice_initialized():
+ raise LibraryError(reports.qdevice_not_initialized(model))
_service_start(lib_env, qdevice_net.qdevice_start)
def qdevice_stop(lib_env, model, proceed_if_used=False):
diff --git a/pcs_test/tier0/lib/commands/test_qdevice.py b/pcs_test/tier0/lib/commands/test_qdevice.py
index b2c83ca4..af23db61 100644
--- a/pcs_test/tier0/lib/commands/test_qdevice.py
+++ b/pcs_test/tier0/lib/commands/test_qdevice.py
@@ -689,6 +689,7 @@ class QdeviceNetDisableTest(QdeviceTestCase):
)
+@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_initialized")
@mock.patch("pcs.lib.external.start_service")
@mock.patch.object(
LibraryEnvironment,
@@ -696,9 +697,11 @@ class QdeviceNetDisableTest(QdeviceTestCase):
lambda self: "mock_runner"
)
class QdeviceNetStartTest(QdeviceTestCase):
- def test_success(self, mock_net_start):
+ def test_success(self, mock_net_start, mock_qdevice_initialized):
+ mock_qdevice_initialized.return_value = True
lib.qdevice_start(self.lib_env, "net")
mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd")
+ mock_qdevice_initialized.assert_called_once_with()
assert_report_item_list_equal(
self.mock_reporter.report_item_list,
[
@@ -719,11 +722,12 @@ class QdeviceNetStartTest(QdeviceTestCase):
]
)
- def test_failed(self, mock_net_start):
+ def test_failed(self, mock_net_start, mock_qdevice_initialized):
mock_net_start.side_effect = StartServiceError(
"test service",
"test error"
)
+ mock_qdevice_initialized.return_value = True
assert_raise_library_error(
lambda: lib.qdevice_start(self.lib_env, "net"),
@@ -737,6 +741,7 @@ class QdeviceNetStartTest(QdeviceTestCase):
)
)
mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd")
+ mock_qdevice_initialized.assert_called_once_with()
assert_report_item_list_equal(
self.mock_reporter.report_item_list,
[
@@ -750,6 +755,24 @@ class QdeviceNetStartTest(QdeviceTestCase):
]
)
+ def test_qdevice_not_initialized(
+ self, mock_net_start, mock_qdevice_initialized
+ ):
+ mock_qdevice_initialized.return_value = False
+
+ assert_raise_library_error(
+ lambda: lib.qdevice_start(self.lib_env, "net"),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_NOT_INITIALIZED,
+ {
+ "model": "net",
+ }
+ )
+ )
+ mock_net_start.assert_not_called()
+ mock_qdevice_initialized.assert_called_once_with()
+
@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text")
@mock.patch("pcs.lib.external.stop_service")
--
2.21.0

View File

@@ -1,40 +0,0 @@
From e4ab588efe0f4cc6b5fcf0853293c93bd4f31604 Mon Sep 17 00:00:00 2001
From: Ondrej Mular <omular@redhat.com>
Date: Wed, 29 Jan 2020 13:13:45 +0100
Subject: [PATCH 4/7] link to sbd man page from `sbd enable` doc
---
pcs/pcs.8 | 2 +-
pcs/usage.py | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 651fda83..ff2ba0b0 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -531,7 +531,7 @@ history update
Update fence history from all nodes.
.TP
sbd enable [watchdog=<path>[@<node>]]... [device=<path>[@<node>]]... [<SBD_OPTION>=<value>]... [\fB\-\-no\-watchdog\-validation\fR]
-Enable SBD in cluster. Default path for watchdog device is /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), SBD_DELAY_START (default: no), SBD_STARTMODE (default: always) and SBD_TIMEOUT_ACTION. It is possible to specify up to 3 devices per node. If \fB\-\-no\-watchdog\-validation\fR is specified, validation of watchdogs will be skipped.
+Enable SBD in cluster. Default path for watchdog device is /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), SBD_DELAY_START (default: no), SBD_STARTMODE (default: always) and SBD_TIMEOUT_ACTION. SBD options are documented in sbd(8) man page. It is possible to specify up to 3 devices per node. If \fB\-\-no\-watchdog\-validation\fR is specified, validation of watchdogs will be skipped.
.B WARNING: Cluster has to be restarted in order to apply these changes.
diff --git a/pcs/usage.py b/pcs/usage.py
index e4f5af32..30c63964 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -1147,7 +1147,8 @@ Commands:
Enable SBD in cluster. Default path for watchdog device is
/dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5),
SBD_DELAY_START (default: no), SBD_STARTMODE (default: always) and
- SBD_TIMEOUT_ACTION. It is possible to specify up to 3 devices per node.
+ SBD_TIMEOUT_ACTION. SBD options are documented in sbd(8) man page. It
+ is possible to specify up to 3 devices per node.
If --no-watchdog-validation is specified, validation of watchdogs will
be skipped.
--
2.21.1

View File

@@ -1,636 +0,0 @@
From e56f42bf31ae0a52618fe8754fd0b2ae623e6a7a Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Thu, 12 Dec 2019 14:46:44 +0100
Subject: [PATCH 1/7] squash bz1781303 fix safe-disabling clones, groups,
bundles
fix simulate_cib_error report
Putting only one CIB in the report is not enough info. Both original and
changed CIB as well as crm_simulate output would be needed. All that
info can be seen in debug messages. So there is no need to put it in the
report.
---
pcs/cli/common/console_report.py | 7 +-
pcs/lib/cib/resource/common.py | 21 +-
pcs/lib/commands/resource.py | 27 +-
pcs/lib/pacemaker/live.py | 8 +-
pcs/lib/reports.py | 4 +-
.../tier0/cli/common/test_console_report.py | 10 +-
.../tier0/lib/cib/test_resource_common.py | 60 ++++-
.../resource/test_resource_enable_disable.py | 242 +++++++++++++++++-
pcs_test/tier0/lib/pacemaker/test_live.py | 7 -
9 files changed, 350 insertions(+), 36 deletions(-)
diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
index d349c823..60dbb2a0 100644
--- a/pcs/cli/common/console_report.py
+++ b/pcs/cli/common/console_report.py
@@ -1269,8 +1269,11 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
,
codes.CIB_SIMULATE_ERROR: lambda info:
- "Unable to simulate changes in CIB: {reason}\n{cib}"
- .format(**info)
+ "Unable to simulate changes in CIB{_reason}"
+ .format(
+ _reason=format_optional(info["reason"], ": {0}"),
+ **info
+ )
,
codes.CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET: lambda info:
diff --git a/pcs/lib/cib/resource/common.py b/pcs/lib/cib/resource/common.py
index f1891003..e30c5e69 100644
--- a/pcs/lib/cib/resource/common.py
+++ b/pcs/lib/cib/resource/common.py
@@ -1,8 +1,9 @@
from collections import namedtuple
from typing import (
cast,
+ List,
Optional,
- Sequence,
+ Set,
)
from xml.etree.ElementTree import Element
@@ -114,7 +115,23 @@ def find_primitives(resource_el):
return [resource_el]
return []
-def get_inner_resources(resource_el: Element) -> Sequence[Element]:
+def get_all_inner_resources(resource_el: Element) -> Set[Element]:
+ """
+ Return all inner resources (both direct and indirect) of a resource
+ Example: for a clone containing a group, this function will return both
+ the group and the resources inside the group
+
+ resource_el -- resource element to get its inner resources
+ """
+ all_inner: Set[Element] = set()
+ to_process = set([resource_el])
+ while to_process:
+ new_inner = get_inner_resources(to_process.pop())
+ to_process.update(set(new_inner) - all_inner)
+ all_inner.update(new_inner)
+ return all_inner
+
+def get_inner_resources(resource_el: Element) -> List[Element]:
"""
Return list of inner resources (direct descendants) of a resource
specified as resource_el.
diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
index 1b652ea4..4f975c7f 100644
--- a/pcs/lib/commands/resource.py
+++ b/pcs/lib/commands/resource.py
@@ -802,7 +802,28 @@ def disable_safe(env, resource_ids, strict, wait):
with resource_environment(
env, wait, resource_ids, _ensure_disabled_after_wait(True)
) as resources_section:
- _disable_validate_and_edit_cib(env, resources_section, resource_ids)
+ id_provider = IdProvider(resources_section)
+ resource_el_list = _find_resources_or_raise(
+ resources_section,
+ resource_ids
+ )
+ env.report_processor.process_list(
+ _resource_list_enable_disable(
+ resource_el_list,
+ resource.common.disable,
+ id_provider,
+ env.get_cluster_state()
+ )
+ )
+
+ inner_resources_names_set = set()
+ for resource_el in resource_el_list:
+ inner_resources_names_set.update({
+ inner_resource_el.get("id")
+ for inner_resource_el
+ in resource.common.get_all_inner_resources(resource_el)
+ })
+
plaintext_status, transitions, dummy_cib = simulate_cib(
env.cmd_runner(),
get_root(resources_section)
@@ -830,6 +851,10 @@ def disable_safe(env, resource_ids, strict, wait):
exclude=resource_ids
)
)
+
+ # Stopping a clone stops all its inner resources. That should not block
+ # stopping the clone.
+ other_affected = other_affected - inner_resources_names_set
if other_affected:
raise LibraryError(
reports.resource_disable_affects_other_resources(
diff --git a/pcs/lib/pacemaker/live.py b/pcs/lib/pacemaker/live.py
index 83274af0..233f2e2d 100644
--- a/pcs/lib/pacemaker/live.py
+++ b/pcs/lib/pacemaker/live.py
@@ -271,7 +271,7 @@ def simulate_cib_xml(runner, cib_xml):
transitions_file = write_tmpfile(None)
except OSError as e:
raise LibraryError(
- reports.cib_simulate_error(format_os_error(e), cib_xml)
+ reports.cib_simulate_error(format_os_error(e))
)
cmd = [
@@ -284,7 +284,7 @@ def simulate_cib_xml(runner, cib_xml):
stdout, stderr, retval = runner.run(cmd, stdin_string=cib_xml)
if retval != 0:
raise LibraryError(
- reports.cib_simulate_error(stderr.strip(), cib_xml)
+ reports.cib_simulate_error(stderr.strip())
)
try:
@@ -297,7 +297,7 @@ def simulate_cib_xml(runner, cib_xml):
return stdout, transitions_xml, new_cib_xml
except OSError as e:
raise LibraryError(
- reports.cib_simulate_error(format_os_error(e), cib_xml)
+ reports.cib_simulate_error(format_os_error(e))
)
def simulate_cib(runner, cib):
@@ -319,7 +319,7 @@ def simulate_cib(runner, cib):
)
except (etree.XMLSyntaxError, etree.DocumentInvalid) as e:
raise LibraryError(
- reports.cib_simulate_error(str(e), cib_xml)
+ reports.cib_simulate_error(str(e))
)
### wait for idle
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
index 1f081007..c9b4a25d 100644
--- a/pcs/lib/reports.py
+++ b/pcs/lib/reports.py
@@ -1935,18 +1935,16 @@ def cib_diff_error(reason, cib_old, cib_new):
}
)
-def cib_simulate_error(reason, cib):
+def cib_simulate_error(reason):
"""
cannot simulate effects a CIB would have on a live cluster
string reason -- error description
- string cib -- the CIB whose effects were to be simulated
"""
return ReportItem.error(
report_codes.CIB_SIMULATE_ERROR,
info={
"reason": reason,
- "cib": cib,
}
)
diff --git a/pcs_test/tier0/cli/common/test_console_report.py b/pcs_test/tier0/cli/common/test_console_report.py
index 0d0c2457..29e9614d 100644
--- a/pcs_test/tier0/cli/common/test_console_report.py
+++ b/pcs_test/tier0/cli/common/test_console_report.py
@@ -2238,8 +2238,14 @@ class CibDiffError(NameBuildTest):
class CibSimulateError(NameBuildTest):
def test_success(self):
self.assert_message_from_report(
- "Unable to simulate changes in CIB: error message\n<cib />",
- reports.cib_simulate_error("error message", "<cib />")
+ "Unable to simulate changes in CIB: error message",
+ reports.cib_simulate_error("error message")
+ )
+
+ def test_empty_reason(self):
+ self.assert_message_from_report(
+ "Unable to simulate changes in CIB",
+ reports.cib_simulate_error("")
)
diff --git a/pcs_test/tier0/lib/cib/test_resource_common.py b/pcs_test/tier0/lib/cib/test_resource_common.py
index ebba09da..cd716ba2 100644
--- a/pcs_test/tier0/lib/cib/test_resource_common.py
+++ b/pcs_test/tier0/lib/cib/test_resource_common.py
@@ -200,10 +200,12 @@ class FindOneOrMoreResources(TestCase):
class FindResourcesMixin:
+ _iterable_type = list
+
def assert_find_resources(self, input_resource_id, output_resource_ids):
self.assertEqual(
- output_resource_ids,
- [
+ self._iterable_type(output_resource_ids),
+ self._iterable_type([
element.get("id", "")
for element in
self._tested_fn(
@@ -211,7 +213,7 @@ class FindResourcesMixin:
'.//*[@id="{0}"]'.format(input_resource_id)
)
)
- ]
+ ])
)
def test_group(self):
@@ -235,6 +237,27 @@ class FindResourcesMixin:
def test_bundle_with_primitive(self):
self.assert_find_resources("H-bundle", ["H"])
+ def test_primitive(self):
+ raise NotImplementedError()
+
+ def test_primitive_in_clone(self):
+ raise NotImplementedError()
+
+ def test_primitive_in_master(self):
+ raise NotImplementedError()
+
+ def test_primitive_in_group(self):
+ raise NotImplementedError()
+
+ def test_primitive_in_bundle(self):
+ raise NotImplementedError()
+
+ def test_cloned_group(self):
+ raise NotImplementedError()
+
+ def test_mastered_group(self):
+ raise NotImplementedError()
+
class FindPrimitives(TestCase, FindResourcesMixin):
_tested_fn = staticmethod(common.find_primitives)
@@ -266,6 +289,37 @@ class FindPrimitives(TestCase, FindResourcesMixin):
self.assert_find_resources("F-master", ["F1", "F2"])
+class GetAllInnerResources(TestCase, FindResourcesMixin):
+ _iterable_type = set
+ _tested_fn = staticmethod(common.get_all_inner_resources)
+
+ def test_primitive(self):
+ self.assert_find_resources("A", set())
+
+ def test_primitive_in_clone(self):
+ self.assert_find_resources("B", set())
+
+ def test_primitive_in_master(self):
+ self.assert_find_resources("C", set())
+
+ def test_primitive_in_group(self):
+ self.assert_find_resources("D1", set())
+ self.assert_find_resources("D2", set())
+ self.assert_find_resources("E1", set())
+ self.assert_find_resources("E2", set())
+ self.assert_find_resources("F1", set())
+ self.assert_find_resources("F2", set())
+
+ def test_primitive_in_bundle(self):
+ self.assert_find_resources("H", set())
+
+ def test_cloned_group(self):
+ self.assert_find_resources("E-clone", {"E", "E1", "E2"})
+
+ def test_mastered_group(self):
+ self.assert_find_resources("F-master", {"F", "F1", "F2"})
+
+
class GetInnerResources(TestCase, FindResourcesMixin):
_tested_fn = staticmethod(common.get_inner_resources)
diff --git a/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py b/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py
index 634f0f33..62899940 100644
--- a/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py
+++ b/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py
@@ -1729,12 +1729,6 @@ class DisableSimulate(TestCase):
fixture.error(
report_codes.CIB_SIMULATE_ERROR,
reason="some stderr",
- # curently, there is no way to normalize xml with our lxml
- # version 4.2.3, so this never passes equality tests
- # cib=self.config.calls.get(
- # "runner.pcmk.simulate_cib"
- # ).check_stdin.expected_stdin
- # ,
),
],
expected_in_processor=False
@@ -1988,12 +1982,6 @@ class DisableSafeMixin():
fixture.error(
report_codes.CIB_SIMULATE_ERROR,
reason="some stderr",
- # curently, there is no way to normalize xml with our lxml
- # version 4.2.3, so this never passes equality tests
- # cib=self.config.calls.get(
- # "runner.pcmk.simulate_cib"
- # ).check_stdin.expected_stdin
- # ,
),
],
expected_in_processor=False
@@ -2118,6 +2106,236 @@ class DisableSafeMixin():
fixture.report_resource_not_running("B"),
])
+ def test_inner_resources(self, mock_write_tmpfile):
+ cib_xml = """
+ <resources>
+ <primitive id="A" />
+ <clone id="B-clone">
+ <primitive id="B" />
+ </clone>
+ <master id="C-master">
+ <primitive id="C" />
+ </master>
+ <group id="D">
+ <primitive id="D1" />
+ <primitive id="D2" />
+ </group>
+ <clone id="E-clone">
+ <group id="E">
+ <primitive id="E1" />
+ <primitive id="E2" />
+ </group>
+ </clone>
+ <master id="F-master">
+ <group id="F">
+ <primitive id="F1" />
+ <primitive id="F2" />
+ </group>
+ </master>
+ <bundle id="G-bundle" />
+ <bundle id="H-bundle">
+ <primitive id="H" />
+ </bundle>
+ </resources>
+ """
+ status_xml = """
+ <resources>
+ <resource id="A" managed="true" />
+ <clone id="B-clone" managed="true" multi_state="false"
+ unique="false"
+ >
+ <resource id="B" managed="true" />
+ <resource id="B" managed="true" />
+ </clone>
+ <clone id="C-master" managed="true" multi_state="true"
+ unique="false"
+ >
+ <resource id="C" managed="true" />
+ <resource id="C" managed="true" />
+ </clone>
+ <group id="D" number_resources="2">
+ <resource id="D1" managed="true" />
+ <resource id="D2" managed="true" />
+ </group>
+ <clone id="E-clone" managed="true" multi_state="false"
+ unique="false"
+ >
+ <group id="E:0" number_resources="2">
+ <resource id="E1" managed="true" />
+ <resource id="E2" managed="true" />
+ </group>
+ <group id="E:1" number_resources="2">
+ <resource id="E1" managed="true" />
+ <resource id="E2" managed="true" />
+ </group>
+ </clone>
+ <clone id="F-master" managed="true" multi_state="true"
+ unique="false"
+ >
+ <group id="F:0" number_resources="2">
+ <resource id="F1" managed="true" />
+ <resource id="F2" managed="true" />
+ </group>
+ <group id="F:1" number_resources="2">
+ <resource id="F1" managed="true" />
+ <resource id="F2" managed="true" />
+ </group>
+ </clone>
+ <bundle id="H-bundle" type="docker" image="pcmktest:http"
+ unique="false" managed="true" failed="false"
+ >
+ <replica id="0">
+ <resource id="H" />
+ </replica>
+ <replica id="1">
+ <resource id="H" />
+ </replica>
+ </bundle>
+ </resources>
+ """
+ synapses = []
+ index = 0
+ for res_name, is_clone in [
+ ("A", False),
+ ("B", True),
+ ("C", True),
+ ("D1", False),
+ ("D2", False),
+ ("E1", True),
+ ("E2", True),
+ ("F1", True),
+ ("F2", True),
+ ("H", False),
+ ]:
+ if is_clone:
+ synapses.append(f"""
+ <synapse>
+ <action_set>
+ <rsc_op id="{index}" operation="stop" on_node="node1">
+ <primitive id="{res_name}" long_id="{res_name}:0" />
+ </rsc_op>
+ </action_set>
+ </synapse>
+ <synapse>
+ <action_set>
+ <rsc_op id="{index + 1}" operation="stop" on_node="node2">
+ <primitive id="{res_name}" long_id="{res_name}:1" />
+ </rsc_op>
+ </action_set>
+ </synapse>
+ """)
+ index += 2
+ else:
+ synapses.append(f"""
+ <synapse>
+ <action_set>
+ <rsc_op id="{index}" operation="stop" on_node="node1">
+ <primitive id="{res_name}" />
+ </rsc_op>
+ </action_set>
+ </synapse>
+ """)
+ index += 1
+ transitions_xml = (
+ "<transition_graph>" + "\n".join(synapses) + "</transition_graph>"
+ )
+
+ self.tmpfile_transitions.read.return_value = transitions_xml
+ mock_write_tmpfile.side_effect = [
+ self.tmpfile_new_cib, self.tmpfile_transitions,
+ AssertionError("No other write_tmpfile call expected")
+ ]
+ (self.config
+ .runner.cib.load(resources=cib_xml)
+ .runner.pcmk.load_state(resources=status_xml)
+ )
+ self.config.runner.pcmk.simulate_cib(
+ self.tmpfile_new_cib.name,
+ self.tmpfile_transitions.name,
+ stdout="simulate output",
+ resources="""
+ <resources>
+ <primitive id="A" />
+ <clone id="B-clone">
+ <meta_attributes id="B-clone-meta_attributes">
+ <nvpair name="target-role" value="Stopped"
+ id="B-clone-meta_attributes-target-role"
+ />
+ </meta_attributes>
+ <primitive id="B" />
+ </clone>
+ <master id="C-master">
+ <meta_attributes id="C-master-meta_attributes">
+ <nvpair name="target-role" value="Stopped"
+ id="C-master-meta_attributes-target-role"
+ />
+ </meta_attributes>
+ <primitive id="C" />
+ </master>
+ <group id="D">
+ <meta_attributes id="D-meta_attributes">
+ <nvpair name="target-role" value="Stopped"
+ id="D-meta_attributes-target-role"
+ />
+ </meta_attributes>
+ <primitive id="D1" />
+ <primitive id="D2" />
+ </group>
+ <clone id="E-clone">
+ <meta_attributes id="E-clone-meta_attributes">
+ <nvpair name="target-role" value="Stopped"
+ id="E-clone-meta_attributes-target-role"
+ />
+ </meta_attributes>
+ <group id="E">
+ <primitive id="E1" />
+ <primitive id="E2" />
+ </group>
+ </clone>
+ <master id="F-master">
+ <meta_attributes id="F-master-meta_attributes">
+ <nvpair name="target-role" value="Stopped"
+ id="F-master-meta_attributes-target-role"
+ />
+ </meta_attributes>
+ <group id="F">
+ <primitive id="F1" />
+ <primitive id="F2" />
+ </group>
+ </master>
+ <bundle id="G-bundle" />
+ <bundle id="H-bundle">
+ <meta_attributes id="H-bundle-meta_attributes">
+ <nvpair name="target-role" value="Stopped"
+ id="H-bundle-meta_attributes-target-role"
+ />
+ </meta_attributes>
+ <primitive id="H" />
+ </bundle>
+ </resources>
+ """
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.disable_safe(
+ self.env_assist.get_env(),
+ ["B-clone", "C-master", "D", "E-clone", "F-master", "H-bundle"],
+ self.strict,
+ False,
+ ),
+ [
+ fixture.error(
+ report_codes.RESOURCE_DISABLE_AFFECTS_OTHER_RESOURCES,
+ disabled_resource_list=[
+ "B-clone", "C-master", "D", "E-clone", "F-master",
+ "H-bundle"
+ ],
+ affected_resource_list=["A"],
+ crm_simulate_plaintext_output="simulate output",
+ ),
+ ],
+ expected_in_processor=False
+ )
+
@mock.patch("pcs.lib.pacemaker.live.write_tmpfile")
class DisableSafe(DisableSafeMixin, TestCase):
strict = False
diff --git a/pcs_test/tier0/lib/pacemaker/test_live.py b/pcs_test/tier0/lib/pacemaker/test_live.py
index dfebcb17..1ea5454e 100644
--- a/pcs_test/tier0/lib/pacemaker/test_live.py
+++ b/pcs_test/tier0/lib/pacemaker/test_live.py
@@ -686,7 +686,6 @@ class SimulateCibXml(LibraryPacemakerTest):
fixture.error(
report_codes.CIB_SIMULATE_ERROR,
reason="some error",
- cib="<cib />",
),
)
mock_runner.run.assert_not_called()
@@ -703,7 +702,6 @@ class SimulateCibXml(LibraryPacemakerTest):
fixture.error(
report_codes.CIB_SIMULATE_ERROR,
reason="some error",
- cib="<cib />",
),
)
mock_runner.run.assert_not_called()
@@ -729,7 +727,6 @@ class SimulateCibXml(LibraryPacemakerTest):
fixture.error(
report_codes.CIB_SIMULATE_ERROR,
reason="some error",
- cib="<cib />",
),
)
@@ -755,7 +752,6 @@ class SimulateCibXml(LibraryPacemakerTest):
fixture.error(
report_codes.CIB_SIMULATE_ERROR,
reason="some error",
- cib="<cib />",
),
)
@@ -782,7 +778,6 @@ class SimulateCibXml(LibraryPacemakerTest):
fixture.error(
report_codes.CIB_SIMULATE_ERROR,
reason="some error",
- cib="<cib />",
),
)
@@ -819,7 +814,6 @@ class SimulateCib(TestCase):
"Start tag expected, '<' not found, line 1, column 1 "
"(<string>, line 1)"
),
- cib=self.cib_xml,
),
)
@@ -835,7 +829,6 @@ class SimulateCib(TestCase):
"Start tag expected, '<' not found, line 1, column 1 "
"(<string>, line 1)"
),
- cib=self.cib_xml,
),
)
--
2.21.1

View File

@@ -1,533 +0,0 @@
From 770252b476bc342ea08da2bc5b83de713463d14a Mon Sep 17 00:00:00 2001
From: Ivan Devat <idevat@redhat.com>
Date: Thu, 12 Mar 2020 15:32:31 +0100
Subject: [PATCH 1/2] send request from python to ruby more directly
Rack protection middleware is launched before
TornadoCommunicationMiddleware. When request parts are unpacked in
TornadoCommunicationMiddleware they are not checked by rack protection.
This commit changes communication between python and ruby - request is
sent to ruby more directly (without need to unpack request in sinatra
middleware).
---
pcs/daemon/ruby_pcsd.py | 217 ++++++++++++++--------
pcs_test/tier0/daemon/app/fixtures_app.py | 7 +-
pcs_test/tier0/daemon/test_ruby_pcsd.py | 61 ++----
pcsd/rserver.rb | 39 ++--
4 files changed, 175 insertions(+), 149 deletions(-)
diff --git a/pcs/daemon/ruby_pcsd.py b/pcs/daemon/ruby_pcsd.py
index e612f8da..53c53eaf 100644
--- a/pcs/daemon/ruby_pcsd.py
+++ b/pcs/daemon/ruby_pcsd.py
@@ -7,8 +7,8 @@ from time import time as now
import pycurl
from tornado.gen import convert_yielded
from tornado.web import HTTPError
-from tornado.httputil import split_host_and_port, HTTPServerRequest
-from tornado.httpclient import AsyncHTTPClient
+from tornado.httputil import HTTPServerRequest, HTTPHeaders
+from tornado.httpclient import AsyncHTTPClient, HTTPClientError
from tornado.curl_httpclient import CurlError
@@ -29,6 +29,11 @@ RUBY_LOG_LEVEL_MAP = {
"DEBUG": logging.DEBUG,
}
+__id_dict = {"id": 0}
+def get_request_id():
+ __id_dict["id"] += 1
+ return __id_dict["id"]
+
class SinatraResult(namedtuple("SinatraResult", "headers, status, body")):
@classmethod
def from_response(cls, response):
@@ -60,6 +65,59 @@ def process_response_logs(rb_log_list):
group_id=group_id
)
+class RubyDaemonRequest(namedtuple(
+ "RubyDaemonRequest",
+ "request_type, path, query, headers, method, body"
+)):
+ def __new__(
+ cls,
+ request_type,
+ http_request: HTTPServerRequest = None,
+ payload=None,
+ ):
+ headers = http_request.headers if http_request else HTTPHeaders()
+ headers.add("X-Pcsd-Type", request_type)
+ if payload:
+ headers.add(
+ "X-Pcsd-Payload",
+ b64encode(json.dumps(payload).encode()).decode()
+ )
+ return super(RubyDaemonRequest, cls).__new__(
+ cls,
+ request_type,
+ http_request.path if http_request else "",
+ http_request.query if http_request else "",
+ headers,
+ http_request.method if http_request else "GET",
+ http_request.body if http_request else None,
+ )
+
+ @property
+ def url(self):
+ # We do not need location for communication with ruby itself since we
+ # communicate via unix socket. But it is required by AsyncHTTPClient so
+ # "localhost" is used.
+ query = f"?{self.query}" if self.query else ""
+ return f"localhost/{self.path}{query}"
+
+ @property
+ def is_get(self):
+ return self.method.upper() == "GET"
+
+ @property
+ def has_http_request_detail(self):
+ return self.path or self.query or self.method != "GET" or self.body
+
+def log_ruby_daemon_request(label, request: RubyDaemonRequest):
+ log.pcsd.debug("%s type: '%s'", label, request.request_type)
+ if request.has_http_request_detail:
+ log.pcsd.debug("%s path: '%s'", label, request.path)
+ if request.query:
+ log.pcsd.debug("%s query: '%s'", label, request.query)
+ log.pcsd.debug("%s method: '%s'", label, request.method)
+ if request.body:
+ log.pcsd.debug("%s body: '%s'", label, request.body)
+
class Wrapper:
def __init__(self, pcsd_ruby_socket, debug=False):
self.__debug = debug
@@ -67,74 +125,87 @@ class Wrapper:
self.__client = AsyncHTTPClient()
self.__pcsd_ruby_socket = pcsd_ruby_socket
- @staticmethod
- def get_sinatra_request(request: HTTPServerRequest):
- host, port = split_host_and_port(request.host)
- return {"env": {
- "PATH_INFO": request.path,
- "QUERY_STRING": request.query,
- "REMOTE_ADDR": request.remote_ip,
- "REMOTE_HOST": request.host,
- "REQUEST_METHOD": request.method,
- "REQUEST_URI": f"{request.protocol}://{request.host}{request.uri}",
- "SCRIPT_NAME": "",
- "SERVER_NAME": host,
- "SERVER_PORT": port,
- "SERVER_PROTOCOL": request.version,
- "HTTP_HOST": request.host,
- "HTTP_ACCEPT": "*/*",
- "HTTP_COOKIE": ";".join([
- v.OutputString() for v in request.cookies.values()
- ]),
- "HTTPS": "on" if request.protocol == "https" else "off",
- "HTTP_VERSION": request.version,
- "REQUEST_PATH": request.path,
- "rack.input": request.body.decode("utf8"),
- }}
-
def prepare_curl_callback(self, curl):
curl.setopt(pycurl.UNIX_SOCKET_PATH, self.__pcsd_ruby_socket)
curl.setopt(pycurl.TIMEOUT, 70)
- async def send_to_ruby(self, request_json):
- # We do not need location for communication with ruby itself since we
- # communicate via unix socket. But it is required by AsyncHTTPClient so
- # "localhost" is used.
- tornado_request = b64encode(request_json.encode()).decode()
- return (await self.__client.fetch(
- "localhost",
- method="POST",
- body=f"TORNADO_REQUEST={tornado_request}",
- prepare_curl_callback=self.prepare_curl_callback,
- )).body
-
- async def run_ruby(self, request_type, request=None):
- """
- request_type: SINATRA_GUI|SINATRA_REMOTE|SYNC_CONFIGS
- request: result of get_sinatra_request|None
- i.e. it has structure returned by get_sinatra_request if the request
- is not None - so we can get SERVER_NAME and SERVER_PORT
- """
- request = request or {}
- request.update({"type": request_type})
- request_json = json.dumps(request)
-
- if self.__debug:
- log.pcsd.debug("Ruby daemon request: '%s'", request_json)
+ async def send_to_ruby(self, request: RubyDaemonRequest):
try:
- ruby_response = await self.send_to_ruby(request_json)
+ return (await self.__client.fetch(
+ request.url,
+ headers=request.headers,
+ method=request.method,
+ # Tornado enforces body=None for GET method:
+ # Even with `allow_nonstandard_methods` we disallow GET with a
+ # body (because libcurl doesn't allow it unless we use
+ # CUSTOMREQUEST). While the spec doesn't forbid clients from
+ # sending a body, it arguably disallows the server from doing
+ # anything with them.
+ body=(request.body if not request.is_get else None),
+ prepare_curl_callback=self.prepare_curl_callback,
+ )).body
except CurlError as e:
+ # This error we can get e.g. when ruby daemon is down.
log.pcsd.error(
"Cannot connect to ruby daemon (message: '%s'). Is it running?",
e
)
raise HTTPError(500)
+ except HTTPClientError as e:
+ # This error we can get e.g. when rack protection raises exception.
+ log.pcsd.error(
+ (
+ "Got error from ruby daemon (message: '%s')."
+ " Try checking system logs (e.g. journal, systemctl status"
+ " pcsd.service) for more information.."
+ ),
+ e
+ )
+ raise HTTPError(500)
+
+ async def run_ruby(
+ self,
+ request_type,
+ http_request: HTTPServerRequest = None,
+ payload=None,
+ ):
+ request = RubyDaemonRequest(request_type, http_request, payload)
+ request_id = get_request_id()
+
+ def log_request():
+ log_ruby_daemon_request(
+ f"Ruby daemon request (id: {request_id})",
+ request,
+ )
+
+ if self.__debug:
+ log_request()
+
+ return self.process_ruby_response(
+ f"Ruby daemon response (id: {request_id})",
+ log_request,
+ await self.send_to_ruby(request),
+ )
+
+ def process_ruby_response(self, label, log_request, ruby_response):
+ """
+ Return relevant part of unpacked ruby response. As a side effect
+ relevant logs are writen.
+ string label -- is used as a log prefix
+ callable log_request -- is used to log request when some errors happen;
+ we want to log request before error even if there is not debug mode
+ string ruby_response -- body of response from ruby; it should contain
+ json with dictionary with response specific keys
+ """
try:
response = json.loads(ruby_response)
if "error" in response:
+ if not self.__debug:
+ log_request()
log.pcsd.error(
- "Ruby daemon response contains an error: '%s'",
+ "%s contains an error: '%s'",
+ label,
json.dumps(response)
)
raise HTTPError(500)
@@ -144,56 +215,52 @@ class Wrapper:
body = b64decode(response.pop("body"))
if self.__debug:
log.pcsd.debug(
- "Ruby daemon response (without logs and body): '%s'",
+ "%s (without logs and body): '%s'",
+ label,
json.dumps(response)
)
- log.pcsd.debug("Ruby daemon response body: '%s'", body)
+ log.pcsd.debug("%s body: '%s'", label, body)
response["body"] = body
elif self.__debug:
log.pcsd.debug(
- "Ruby daemon response (without logs): '%s'",
+ "%s (without logs): '%s'",
+ label,
json.dumps(response)
)
process_response_logs(logs)
return response
except (json.JSONDecodeError, binascii.Error) as e:
if self.__debug:
- log.pcsd.debug("Ruby daemon response: '%s'", ruby_response)
+ log.pcsd.debug("%s: '%s'", label, ruby_response)
+ else:
+ log_request()
+
log.pcsd.error("Cannot decode json from ruby pcsd wrapper: '%s'", e)
raise HTTPError(500)
async def request_gui(
self, request: HTTPServerRequest, user, groups, is_authenticated
) -> SinatraResult:
- sinatra_request = self.get_sinatra_request(request)
# Sessions handling was removed from ruby. However, some session
# information is needed for ruby code (e.g. rendering some parts of
# templates). So this information must be sent to ruby by another way.
- sinatra_request.update({
- "session": {
+ return SinatraResult.from_response(
+ await convert_yielded(self.run_ruby(SINATRA_GUI, request, {
"username": user,
"groups": groups,
"is_authenticated": is_authenticated,
- }
- })
- response = await convert_yielded(self.run_ruby(
- SINATRA_GUI,
- sinatra_request
- ))
- return SinatraResult.from_response(response)
+ }))
+ )
async def request_remote(self, request: HTTPServerRequest) -> SinatraResult:
- response = await convert_yielded(self.run_ruby(
- SINATRA_REMOTE,
- self.get_sinatra_request(request)
- ))
- return SinatraResult.from_response(response)
+ return SinatraResult.from_response(
+ await convert_yielded(self.run_ruby(SINATRA_REMOTE, request))
+ )
async def sync_configs(self):
try:
- response = await convert_yielded(self.run_ruby(SYNC_CONFIGS))
- return response["next"]
+ return (await convert_yielded(self.run_ruby(SYNC_CONFIGS)))["next"]
except HTTPError:
log.pcsd.error("Config synchronization failed")
return int(now()) + DEFAULT_SYNC_CONFIG_DELAY
diff --git a/pcs_test/tier0/daemon/app/fixtures_app.py b/pcs_test/tier0/daemon/app/fixtures_app.py
index 8d5b8f4c..590203b4 100644
--- a/pcs_test/tier0/daemon/app/fixtures_app.py
+++ b/pcs_test/tier0/daemon/app/fixtures_app.py
@@ -20,7 +20,12 @@ class RubyPcsdWrapper(ruby_pcsd.Wrapper):
self.headers = {"Some": "value"}
self.body = b"Success action"
- async def run_ruby(self, request_type, request=None):
+ async def run_ruby(
+ self,
+ request_type,
+ http_request=None,
+ payload=None,
+ ):
if request_type != self.request_type:
raise AssertionError(
f"Wrong request type: expected '{self.request_type}'"
diff --git a/pcs_test/tier0/daemon/test_ruby_pcsd.py b/pcs_test/tier0/daemon/test_ruby_pcsd.py
index 28f14c87..32eb74cc 100644
--- a/pcs_test/tier0/daemon/test_ruby_pcsd.py
+++ b/pcs_test/tier0/daemon/test_ruby_pcsd.py
@@ -4,7 +4,7 @@ from base64 import b64encode
from unittest import TestCase, mock
from urllib.parse import urlencode
-from tornado.httputil import HTTPServerRequest
+from tornado.httputil import HTTPServerRequest, HTTPHeaders
from tornado.testing import AsyncTestCase, gen_test
from tornado.web import HTTPError
@@ -22,46 +22,17 @@ def create_http_request():
return HTTPServerRequest(
method="POST",
uri="/pcsd/uri",
- headers={"Cookie": "cookie1=first;cookie2=second"},
+ headers=HTTPHeaders({"Cookie": "cookie1=first;cookie2=second"}),
body=str.encode(urlencode({"post-key": "post-value"})),
host="pcsd-host:2224"
)
-class GetSinatraRequest(TestCase):
- def test_translate_request(self):
- # pylint: disable=invalid-name
- self.maxDiff = None
- self.assertEqual(
- create_wrapper().get_sinatra_request(create_http_request()),
- {
- 'env': {
- 'HTTPS': 'off',
- 'HTTP_ACCEPT': '*/*',
- 'HTTP_COOKIE': 'cookie1=first;cookie2=second',
- 'HTTP_HOST': 'pcsd-host:2224',
- 'HTTP_VERSION': 'HTTP/1.0',
- 'PATH_INFO': '/pcsd/uri',
- 'QUERY_STRING': '',
- 'REMOTE_ADDR': None, # It requires complicated request args
- 'REMOTE_HOST': 'pcsd-host:2224',
- 'REQUEST_METHOD': 'POST',
- 'REQUEST_PATH': '/pcsd/uri',
- 'REQUEST_URI': 'http://pcsd-host:2224/pcsd/uri',
- 'SCRIPT_NAME': '',
- 'SERVER_NAME': 'pcsd-host',
- 'SERVER_PORT': 2224,
- 'SERVER_PROTOCOL': 'HTTP/1.0',
- 'rack.input': 'post-key=post-value'
- }
- }
- )
-
patch_ruby_pcsd = create_patcher(ruby_pcsd)
class RunRuby(AsyncTestCase):
def setUp(self):
self.ruby_response = ""
- self.request = self.create_request()
+ self.request = ruby_pcsd.RubyDaemonRequest(ruby_pcsd.SYNC_CONFIGS)
self.wrapper = create_wrapper()
patcher = mock.patch.object(
self.wrapper,
@@ -72,14 +43,10 @@ class RunRuby(AsyncTestCase):
patcher.start()
super().setUp()
- async def send_to_ruby(self, request_json):
- self.assertEqual(json.loads(request_json), self.request)
+ async def send_to_ruby(self, ruby_request):
+ self.assertEqual(ruby_request, self.request)
return self.ruby_response
- @staticmethod
- def create_request(_type=ruby_pcsd.SYNC_CONFIGS):
- return {"type": _type}
-
def set_run_result(self, run_result):
self.ruby_response = json.dumps({**run_result, "logs": []})
@@ -125,10 +92,10 @@ class RunRuby(AsyncTestCase):
"body": b64encode(str.encode(body)).decode(),
})
http_request = create_http_request()
- self.request = {
- **self.create_request(ruby_pcsd.SINATRA_REMOTE),
- **self.wrapper.get_sinatra_request(http_request),
- }
+ self.request = ruby_pcsd.RubyDaemonRequest(
+ ruby_pcsd.SINATRA_REMOTE,
+ http_request,
+ )
result = yield self.wrapper.request_remote(http_request)
self.assert_sinatra_result(result, headers, status, body)
@@ -148,15 +115,15 @@ class RunRuby(AsyncTestCase):
"body": b64encode(str.encode(body)).decode(),
})
http_request = create_http_request()
- self.request = {
- **self.create_request(ruby_pcsd.SINATRA_GUI),
- **self.wrapper.get_sinatra_request(http_request),
- "session": {
+ self.request = ruby_pcsd.RubyDaemonRequest(
+ ruby_pcsd.SINATRA_GUI,
+ http_request,
+ {
"username": user,
"groups": groups,
"is_authenticated": is_authenticated,
}
- }
+ )
result = yield self.wrapper.request_gui(
http_request,
user=user,
diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb
index 6002a73c..4b58f252 100644
--- a/pcsd/rserver.rb
+++ b/pcsd/rserver.rb
@@ -11,42 +11,25 @@ def pack_response(response)
return [200, {}, [response.to_json.to_str]]
end
-def unpack_request(transport_env)
- return JSON.parse(Base64.strict_decode64(
- transport_env["rack.request.form_hash"]["TORNADO_REQUEST"]
- ))
-end
-
class TornadoCommunicationMiddleware
def initialize(app)
@app = app
end
- def call(transport_env)
+ def call(env)
Thread.current[:pcsd_logger_container] = []
begin
- request = unpack_request(transport_env)
+ type = env["HTTP_X_PCSD_TYPE"]
- if ["sinatra_gui", "sinatra_remote"].include?(request["type"])
- if request["type"] == "sinatra_gui"
- session = request["session"]
+ if ["sinatra_gui", "sinatra_remote"].include?(type)
+ if type == "sinatra_gui"
+ session = JSON.parse(Base64.strict_decode64(env["HTTP_X_PCSD_PAYLOAD"]))
Thread.current[:tornado_username] = session["username"]
Thread.current[:tornado_groups] = session["groups"]
Thread.current[:tornado_is_authenticated] = session["is_authenticated"]
end
- # Keys rack.input and rack.errors are required. We make sure they are
- # there.
- request_env = request["env"]
- request_env["rack.input"] = StringIO.new(request_env["rack.input"])
- request_env["rack.errors"] = StringIO.new()
-
- status, headers, body = @app.call(request_env)
-
- rack_errors = request_env['rack.errors'].string()
- if not rack_errors.empty?()
- $logger.error(rack_errors)
- end
+ status, headers, body = @app.call(env)
return pack_response({
:status => status,
@@ -56,16 +39,20 @@ class TornadoCommunicationMiddleware
})
end
- if request["type"] == "sync_configs"
+ if type == "sync_configs"
return pack_response({
:next => Time.now.to_i + run_cfgsync(),
:logs => Thread.current[:pcsd_logger_container],
})
end
- raise "Unexpected value for key 'type': '#{request['type']}'"
+ return pack_response({
+ :error => "Unexpected value for key 'type': '#{type}'"
+ })
rescue => e
- return pack_response({:error => "Processing request error: '#{e}'"})
+ return pack_response({
+ :error => "Processing request error: '#{e}' '#{e.backtrace}'"
+ })
end
end
end
--
2.21.1

View File

@ -1,367 +0,0 @@
From 9fbeeed4e43dc37800de3c3f0cf6f7520dc31ccf Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Mon, 20 Jan 2020 12:34:55 +0100
Subject: [PATCH] tests: update for pacemaker-2.0.3-4
---
pcs_test/tier0/test_resource.py | 59 +++++++++++++-------------
pcs_test/tier0/test_stonith.py | 75 +++++++++++++++++----------------
pcs_test/tools/assertions.py | 24 +++++++++--
3 files changed, 88 insertions(+), 70 deletions(-)
diff --git a/pcs_test/tier0/test_resource.py b/pcs_test/tier0/test_resource.py
index b8b85dd2..45d98dff 100644
--- a/pcs_test/tier0/test_resource.py
+++ b/pcs_test/tier0/test_resource.py
@@ -10,6 +10,7 @@ from pcs_test.tier0.cib_resource.common import ResourceTest
from pcs_test.tools.assertions import (
ac,
AssertPcsMixin,
+ assert_pcs_status,
)
from pcs_test.tools.bin_mock import get_mock_settings
from pcs_test.tools.cib import get_assert_pcs_effect_mixin
@@ -953,11 +954,11 @@ monitor interval=20 (A-monitor-interval-20)
o,r = pcs(temp_cib, "resource status")
assert r == 0
if PCMK_2_0_3_PLUS:
- ac(o,"""\
+ assert_pcs_status(o,"""\
* Resource Group: AGroup:
- * A1\t(ocf::heartbeat:Dummy):\t Stopped
- * A2\t(ocf::heartbeat:Dummy):\t Stopped
- * A3\t(ocf::heartbeat:Dummy):\t Stopped
+ * A1\t(ocf::heartbeat:Dummy):\tStopped
+ * A2\t(ocf::heartbeat:Dummy):\tStopped
+ * A3\t(ocf::heartbeat:Dummy):\tStopped
""")
else:
ac(o,"""\
@@ -1208,19 +1209,19 @@ monitor interval=20 (A-monitor-interval-20)
output, returnVal = pcs(temp_cib, "resource")
assert returnVal == 0
if PCMK_2_0_3_PLUS:
- ac(output, """\
- * F\t(ocf::heartbeat:Dummy):\t Stopped
- * G\t(ocf::heartbeat:Dummy):\t Stopped
- * H\t(ocf::heartbeat:Dummy):\t Stopped
+ assert_pcs_status(output, """\
+ * F\t(ocf::heartbeat:Dummy):\tStopped
+ * G\t(ocf::heartbeat:Dummy):\tStopped
+ * H\t(ocf::heartbeat:Dummy):\tStopped
* Resource Group: RGA:
- * A\t(ocf::heartbeat:Dummy):\t Stopped
- * B\t(ocf::heartbeat:Dummy):\t Stopped
- * C\t(ocf::heartbeat:Dummy):\t Stopped
- * E\t(ocf::heartbeat:Dummy):\t Stopped
- * D\t(ocf::heartbeat:Dummy):\t Stopped
- * K\t(ocf::heartbeat:Dummy):\t Stopped
- * J\t(ocf::heartbeat:Dummy):\t Stopped
- * I\t(ocf::heartbeat:Dummy):\t Stopped
+ * A\t(ocf::heartbeat:Dummy):\tStopped
+ * B\t(ocf::heartbeat:Dummy):\tStopped
+ * C\t(ocf::heartbeat:Dummy):\tStopped
+ * E\t(ocf::heartbeat:Dummy):\tStopped
+ * D\t(ocf::heartbeat:Dummy):\tStopped
+ * K\t(ocf::heartbeat:Dummy):\tStopped
+ * J\t(ocf::heartbeat:Dummy):\tStopped
+ * I\t(ocf::heartbeat:Dummy):\tStopped
""")
else:
ac(output, """\
@@ -2004,9 +2005,9 @@ monitor interval=20 (A-monitor-interval-20)
o,r = pcs(temp_cib, "resource")
if PCMK_2_0_3_PLUS:
- ac(o,"""\
+ assert_pcs_status(o,"""\
* Resource Group: AG:
- * D1\t(ocf::heartbeat:Dummy):\t Stopped
+ * D1\t(ocf::heartbeat:Dummy):\tStopped
* Clone Set: D0-clone [D0]:
""")
else:
@@ -2348,10 +2349,10 @@ monitor interval=20 (A-monitor-interval-20)
o,r = pcs(temp_cib, "resource status")
assert r == 0
if PCMK_2_0_3_PLUS:
- ac(o,"""\
+ assert_pcs_status(o,"""\
* Resource Group: DGroup:
- * D1\t(ocf::heartbeat:Dummy):\t Stopped
- * D2\t(ocf::heartbeat:Dummy):\t Stopped
+ * D1\t(ocf::heartbeat:Dummy):\tStopped
+ * D2\t(ocf::heartbeat:Dummy):\tStopped
""")
else:
ac(o,"""\
@@ -3560,12 +3561,12 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
assert retVal == 0
output, retVal = pcs(temp_cib, "resource status")
if PCMK_2_0_3_PLUS:
- ac(output, outdent(
+ assert_pcs_status(output, outdent(
"""\
* Resource Group: dummies:
- * dummy1\t(ocf::heartbeat:Dummy):\t Stopped
- * dummy2\t(ocf::heartbeat:Dummy):\t Stopped
- * dummy3\t(ocf::heartbeat:Dummy):\t Stopped
+ * dummy1\t(ocf::heartbeat:Dummy):\tStopped
+ * dummy2\t(ocf::heartbeat:Dummy):\tStopped
+ * dummy3\t(ocf::heartbeat:Dummy):\tStopped
"""
))
else:
@@ -3652,12 +3653,12 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
assert retVal == 0
output, retVal = pcs(temp_cib, "resource status")
if PCMK_2_0_3_PLUS:
- ac(output, outdent(
+ assert_pcs_status(output, outdent(
"""\
* Resource Group: dummies:
- * dummy1\t(ocf::heartbeat:Dummy):\t Stopped
- * dummy2\t(ocf::heartbeat:Dummy):\t Stopped
- * dummy3\t(ocf::heartbeat:Dummy):\t Stopped
+ * dummy1\t(ocf::heartbeat:Dummy):\tStopped
+ * dummy2\t(ocf::heartbeat:Dummy):\tStopped
+ * dummy3\t(ocf::heartbeat:Dummy):\tStopped
"""
))
else:
diff --git a/pcs_test/tier0/test_stonith.py b/pcs_test/tier0/test_stonith.py
index 46938e75..097a79b9 100644
--- a/pcs_test/tier0/test_stonith.py
+++ b/pcs_test/tier0/test_stonith.py
@@ -517,13 +517,13 @@ class StonithTest(TestCase, AssertPcsMixin):
if PCMK_2_0_3_PLUS:
self.assert_pcs_success("stonith", outdent(
"""\
- * n1-ipmi\t(stonith:fence_apc):\t Stopped
- * n2-ipmi\t(stonith:fence_apc):\t Stopped
- * n1-apc1\t(stonith:fence_apc):\t Stopped
- * n1-apc2\t(stonith:fence_apc):\t Stopped
- * n2-apc1\t(stonith:fence_apc):\t Stopped
- * n2-apc2\t(stonith:fence_apc):\t Stopped
- * n2-apc3\t(stonith:fence_apc):\t Stopped
+ * n1-ipmi\t(stonith:fence_apc):\tStopped
+ * n2-ipmi\t(stonith:fence_apc):\tStopped
+ * n1-apc1\t(stonith:fence_apc):\tStopped
+ * n1-apc2\t(stonith:fence_apc):\tStopped
+ * n2-apc1\t(stonith:fence_apc):\tStopped
+ * n2-apc2\t(stonith:fence_apc):\tStopped
+ * n2-apc3\t(stonith:fence_apc):\tStopped
Target: rh7-1
Level 1 - n1-ipmi
Level 2 - n1-apc1,n1-apc2,n2-apc2
@@ -531,7 +531,7 @@ class StonithTest(TestCase, AssertPcsMixin):
Level 1 - n2-ipmi
Level 2 - n2-apc1,n2-apc2,n2-apc3
"""
- ))
+ ), despace=True)
else:
self.assert_pcs_success("stonith", outdent(
"""\
@@ -559,12 +559,12 @@ class StonithTest(TestCase, AssertPcsMixin):
if PCMK_2_0_3_PLUS:
self.assert_pcs_success("stonith", outdent(
"""\
- * n1-ipmi\t(stonith:fence_apc):\t Stopped
- * n2-ipmi\t(stonith:fence_apc):\t Stopped
- * n1-apc1\t(stonith:fence_apc):\t Stopped
- * n1-apc2\t(stonith:fence_apc):\t Stopped
- * n2-apc1\t(stonith:fence_apc):\t Stopped
- * n2-apc3\t(stonith:fence_apc):\t Stopped
+ * n1-ipmi\t(stonith:fence_apc):\tStopped
+ * n2-ipmi\t(stonith:fence_apc):\tStopped
+ * n1-apc1\t(stonith:fence_apc):\tStopped
+ * n1-apc2\t(stonith:fence_apc):\tStopped
+ * n2-apc1\t(stonith:fence_apc):\tStopped
+ * n2-apc3\t(stonith:fence_apc):\tStopped
Target: rh7-1
Level 1 - n1-ipmi
Level 2 - n1-apc1,n1-apc2
@@ -572,7 +572,7 @@ class StonithTest(TestCase, AssertPcsMixin):
Level 1 - n2-ipmi
Level 2 - n2-apc1,n2-apc3
"""
- ))
+ ), despace=True)
else:
self.assert_pcs_success("stonith", outdent(
"""\
@@ -599,11 +599,11 @@ class StonithTest(TestCase, AssertPcsMixin):
if PCMK_2_0_3_PLUS:
self.assert_pcs_success("stonith", outdent(
"""\
- * n1-ipmi\t(stonith:fence_apc):\t Stopped
- * n2-ipmi\t(stonith:fence_apc):\t Stopped
- * n1-apc1\t(stonith:fence_apc):\t Stopped
- * n1-apc2\t(stonith:fence_apc):\t Stopped
- * n2-apc3\t(stonith:fence_apc):\t Stopped
+ * n1-ipmi\t(stonith:fence_apc):\tStopped
+ * n2-ipmi\t(stonith:fence_apc):\tStopped
+ * n1-apc1\t(stonith:fence_apc):\tStopped
+ * n1-apc2\t(stonith:fence_apc):\tStopped
+ * n2-apc3\t(stonith:fence_apc):\tStopped
Target: rh7-1
Level 1 - n1-ipmi
Level 2 - n1-apc1,n1-apc2
@@ -611,7 +611,7 @@ class StonithTest(TestCase, AssertPcsMixin):
Level 1 - n2-ipmi
Level 2 - n2-apc3
"""
- ))
+ ), despace=True)
else:
self.assert_pcs_success("stonith", outdent(
"""\
@@ -637,17 +637,17 @@ class StonithTest(TestCase, AssertPcsMixin):
if PCMK_2_0_3_PLUS:
self.assert_pcs_success("stonith", outdent(
"""\
- * n1-ipmi\t(stonith:fence_apc):\t Stopped
- * n2-ipmi\t(stonith:fence_apc):\t Stopped
- * n1-apc1\t(stonith:fence_apc):\t Stopped
- * n1-apc2\t(stonith:fence_apc):\t Stopped
+ * n1-ipmi\t(stonith:fence_apc):\tStopped
+ * n2-ipmi\t(stonith:fence_apc):\tStopped
+ * n1-apc1\t(stonith:fence_apc):\tStopped
+ * n1-apc2\t(stonith:fence_apc):\tStopped
Target: rh7-1
Level 1 - n1-ipmi
Level 2 - n1-apc1,n1-apc2
Target: rh7-2
Level 1 - n2-ipmi
"""
- ))
+ ), despace=True)
else:
self.assert_pcs_success("stonith", outdent(
"""\
@@ -671,16 +671,16 @@ class StonithTest(TestCase, AssertPcsMixin):
if PCMK_2_0_3_PLUS:
self.assert_pcs_success("stonith", outdent(
"""\
- * n1-ipmi\t(stonith:fence_apc):\t Stopped
- * n2-ipmi\t(stonith:fence_apc):\t Stopped
- * n1-apc2\t(stonith:fence_apc):\t Stopped
+ * n1-ipmi\t(stonith:fence_apc):\tStopped
+ * n2-ipmi\t(stonith:fence_apc):\tStopped
+ * n1-apc2\t(stonith:fence_apc):\tStopped
Target: rh7-1
Level 1 - n1-ipmi
Level 2 - n1-apc2
Target: rh7-2
Level 1 - n2-ipmi
"""
- ))
+ ), despace=True)
else:
self.assert_pcs_success("stonith", outdent(
"""\
@@ -704,14 +704,14 @@ class StonithTest(TestCase, AssertPcsMixin):
if PCMK_2_0_3_PLUS:
self.assert_pcs_success("stonith", outdent(
"""\
- * n1-ipmi\t(stonith:fence_apc):\t Stopped
- * n2-ipmi\t(stonith:fence_apc):\t Stopped
+ * n1-ipmi\t(stonith:fence_apc):\tStopped
+ * n2-ipmi\t(stonith:fence_apc):\tStopped
Target: rh7-1
Level 1 - n1-ipmi
Target: rh7-2
Level 1 - n2-ipmi
"""
- ))
+ ), despace=True)
else:
self.assert_pcs_success("stonith", outdent(
"""\
@@ -1219,9 +1219,9 @@ class LevelConfig(LevelTestsBase):
if PCMK_2_0_3_PLUS:
result = outdent(
"""\
- * F1\t(stonith:fence_apc):\t Stopped
- * F2\t(stonith:fence_apc):\t Stopped
- * F3\t(stonith:fence_apc):\t Stopped
+ * F1\t(stonith:fence_apc):\tStopped
+ * F2\t(stonith:fence_apc):\tStopped
+ * F3\t(stonith:fence_apc):\tStopped
"""
)
else:
@@ -1234,7 +1234,8 @@ class LevelConfig(LevelTestsBase):
)
self.assert_pcs_success(
"stonith",
- result + "\n".join(indent(self.config_lines, 1)) + "\n"
+ result + "\n".join(indent(self.config_lines, 1)) + "\n",
+ despace=True
)
self.pcs_runner.mock_settings["corosync_conf_file"] = rc(
"corosync.conf"
diff --git a/pcs_test/tools/assertions.py b/pcs_test/tools/assertions.py
index db8f4df5..a2b7b4ac 100644
--- a/pcs_test/tools/assertions.py
+++ b/pcs_test/tools/assertions.py
@@ -59,7 +59,8 @@ class AssertPcsMixin:
)
def assert_pcs_success(
- self, command, stdout_full=None, stdout_start=None, stdout_regexp=None
+ self, command, stdout_full=None, stdout_start=None, stdout_regexp=None,
+ despace=False
):
full = stdout_full
if (
@@ -75,7 +76,8 @@ class AssertPcsMixin:
stdout_full=full,
stdout_start=stdout_start,
stdout_regexp=stdout_regexp,
- returncode=0
+ returncode=0,
+ despace=despace,
)
def assert_pcs_fail(
@@ -99,7 +101,7 @@ class AssertPcsMixin:
def assert_pcs_result(
self, command, stdout_full=None, stdout_start=None, stdout_regexp=None,
- returncode=0
+ returncode=0, despace=False
):
msg = (
"Please specify exactly one: stdout_start or stdout_full or"
@@ -162,7 +164,11 @@ class AssertPcsMixin:
)
else:
expected_full = self.__prepare_output(stdout_full)
- if stdout != expected_full:
+ if (
+ (despace and _despace(stdout) != _despace(expected_full))
+ or
+ (not despace and stdout != expected_full)
+ ):
self.assertEqual(
stdout,
expected_full,
@@ -386,3 +392,13 @@ def __report_item_equal(real_report_item, report_item_info):
)
)
)
+
+def assert_pcs_status(status1, status2):
+ if _despace(status1) != _despace(status2):
+ raise AssertionError(
+ "strings not equal:\n{0}".format(prepare_diff(status1, status2))
+ )
+
+def _despace(string):
+ # ignore whitespace changes between various pacemaker versions
+ return re.sub(r"[ \t]+", " ", string)
--
2.20.1

View File

@ -1,541 +0,0 @@
From ac0305a8b6bb040ef06dcbfff309c91321400d44 Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Mon, 27 Jan 2020 17:05:42 +0100
Subject: [PATCH 3/7] fix detecting fence history support
---
pcs/lib/commands/stonith.py | 38 ++++++++------
pcs/lib/pacemaker/live.py | 45 +++++++++-------
.../crm_mon.rng.with_fence_history.xml | 13 -----
.../crm_mon.rng.without_fence_history.xml | 9 ----
pcs_test/tier0/lib/commands/test_status.py | 35 +++----------
.../lib/commands/test_stonith_history.py | 52 ++++++-------------
pcs_test/tier0/lib/pacemaker/test_live.py | 31 ++++++++++-
.../tools/command_env/config_runner_pcmk.py | 41 +++++++++++++++
pcs_test/tools/command_env/mock_runner.py | 1 +
9 files changed, 141 insertions(+), 124 deletions(-)
delete mode 100644 pcs_test/resources/crm_mon.rng.with_fence_history.xml
delete mode 100644 pcs_test/resources/crm_mon.rng.without_fence_history.xml
diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py
index c0849a54..ff87c852 100644
--- a/pcs/lib/commands/stonith.py
+++ b/pcs/lib/commands/stonith.py
@@ -1,3 +1,7 @@
+from typing import (
+ Optional,
+)
+
from pcs.lib import reports
from pcs.lib.cib import resource
from pcs.lib.cib.resource.common import are_meta_disabled
@@ -6,13 +10,14 @@ from pcs.lib.commands.resource import (
_ensure_disabled_after_wait,
resource_environment
)
+from pcs.lib.env import LibraryEnvironment
from pcs.lib.errors import LibraryError
from pcs.lib.pacemaker.live import (
FenceHistoryCommandErrorException,
fence_history_cleanup,
fence_history_text,
fence_history_update,
- is_fence_history_supported,
+ is_fence_history_supported_management,
)
from pcs.lib.pacemaker.values import validate_id
from pcs.lib.resource_agent import find_valid_stonith_agent_by_name as get_agent
@@ -162,51 +167,54 @@ def create_in_group(
put_after_adjacent,
)
-def history_get_text(env, node=None):
+def history_get_text(env: LibraryEnvironment, node: Optional[str] = None):
"""
Get full fencing history in plain text
- LibraryEnvironment env
- string node -- get history for the specified node or all nodes if None
+ env
+ node -- get history for the specified node or all nodes if None
"""
- if not is_fence_history_supported():
+ runner = env.cmd_runner()
+ if not is_fence_history_supported_management(runner):
raise LibraryError(reports.fence_history_not_supported())
try:
- return fence_history_text(env.cmd_runner(), node)
+ return fence_history_text(runner, node)
except FenceHistoryCommandErrorException as e:
raise LibraryError(
reports.fence_history_command_error(str(e), "show")
)
-def history_cleanup(env, node=None):
+def history_cleanup(env: LibraryEnvironment, node: Optional[str] = None):
"""
Clear fencing history
- LibraryEnvironment env
- string node -- clear history for the specified node or all nodes if None
+ env
+ node -- clear history for the specified node or all nodes if None
"""
- if not is_fence_history_supported():
+ runner = env.cmd_runner()
+ if not is_fence_history_supported_management(runner):
raise LibraryError(reports.fence_history_not_supported())
try:
- return fence_history_cleanup(env.cmd_runner(), node)
+ return fence_history_cleanup(runner, node)
except FenceHistoryCommandErrorException as e:
raise LibraryError(
reports.fence_history_command_error(str(e), "cleanup")
)
-def history_update(env):
+def history_update(env: LibraryEnvironment):
"""
Update fencing history in a cluster (sync with other nodes)
- LibraryEnvironment env
+ env
"""
- if not is_fence_history_supported():
+ runner = env.cmd_runner()
+ if not is_fence_history_supported_management(runner):
raise LibraryError(reports.fence_history_not_supported())
try:
- return fence_history_update(env.cmd_runner())
+ return fence_history_update(runner)
except FenceHistoryCommandErrorException as e:
raise LibraryError(
reports.fence_history_command_error(str(e), "update")
diff --git a/pcs/lib/pacemaker/live.py b/pcs/lib/pacemaker/live.py
index 233f2e2d..d6741441 100644
--- a/pcs/lib/pacemaker/live.py
+++ b/pcs/lib/pacemaker/live.py
@@ -1,6 +1,7 @@
import os.path
import re
from typing import (
+ Iterable,
List,
Tuple,
)
@@ -56,7 +57,7 @@ def get_cluster_status_text(
cmd.extend(["--show-detail", "--show-node-attributes", "--failcounts"])
# by default, pending and failed actions are displayed
# with verbose==True, we display the whole history
- if is_fence_history_supported():
+ if is_fence_history_supported_status(runner):
cmd.append("--fence-history=3")
stdout, stderr, retval = runner.run(cmd)
@@ -523,25 +524,15 @@ def _resource_move_ban_clear(
### fence history
-def is_fence_history_supported():
- try:
- crm_mon_rng = xml_fromstring(open(settings.crm_mon_schema, "r").read())
- # Namespaces must be provided otherwise xpath won't match anything.
- # 'None' namespace is not supported, so we rename it.
- namespaces_map = {
- "ns": crm_mon_rng.nsmap.pop(None)
- }
- history_elements = crm_mon_rng.xpath(
- ".//ns:element[@name='fence_history']",
- namespaces=namespaces_map
- )
- if history_elements:
- return True
- except (EnvironmentError, etree.XMLSyntaxError):
- # if we cannot tell for sure fence_history is supported, we will
- # continue as if it was not supported
- pass
- return False
+def is_fence_history_supported_status(runner: CommandRunner) -> bool:
+ return _is_in_pcmk_tool_help(
+ runner, "crm_mon", ["--fence-history"]
+ )
+
+def is_fence_history_supported_management(runner: CommandRunner) -> bool:
+ return _is_in_pcmk_tool_help(
+ runner, "stonith_admin", ["--history", "--broadcast", "--cleanup"]
+ )
def fence_history_cleanup(runner, node=None):
return _run_fence_history_command(runner, "--cleanup", node)
@@ -583,3 +574,17 @@ def __is_in_crm_resource_help(runner, text):
)
# help goes to stderr but we check stdout as well if that gets changed
return text in stderr or text in stdout
+
+def _is_in_pcmk_tool_help(
+ runner: CommandRunner, tool: str, text_list: Iterable[str]
+) -> bool:
+ stdout, stderr, dummy_retval = runner.run(
+ [__exec(tool), "--help-all"]
+ )
+ # Help goes to stderr but we check stdout as well if that gets changed. Use
+ # generators in all to return early.
+ return (
+ all(text in stderr for text in text_list)
+ or
+ all(text in stdout for text in text_list)
+ )
diff --git a/pcs_test/resources/crm_mon.rng.with_fence_history.xml b/pcs_test/resources/crm_mon.rng.with_fence_history.xml
deleted file mode 100644
index 45b380bd..00000000
--- a/pcs_test/resources/crm_mon.rng.with_fence_history.xml
+++ /dev/null
@@ -1,13 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
- <start>
- <ref name="element-crm_mon"/>
- </start>
- <define name="element-crm_mon">
- <element name="crm_mon">
- <optional>
- <element name="fence_history"/>
- </optional>
- </element>
- </define>
-</grammar>
diff --git a/pcs_test/resources/crm_mon.rng.without_fence_history.xml b/pcs_test/resources/crm_mon.rng.without_fence_history.xml
deleted file mode 100644
index f7efe52c..00000000
--- a/pcs_test/resources/crm_mon.rng.without_fence_history.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
- <start>
- <ref name="element-crm_mon"/>
- </start>
- <define name="element-crm_mon">
- <element name="crm_mon"/>
- </define>
-</grammar>
diff --git a/pcs_test/tier0/lib/commands/test_status.py b/pcs_test/tier0/lib/commands/test_status.py
index 517aa908..06878668 100644
--- a/pcs_test/tier0/lib/commands/test_status.py
+++ b/pcs_test/tier0/lib/commands/test_status.py
@@ -1,15 +1,12 @@
from textwrap import dedent
-from unittest import mock, TestCase
+from unittest import TestCase
-from pcs import settings
from pcs.common import file_type_codes, report_codes
from pcs.lib.commands import status
from pcs_test.tools import fixture
from pcs_test.tools.command_env import get_env_tools
from pcs_test.tools.misc import read_test_resource as rc_read
-crm_mon_rng_with_history = rc_read("crm_mon.rng.with_fence_history.xml")
-crm_mon_rng_without_history = rc_read("crm_mon.rng.without_fence_history.xml")
class FullClusterStatusPlaintext(TestCase):
def setUp(self):
@@ -212,11 +209,7 @@ class FullClusterStatusPlaintext(TestCase):
def test_success_live_verbose(self):
(self.config
.env.set_known_nodes(self.node_name_list)
- .fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_without_history)(),
- name="fs.open.crm_mon_rng"
- )
+ .runner.pcmk.can_fence_history_status(stderr="not supported")
.runner.pcmk.load_state_plaintext(
verbose=True,
stdout="crm_mon cluster status",
@@ -288,11 +281,7 @@ class FullClusterStatusPlaintext(TestCase):
(self.config
.env.set_corosync_conf_data(rc_read("corosync.conf"))
.env.set_cib_data("<cib/>")
- .fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_without_history)(),
- name="fs.open.crm_mon_rng"
- )
+ .runner.pcmk.can_fence_history_status(stderr="not supported")
.runner.pcmk.load_state_plaintext(
verbose=True, stdout="crm_mon cluster status",
)
@@ -320,11 +309,7 @@ class FullClusterStatusPlaintext(TestCase):
def test_success_verbose_inactive_and_fence_history(self):
(self.config
.env.set_known_nodes(self.node_name_list)
- .fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_with_history)(),
- name="fs.open.crm_mon_rng"
- )
+ .runner.pcmk.can_fence_history_status()
.runner.pcmk.load_state_plaintext(
verbose=True,
inactive=False,
@@ -375,11 +360,7 @@ class FullClusterStatusPlaintext(TestCase):
def _assert_success_with_ticket_status_failure(self, stderr="", msg=""):
(self.config
.env.set_known_nodes(self.node_name_list)
- .fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_without_history)(),
- name="fs.open.crm_mon_rng"
- )
+ .runner.pcmk.can_fence_history_status(stderr="not supported")
.runner.pcmk.load_state_plaintext(
verbose=True,
stdout="crm_mon cluster status",
@@ -553,11 +534,7 @@ class FullClusterStatusPlaintext(TestCase):
(self.config
.env.set_known_nodes(self.node_name_list[1:])
- .fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_without_history)(),
- name="fs.open.crm_mon_rng"
- )
+ .runner.pcmk.can_fence_history_status(stderr="not supported")
.runner.pcmk.load_state_plaintext(
verbose=True,
stdout="crm_mon cluster status",
diff --git a/pcs_test/tier0/lib/commands/test_stonith_history.py b/pcs_test/tier0/lib/commands/test_stonith_history.py
index e1bd35cb..cfdef13c 100644
--- a/pcs_test/tier0/lib/commands/test_stonith_history.py
+++ b/pcs_test/tier0/lib/commands/test_stonith_history.py
@@ -1,25 +1,16 @@
-from unittest import mock, TestCase
+from unittest import TestCase
from pcs_test.tools import fixture
from pcs_test.tools.command_env import get_env_tools
-from pcs_test.tools.misc import read_test_resource as rc_read
-from pcs import settings
from pcs.common import report_codes
from pcs.lib.commands import stonith
-crm_mon_rng_with_history = rc_read("crm_mon.rng.with_fence_history.xml")
-crm_mon_rng_without_history = rc_read("crm_mon.rng.without_fence_history.xml")
-
class HistoryGetText(TestCase):
def setUp(self):
self.env_assist, self.config = get_env_tools(test_case=self)
- self.config.fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_with_history)(),
- name="fs.open.crm_mon_rng"
- )
+ self.config.runner.pcmk.can_fence_history_manage()
def test_success_all_nodes(self):
history = (
@@ -68,11 +59,10 @@ class HistoryGetText(TestCase):
)
def test_history_not_supported(self):
- self.config.fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_without_history)(),
- name="fs.open.crm_mon_rng",
- instead="fs.open.crm_mon_rng"
+ self.config.runner.pcmk.can_fence_history_manage(
+ stderr="not supported",
+ name="runner.pcmk.can_fence_history_manage",
+ instead="runner.pcmk.can_fence_history_manage",
)
self.env_assist.assert_raise_library_error(
lambda: stonith.history_get_text(self.env_assist.get_env()),
@@ -88,11 +78,7 @@ class HistoryGetText(TestCase):
class HistoryCleanup(TestCase):
def setUp(self):
self.env_assist, self.config = get_env_tools(test_case=self)
- self.config.fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_with_history)(),
- name="fs.open.crm_mon_rng"
- )
+ self.config.runner.pcmk.can_fence_history_manage()
def test_success_all_nodes(self):
msg = "cleaning up fencing-history for node *\n"
@@ -129,11 +115,10 @@ class HistoryCleanup(TestCase):
)
def test_history_not_supported(self):
- self.config.fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_without_history)(),
- name="fs.open.crm_mon_rng",
- instead="fs.open.crm_mon_rng"
+ self.config.runner.pcmk.can_fence_history_manage(
+ stderr="not supported",
+ name="runner.pcmk.can_fence_history_manage",
+ instead="runner.pcmk.can_fence_history_manage",
)
self.env_assist.assert_raise_library_error(
lambda: stonith.history_cleanup(self.env_assist.get_env()),
@@ -149,11 +134,7 @@ class HistoryCleanup(TestCase):
class HistoryUpdate(TestCase):
def setUp(self):
self.env_assist, self.config = get_env_tools(test_case=self)
- self.config.fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_with_history)(),
- name="fs.open.crm_mon_rng"
- )
+ self.config.runner.pcmk.can_fence_history_manage()
def test_success_all_nodes(self):
msg = "gather fencing-history from all nodes\n"
@@ -182,11 +163,10 @@ class HistoryUpdate(TestCase):
)
def test_history_not_supported(self):
- self.config.fs.open(
- settings.crm_mon_schema,
- mock.mock_open(read_data=crm_mon_rng_without_history)(),
- name="fs.open.crm_mon_rng",
- instead="fs.open.crm_mon_rng"
+ self.config.runner.pcmk.can_fence_history_manage(
+ stderr="not supported",
+ name="runner.pcmk.can_fence_history_manage",
+ instead="runner.pcmk.can_fence_history_manage",
)
self.env_assist.assert_raise_library_error(
lambda: stonith.history_update(self.env_assist.get_env()),
diff --git a/pcs_test/tier0/lib/pacemaker/test_live.py b/pcs_test/tier0/lib/pacemaker/test_live.py
index 1ea5454e..d69d8b34 100644
--- a/pcs_test/tier0/lib/pacemaker/test_live.py
+++ b/pcs_test/tier0/lib/pacemaker/test_live.py
@@ -79,7 +79,7 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest):
class GetClusterStatusText(TestCase):
def setUp(self):
self.mock_fencehistory_supported = mock.patch(
- "pcs.lib.pacemaker.live.is_fence_history_supported",
+ "pcs.lib.pacemaker.live.is_fence_history_supported_status",
return_value=True
)
self.mock_fencehistory_supported.start()
@@ -125,7 +125,7 @@ class GetClusterStatusText(TestCase):
def test_success_no_fence_history(self):
self.mock_fencehistory_supported.stop()
self.mock_fencehistory_supported = mock.patch(
- "pcs.lib.pacemaker.live.is_fence_history_supported",
+ "pcs.lib.pacemaker.live.is_fence_history_supported_status",
return_value=False
)
self.mock_fencehistory_supported.start()
@@ -1399,3 +1399,30 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
mock_runner.run.assert_called_once_with(
[self.path("crm_resource"), "--wait"]
)
+
+
+class IsInPcmkToolHelp(TestCase):
+ # pylint: disable=protected-access
+ def test_all_in_stderr(self):
+ mock_runner = get_runner("", "ABCDE", 0)
+ self.assertTrue(
+ lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"])
+ )
+
+ def test_all_in_stdout(self):
+ mock_runner = get_runner("ABCDE", "", 0)
+ self.assertTrue(
+ lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"])
+ )
+
+ def test_some_in_stderr_all_in_stdout(self):
+ mock_runner = get_runner("ABCDE", "ABC", 0)
+ self.assertTrue(
+ lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"])
+ )
+
+ def test_some_in_stderr_some_in_stdout(self):
+ mock_runner = get_runner("CDE", "ABC", 0)
+ self.assertFalse(
+ lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"])
+ )
diff --git a/pcs_test/tools/command_env/config_runner_pcmk.py b/pcs_test/tools/command_env/config_runner_pcmk.py
index 5bb9755b..0580e8d6 100644
--- a/pcs_test/tools/command_env/config_runner_pcmk.py
+++ b/pcs_test/tools/command_env/config_runner_pcmk.py
@@ -70,11 +70,52 @@ def _fixture_state_node_xml(
class PcmkShortcuts():
+ #pylint: disable=too-many-public-methods
def __init__(self, calls):
self.__calls = calls
self.default_wait_timeout = DEFAULT_WAIT_TIMEOUT
self.default_wait_error_returncode = WAIT_TIMEOUT_EXPIRED_RETURNCODE
+ def can_fence_history_manage(
+ self,
+ name="runner.pcmk.can_fence_history_manage",
+ stderr="--history --cleanup --broadcast",
+ instead=None,
+ ):
+ """
+ Create a call to check if fence_history is supported by stonith_admin
+
+ string name -- key of the call
+ string stderr -- stonith_admin help text
+ string instead -- key of call instead of which this new call is to be
+ placed
+ """
+ self.__calls.place(
+ name,
+ RunnerCall("stonith_admin --help-all", stderr=stderr),
+ instead=instead,
+ )
+
+ def can_fence_history_status(
+ self,
+ name="runner.pcmk.can_fence_history_status",
+ stderr="--fence-history",
+ instead=None,
+ ):
+ """
+ Create a call to check if fence_history is supported by crm_mon
+
+ string name -- key of the call
+ string stderr -- crm_mon help text
+ string instead -- key of call instead of which this new call is to be
+ placed
+ """
+ self.__calls.place(
+ name,
+ RunnerCall("crm_mon --help-all", stderr=stderr),
+ instead=instead,
+ )
+
def fence_history_get(
self, name="runner.pcmk.fence_history_get", node=None, stdout="",
stderr="", returncode=0
diff --git a/pcs_test/tools/command_env/mock_runner.py b/pcs_test/tools/command_env/mock_runner.py
index 2fe43137..8b9cb771 100644
--- a/pcs_test/tools/command_env/mock_runner.py
+++ b/pcs_test/tools/command_env/mock_runner.py
@@ -61,6 +61,7 @@ COMMAND_COMPLETIONS = {
"crm_ticket": path.join(settings.pacemaker_binaries, "crm_ticket"),
"crm_verify": path.join(settings.pacemaker_binaries, "crm_verify"),
"sbd": settings.sbd_binary,
+ "stonith_admin": path.join(settings.pacemaker_binaries, "stonith_admin"),
}
def complete_command(command):
--
2.21.1

View File

@ -0,0 +1,57 @@
From be40fe494ddeb4f7132389ca0f3c1193de0e425d Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Tue, 23 Jun 2020 12:57:05 +0200
Subject: [PATCH 2/3] fix 'resource | stonith refresh' documentation
---
pcs/pcs.8 | 4 ++--
pcs/usage.py | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index c887d332..3efc5bb2 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -325,7 +325,7 @@ If a node is not specified then resources / stonith devices on all nodes will be
refresh [<resource id>] [node=<node>] [\fB\-\-strict\fR]
Make the cluster forget the complete operation history (including failures) of the resource and re\-detect its current state. If you are interested in forgetting failed operations only, use the 'pcs resource cleanup' command.
.br
-If the named resource is part of a group, or one numbered instance of a clone or bundled resource, the clean\-up applies to the whole collective resource unless \fB\-\-strict\fR is given.
+If the named resource is part of a group, or one numbered instance of a clone or bundled resource, the refresh applies to the whole collective resource unless \fB\-\-strict\fR is given.
.br
If a resource id is not specified then all resources / stonith devices will be refreshed.
.br
@@ -613,7 +613,7 @@ If a node is not specified then resources / stonith devices on all nodes will be
refresh [<stonith id>] [\fB\-\-node\fR <node>] [\fB\-\-strict\fR]
Make the cluster forget the complete operation history (including failures) of the stonith device and re\-detect its current state. If you are interested in forgetting failed operations only, use the 'pcs stonith cleanup' command.
.br
-If the named stonith device is part of a group, or one numbered instance of a clone or bundled resource, the clean\-up applies to the whole collective resource unless \fB\-\-strict\fR is given.
+If the named stonith device is part of a group, or one numbered instance of a clone or bundled resource, the refresh applies to the whole collective resource unless \fB\-\-strict\fR is given.
.br
If a stonith id is not specified then all resources / stonith devices will be refreshed.
.br
diff --git a/pcs/usage.py b/pcs/usage.py
index 8722bd7b..0f3c95a3 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -663,7 +663,7 @@ Commands:
interested in forgetting failed operations only, use the 'pcs resource
cleanup' command.
If the named resource is part of a group, or one numbered instance of a
- clone or bundled resource, the clean-up applies to the whole collective
+ clone or bundled resource, the refresh applies to the whole collective
resource unless --strict is given.
If a resource id is not specified then all resources / stonith devices
will be refreshed.
@@ -1214,7 +1214,7 @@ Commands:
are interested in forgetting failed operations only, use the 'pcs
stonith cleanup' command.
If the named stonith device is part of a group, or one numbered
- instance of a clone or bundled resource, the clean-up applies to the
+ instance of a clone or bundled resource, the refresh applies to the
whole collective resource unless --strict is given.
If a stonith id is not specified then all resources / stonith devices
will be refreshed.
--
2.25.4

File diff suppressed because it is too large Load Diff

View File

@ -1,322 +0,0 @@
From d88962d655257940a678724cc8d7bc1008ed3a46 Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Tue, 5 May 2020 11:02:36 +0200
Subject: [PATCH 1/3] fix running 'pcs status' on remote nodes
---
pcs/lib/commands/status.py | 24 +++-
pcs_test/tier0/lib/commands/test_status.py | 122 +++++++++++++++++++++
2 files changed, 141 insertions(+), 5 deletions(-)
diff --git a/pcs/lib/commands/status.py b/pcs/lib/commands/status.py
index 26332a65..84e3e046 100644
--- a/pcs/lib/commands/status.py
+++ b/pcs/lib/commands/status.py
@@ -1,3 +1,4 @@
+import os.path
from typing import (
Iterable,
List,
@@ -6,6 +7,7 @@ from typing import (
)
from xml.etree.ElementTree import Element
+from pcs import settings
from pcs.common import file_type_codes
from pcs.common.node_communicator import Communicator
from pcs.common.reports import (
@@ -17,7 +19,7 @@ from pcs.common.tools import (
indent,
)
from pcs.lib import reports
-from pcs.lib.cib import stonith
+from pcs.lib.cib import nvpair, stonith
from pcs.lib.cib.tools import get_crm_config, get_resources
from pcs.lib.communication.nodes import CheckReachability
from pcs.lib.communication.tools import run as run_communication
@@ -57,6 +59,7 @@ def full_cluster_status_plaintext(
"""
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
+ # pylint: disable=too-many-statements
# validation
if not env.is_cib_live and env.is_corosync_conf_live:
@@ -84,7 +87,11 @@ def full_cluster_status_plaintext(
status_text, warning_list = get_cluster_status_text(
runner, hide_inactive_resources, verbose
)
- corosync_conf = env.get_corosync_conf()
+ corosync_conf = None
+ # If we are live on a remote node, we have no corosync.conf.
+ # TODO Use the new file framework so the path is not exposed.
+ if not live or os.path.exists(settings.corosync_conf_file):
+ corosync_conf = env.get_corosync_conf()
cib = env.get_cib()
if verbose:
ticket_status_text, ticket_status_stderr, ticket_status_retval = (
@@ -97,7 +104,7 @@ def full_cluster_status_plaintext(
except LibraryError:
pass
local_services_status = _get_local_services_status(runner)
- if verbose:
+ if verbose and corosync_conf:
node_name_list, node_names_report_list = get_existing_nodes_names(
corosync_conf
)
@@ -117,8 +124,15 @@ def full_cluster_status_plaintext(
if report_processor.has_errors:
raise LibraryError()
+ cluster_name = (
+ corosync_conf.get_cluster_name()
+ if corosync_conf
+ else nvpair.get_value(
+ "cluster_property_set", get_crm_config(cib), "cluster-name", ""
+ )
+ )
parts = []
- parts.append(f"Cluster name: {corosync_conf.get_cluster_name()}")
+ parts.append(f"Cluster name: {cluster_name}")
if warning_list:
parts.extend(["", "WARNINGS:"] + warning_list + [""])
parts.append(status_text)
@@ -136,7 +150,7 @@ def full_cluster_status_plaintext(
else:
parts.extend(indent(ticket_status_text.splitlines()))
if live:
- if verbose:
+ if verbose and corosync_conf:
parts.extend(["", "PCSD Status:"])
parts.extend(indent(
_format_node_reachability(node_name_list, node_reachability)
diff --git a/pcs_test/tier0/lib/commands/test_status.py b/pcs_test/tier0/lib/commands/test_status.py
index 06878668..7d54d579 100644
--- a/pcs_test/tier0/lib/commands/test_status.py
+++ b/pcs_test/tier0/lib/commands/test_status.py
@@ -1,6 +1,7 @@
from textwrap import dedent
from unittest import TestCase
+from pcs import settings
from pcs.common import file_type_codes, report_codes
from pcs.lib.commands import status
from pcs_test.tools import fixture
@@ -9,16 +10,33 @@ from pcs_test.tools.misc import read_test_resource as rc_read
class FullClusterStatusPlaintext(TestCase):
+ # pylint: disable=too-many-public-methods
def setUp(self):
self.env_assist, self.config = get_env_tools(self)
self.node_name_list = ["node1", "node2", "node3"]
self.maxDiff = None
+ @staticmethod
+ def _fixture_xml_clustername(name):
+ return """
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair
+ id="cib-bootstrap-options-cluster-name"
+ name="cluster-name" value="{name}"
+ />
+ </cluster_property_set>
+ </crm_config>
+ """.format(
+ name=name
+ )
+
def _fixture_config_live_minimal(self):
(self.config
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load()
.runner.cib.load(resources="""
<resources>
@@ -30,6 +48,25 @@ class FullClusterStatusPlaintext(TestCase):
)
)
+ def _fixture_config_live_remote_minimal(self):
+ (
+ self.config.runner.pcmk.load_state_plaintext(
+ stdout="crm_mon cluster status",
+ )
+ .fs.exists(settings.corosync_conf_file, return_value=False)
+ .runner.cib.load(
+ optional_in_conf=self._fixture_xml_clustername("test-cib"),
+ resources="""
+ <resources>
+ <primitive id="S" class="stonith" type="fence_dummy" />
+ </resources>
+ """,
+ )
+ .runner.systemctl.is_active(
+ "sbd", is_active=False, name="runner.systemctl.is_active.sbd"
+ )
+ )
+
def _fixture_config_local_daemons(
self,
corosync_enabled=True, corosync_active=True,
@@ -150,6 +187,7 @@ class FullClusterStatusPlaintext(TestCase):
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load_content("invalid corosync conf")
)
self.env_assist.assert_raise_library_error(
@@ -170,6 +208,7 @@ class FullClusterStatusPlaintext(TestCase):
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load()
.runner.cib.load_content(
"some stdout", stderr="cib load error", returncode=1
@@ -214,6 +253,7 @@ class FullClusterStatusPlaintext(TestCase):
verbose=True,
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load(node_name_list=self.node_name_list)
.runner.cib.load(resources="""
<resources>
@@ -254,6 +294,82 @@ class FullClusterStatusPlaintext(TestCase):
)
)
+ def test_success_live_remote_node(self):
+ self._fixture_config_live_remote_minimal()
+ self._fixture_config_local_daemons(
+ corosync_enabled=False,
+ corosync_active=False,
+ pacemaker_enabled=False,
+ pacemaker_active=False,
+ pacemaker_remote_enabled=True,
+ pacemaker_remote_active=True,
+ )
+ self.assertEqual(
+ status.full_cluster_status_plaintext(self.env_assist.get_env()),
+ dedent(
+ """\
+ Cluster name: test-cib
+ crm_mon cluster status
+
+ Daemon Status:
+ corosync: inactive/disabled
+ pacemaker: inactive/disabled
+ pacemaker_remote: active/enabled
+ pcsd: active/enabled"""
+ ),
+ )
+
+ def test_success_live_remote_node_verbose(self):
+ (
+ self.config.runner.pcmk.can_fence_history_status(
+ stderr="not supported"
+ )
+ .runner.pcmk.load_state_plaintext(
+ verbose=True, stdout="crm_mon cluster status",
+ )
+ .fs.exists(settings.corosync_conf_file, return_value=False)
+ .runner.cib.load(
+ optional_in_conf=self._fixture_xml_clustername("test-cib"),
+ resources="""
+ <resources>
+ <primitive id="S" class="stonith" type="fence_dummy" />
+ </resources>
+ """,
+ )
+ .runner.pcmk.load_ticket_state_plaintext(stdout="ticket status")
+ .runner.systemctl.is_active(
+ "sbd", is_active=False, name="runner.systemctl.is_active.sbd"
+ )
+ )
+ self._fixture_config_local_daemons(
+ corosync_enabled=False,
+ corosync_active=False,
+ pacemaker_enabled=False,
+ pacemaker_active=False,
+ pacemaker_remote_enabled=True,
+ pacemaker_remote_active=True,
+ )
+
+ self.assertEqual(
+ status.full_cluster_status_plaintext(
+ self.env_assist.get_env(), verbose=True
+ ),
+ dedent(
+ """\
+ Cluster name: test-cib
+ crm_mon cluster status
+
+ Tickets:
+ ticket status
+
+ Daemon Status:
+ corosync: inactive/disabled
+ pacemaker: inactive/disabled
+ pacemaker_remote: active/enabled
+ pcsd: active/enabled"""
+ ),
+ )
+
def test_succes_mocked(self):
(self.config
.env.set_corosync_conf_data(rc_read("corosync.conf"))
@@ -316,6 +432,7 @@ class FullClusterStatusPlaintext(TestCase):
fence_history=True,
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load(node_name_list=self.node_name_list)
.runner.cib.load(resources="""
<resources>
@@ -365,6 +482,7 @@ class FullClusterStatusPlaintext(TestCase):
verbose=True,
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load(node_name_list=self.node_name_list)
.runner.cib.load(resources="""
<resources>
@@ -421,6 +539,7 @@ class FullClusterStatusPlaintext(TestCase):
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load()
.runner.cib.load()
.runner.systemctl.is_active(
@@ -453,6 +572,7 @@ class FullClusterStatusPlaintext(TestCase):
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load()
.runner.cib.load()
.runner.systemctl.is_active(
@@ -481,6 +601,7 @@ class FullClusterStatusPlaintext(TestCase):
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load()
.runner.cib.load(resources="""
<resources>
@@ -539,6 +660,7 @@ class FullClusterStatusPlaintext(TestCase):
verbose=True,
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load(node_name_list=self.node_name_list)
.runner.cib.load(resources="""
<resources>
--
2.25.4

View File

@ -1,39 +0,0 @@
From 0cb9637f1962ad6be9e977b4b971b823af407c2d Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Thu, 14 May 2020 16:42:32 +0200
Subject: [PATCH 3/3] fix ruby daemon closing connection after 30s
---
pcs/daemon/ruby_pcsd.py | 2 +-
pcsd/rserver.rb | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/pcs/daemon/ruby_pcsd.py b/pcs/daemon/ruby_pcsd.py
index 53c53eaf..b640752d 100644
--- a/pcs/daemon/ruby_pcsd.py
+++ b/pcs/daemon/ruby_pcsd.py
@@ -127,7 +127,7 @@ class Wrapper:
def prepare_curl_callback(self, curl):
curl.setopt(pycurl.UNIX_SOCKET_PATH, self.__pcsd_ruby_socket)
- curl.setopt(pycurl.TIMEOUT, 70)
+ curl.setopt(pycurl.TIMEOUT, 0)
async def send_to_ruby(self, request: RubyDaemonRequest):
try:
diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb
index 4b58f252..08eceb79 100644
--- a/pcsd/rserver.rb
+++ b/pcsd/rserver.rb
@@ -63,7 +63,7 @@ use TornadoCommunicationMiddleware
require 'pcsd'
::Rack::Handler.get('thin').run(Sinatra::Application, {
- :Host => PCSD_RUBY_SOCKET,
+ :Host => PCSD_RUBY_SOCKET, :timeout => 0
}) do |server|
puts server.class
server.threaded = true
--
2.25.4

View File

@ -1,25 +0,0 @@
From 5175507f22adffcb443f9f89bda9705599dd89e9 Mon Sep 17 00:00:00 2001
From: Ivan Devat <idevat@redhat.com>
Date: Thu, 7 May 2020 17:11:12 +0200
Subject: [PATCH 2/3] fix inability to create colocation const. (web ui)
---
pcsd/pcs.rb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 9a0efb46..59492d20 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -187,7 +187,7 @@ def add_colocation_constraint(
score = "INFINITY"
end
command = [
- PCS, "constraint", "colocation", "add", resourceA, resourceB, score
+ PCS, "constraint", "colocation", "add", resourceA, "with", resourceB, score
]
command << '--force' if force
stdout, stderr, retval = run_cmd(auth_user, *command)
--
2.25.4

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,402 @@
From 4a986e8ee0610b1c85a04e38042e4073d41207a4 Mon Sep 17 00:00:00 2001
From: Miroslav Lisik <mlisik@redhat.com>
Date: Mon, 13 Jul 2020 12:59:09 +0200
Subject: [PATCH 2/3] Fix tag removal in resource 'unclone/ungroup' commands
and extend test coverage
---
pcs/resource.py | 2 +-
.../tier1/cib_resource/test_clone_unclone.py | 73 +++++++--
.../tier1/cib_resource/test_group_ungroup.py | 143 +++++++++++++++---
pcs_test/tools/cib.py | 10 +-
4 files changed, 187 insertions(+), 41 deletions(-)
diff --git a/pcs/resource.py b/pcs/resource.py
index 9a3bd0ee..49d28ef0 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -2027,7 +2027,7 @@ def remove_resource_references(
if obj_ref.getAttribute("id") == resource_id:
tag = obj_ref.parentNode
tag.removeChild(obj_ref)
- if tag.getElementsByTagName(obj_ref).length == 0:
+ if tag.getElementsByTagName("obj_ref").length == 0:
remove_resource_references(
dom, tag.getAttribute("id"), output=output,
)
diff --git a/pcs_test/tier1/cib_resource/test_clone_unclone.py b/pcs_test/tier1/cib_resource/test_clone_unclone.py
index c9c6a29e..2633801a 100644
--- a/pcs_test/tier1/cib_resource/test_clone_unclone.py
+++ b/pcs_test/tier1/cib_resource/test_clone_unclone.py
@@ -55,6 +55,38 @@ FIXTURE_RESOURCES = """
)
+FIXTURE_CONSTRAINTS_CONFIG_XML = """
+ <constraints>
+ <rsc_location id="location-C-clone-rh7-1-INFINITY" node="rh7-1"
+ rsc="C-clone" score="INFINITY"/>
+ <rsc_location id="location-TagCloneOnly-rh7-1-INFINITY"
+ node="rh7-1" rsc="TagCloneOnly" score="INFINITY"/>
+ </constraints>
+"""
+
+
+FIXTURE_TAGS_CONFIG_XML = """
+ <tags>
+ <tag id="TagCloneOnly">
+ <obj_ref id="C-clone"/>
+ </tag>
+ <tag id="TagNotCloneOnly">
+ <obj_ref id="C-clone"/>
+ <obj_ref id="Dummy"/>
+ </tag>
+ </tags>
+"""
+
+
+FIXTURE_TAGS_RESULT_XML = """
+ <tags>
+ <tag id="TagNotCloneOnly">
+ <obj_ref id="Dummy"/>
+ </tag>
+ </tags>
+"""
+
+
class Unclone(
TestCase,
get_assert_pcs_effect_mixin(
@@ -66,6 +98,22 @@ class Unclone(
):
empty_cib = rc("cib-empty.xml")
+ def assert_tags_xml(self, expected_xml):
+ self.assert_resources_xml_in_cib(
+ expected_xml,
+ get_cib_part_func=lambda cib: etree.tostring(
+ etree.parse(cib).findall(".//tags")[0],
+ ),
+ )
+
+ def assert_constraint_xml(self, expected_xml):
+ self.assert_resources_xml_in_cib(
+ expected_xml,
+ get_cib_part_func=lambda cib: etree.tostring(
+ etree.parse(cib).findall(".//constraints")[0],
+ ),
+ )
+
def setUp(self):
# pylint: disable=invalid-name
self.temp_cib = get_tmp_file("tier1_cib_resource_group_ungroup")
@@ -75,18 +123,7 @@ class Unclone(
"resources", FIXTURE_CLONE, FIXTURE_DUMMY,
)
xml_manip.append_to_first_tag_name(
- "configuration",
- """
- <tags>
- <tag id="T1">
- <obj_ref id="C-clone"/>
- <obj_ref id="Dummy"/>
- </tag>
- <tag id="T2">
- <obj_ref id="C-clone"/>
- </tag>
- </tags>
- """,
+ "configuration", FIXTURE_TAGS_CONFIG_XML,
)
xml_manip.append_to_first_tag_name(
"constraints",
@@ -95,8 +132,8 @@ class Unclone(
rsc="C-clone" score="INFINITY"/>
""",
"""
- <rsc_location id="location-T1-rh7-1-INFINITY" node="rh7-1" rsc="T1"
- score="INFINITY"/>
+ <rsc_location id="location-TagCloneOnly-rh7-1-INFINITY"
+ node="rh7-1" rsc="TagCloneOnly" score="INFINITY"/>
""",
)
write_data_to_tmpfile(str(xml_manip), self.temp_cib)
@@ -111,6 +148,8 @@ class Unclone(
"Error: could not find resource: NonExistentClone\n",
)
self.assert_resources_xml_in_cib(FIXTURE_CLONE_AND_RESOURCE)
+ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML)
+ self.assert_constraint_xml(FIXTURE_CONSTRAINTS_CONFIG_XML)
def test_not_clone_resource(self):
self.assert_pcs_fail(
@@ -118,9 +157,15 @@ class Unclone(
"Error: 'Dummy' is not a clone resource\n",
)
self.assert_resources_xml_in_cib(FIXTURE_CLONE_AND_RESOURCE)
+ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML)
+ self.assert_constraint_xml(FIXTURE_CONSTRAINTS_CONFIG_XML)
def test_unclone_clone_id(self):
self.assert_effect("resource unclone C-clone", FIXTURE_RESOURCES)
+ self.assert_tags_xml(FIXTURE_TAGS_RESULT_XML)
+ self.assert_constraint_xml("<constraints/>")
def test_unclone_resoruce_id(self):
self.assert_effect("resource unclone C", FIXTURE_RESOURCES)
+ self.assert_tags_xml(FIXTURE_TAGS_RESULT_XML)
+ self.assert_constraint_xml("<constraints/>")
diff --git a/pcs_test/tier1/cib_resource/test_group_ungroup.py b/pcs_test/tier1/cib_resource/test_group_ungroup.py
index f86e9890..88cc315d 100644
--- a/pcs_test/tier1/cib_resource/test_group_ungroup.py
+++ b/pcs_test/tier1/cib_resource/test_group_ungroup.py
@@ -64,14 +64,63 @@ FIXTURE_AGROUP_XML = fixture_group_xml(
)
-class TestGroupMixin(
- get_assert_pcs_effect_mixin(
- lambda cib: etree.tostring(
- # pylint:disable=undefined-variable
- etree.parse(cib).findall(".//resources")[0]
- )
- ),
-):
+FIXTURE_CONSTRAINTS_CONFIG_XML = """
+ <constraints>
+ <rsc_location id="location-AGroup-rh7-1-INFINITY" node="rh7-1"
+ rsc="AGroup" score="INFINITY"/>
+ <rsc_location id="location-TagGroupOnly-rh7-1-INFINITY"
+ node="rh7-1" rsc="TagGroupOnly" score="INFINITY"/>
+ </constraints>
+"""
+
+FIXTURE_CLONE_TAG_CONSTRAINTS = """
+ <constraints>
+ <rsc_location id="location-AGroup-rh7-1-INFINITY" node="rh7-1"
+ rsc="AGroup-clone" score="INFINITY"
+ />
+ <rsc_location id="location-TagGroupOnly-rh7-1-INFINITY"
+ node="rh7-1" rsc="TagGroupOnly" score="INFINITY"
+ />
+ </constraints>
+"""
+
+
+FIXTURE_CLONE_CONSTRAINT = """
+ <constraints>
+ <rsc_location id="location-AGroup-rh7-1-INFINITY" node="rh7-1"
+ rsc="AGroup-clone" score="INFINITY"
+ />
+ </constraints>
+"""
+
+
+FIXTURE_TAGS_CONFIG_XML = """
+ <tags>
+ <tag id="TagGroupOnly">
+ <obj_ref id="AGroup"/>
+ </tag>
+ <tag id="TagNotGroupOnly">
+ <obj_ref id="AGroup"/>
+ <obj_ref id="A1"/>
+ <obj_ref id="A2"/>
+ <obj_ref id="A3"/>
+ </tag>
+ </tags>
+"""
+
+
+FIXTURE_TAGS_RESULT_XML = """
+ <tags>
+ <tag id="TagNotGroupOnly">
+ <obj_ref id="A1"/>
+ <obj_ref id="A2"/>
+ <obj_ref id="A3"/>
+ </tag>
+ </tags>
+"""
+
+
+class TestGroupMixin:
empty_cib = rc("cib-empty.xml")
def setUp(self):
@@ -81,17 +130,7 @@ class TestGroupMixin(
xml_manip = XmlManipulation.from_file(self.empty_cib)
xml_manip.append_to_first_tag_name("resources", FIXTURE_AGROUP_XML)
xml_manip.append_to_first_tag_name(
- "configuration",
- """
- <tags>
- <tag id="T1">
- <obj_ref id="AGroup"/>
- </tag>
- <tag id="T2">
- <obj_ref id="AGroup"/>
- </tag>
- </tags>
- """,
+ "configuration", FIXTURE_TAGS_CONFIG_XML,
)
xml_manip.append_to_first_tag_name(
"constraints",
@@ -100,8 +139,8 @@ class TestGroupMixin(
rsc="AGroup" score="INFINITY"/>
""",
"""
- <rsc_location id="location-T1-rh7-1-INFINITY" node="rh7-1" rsc="T1"
- score="INFINITY"/>
+ <rsc_location id="location-TagGroupOnly-rh7-1-INFINITY"
+ node="rh7-1" rsc="TagGroupOnly" score="INFINITY"/>
""",
)
write_data_to_tmpfile(str(xml_manip), self.temp_cib)
@@ -111,9 +150,33 @@ class TestGroupMixin(
self.temp_cib.close()
-class GroupDeleteRemoveUngroupBase(TestGroupMixin):
+class GroupDeleteRemoveUngroupBase(
+ get_assert_pcs_effect_mixin(
+ lambda cib: etree.tostring(
+ # pylint:disable=undefined-variable
+ etree.parse(cib).findall(".//resources")[0]
+ )
+ ),
+ TestGroupMixin,
+):
command = None
+ def assert_tags_xml(self, expected_xml):
+ self.assert_resources_xml_in_cib(
+ expected_xml,
+ get_cib_part_func=lambda cib: etree.tostring(
+ etree.parse(cib).findall(".//tags")[0],
+ ),
+ )
+
+ def assert_constraint_xml(self, expected_xml):
+ self.assert_resources_xml_in_cib(
+ expected_xml,
+ get_cib_part_func=lambda cib: etree.tostring(
+ etree.parse(cib).findall(".//constraints")[0],
+ ),
+ )
+
def test_nonexistent_group(self):
self.assert_pcs_fail(
f"resource {self.command} NonExistentGroup",
@@ -122,6 +185,8 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin):
self.assert_resources_xml_in_cib(
fixture_resources_xml([FIXTURE_AGROUP_XML]),
)
+ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML)
+ self.assert_constraint_xml(FIXTURE_CONSTRAINTS_CONFIG_XML)
def test_not_a_group_id(self):
self.assert_pcs_fail(
@@ -130,6 +195,8 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin):
self.assert_resources_xml_in_cib(
fixture_resources_xml([FIXTURE_AGROUP_XML]),
)
+ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML)
+ self.assert_constraint_xml(FIXTURE_CONSTRAINTS_CONFIG_XML)
def test_whole_group(self):
self.assert_effect(
@@ -142,10 +209,12 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin):
],
),
output=(
- "Removing Constraint - location-T1-rh7-1-INFINITY\n"
+ "Removing Constraint - location-TagGroupOnly-rh7-1-INFINITY\n"
"Removing Constraint - location-AGroup-rh7-1-INFINITY\n"
),
)
+ self.assert_tags_xml(FIXTURE_TAGS_RESULT_XML)
+ self.assert_constraint_xml("<constraints/>")
def test_specified_resources(self):
self.assert_effect(
@@ -160,6 +229,26 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin):
],
),
)
+ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML)
+ self.assert_constraint_xml(FIXTURE_CONSTRAINTS_CONFIG_XML)
+
+ def test_all_resources(self):
+ self.assert_effect(
+ f"resource {self.command} AGroup A1 A2 A3",
+ fixture_resources_xml(
+ [
+ fixture_primitive_xml("A1"),
+ fixture_primitive_xml("A2"),
+ fixture_primitive_xml("A3"),
+ ],
+ ),
+ output=(
+ "Removing Constraint - location-TagGroupOnly-rh7-1-INFINITY\n"
+ "Removing Constraint - location-AGroup-rh7-1-INFINITY\n"
+ ),
+ )
+ self.assert_tags_xml(FIXTURE_TAGS_RESULT_XML)
+ self.assert_constraint_xml("<constraints/>")
def test_cloned_group(self):
self.assert_pcs_success("resource clone AGroup")
@@ -172,6 +261,8 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin):
[fixture_clone_xml("AGroup-clone", FIXTURE_AGROUP_XML)],
)
)
+ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML)
+ self.assert_constraint_xml(FIXTURE_CLONE_TAG_CONSTRAINTS)
def test_cloned_group_all_resorces_specified(self):
self.assert_pcs_success("resource clone AGroup")
@@ -184,6 +275,8 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin):
[fixture_clone_xml("AGroup-clone", FIXTURE_AGROUP_XML)],
)
)
+ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML)
+ self.assert_constraint_xml(FIXTURE_CLONE_TAG_CONSTRAINTS)
def test_cloned_group_with_one_resource(self):
self.assert_pcs_success("resource clone AGroup")
@@ -199,8 +292,10 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin):
fixture_primitive_xml("A2"),
],
),
- output="Removing Constraint - location-T1-rh7-1-INFINITY\n",
+ output="Removing Constraint - location-TagGroupOnly-rh7-1-INFINITY\n",
)
+ self.assert_tags_xml(FIXTURE_TAGS_RESULT_XML)
+ self.assert_constraint_xml(FIXTURE_CLONE_CONSTRAINT)
class ResourceUngroup(GroupDeleteRemoveUngroupBase, TestCase):
diff --git a/pcs_test/tools/cib.py b/pcs_test/tools/cib.py
index d52176cf..5eaaa92e 100644
--- a/pcs_test/tools/cib.py
+++ b/pcs_test/tools/cib.py
@@ -30,8 +30,14 @@ def xml_format(xml_string):
def get_assert_pcs_effect_mixin(get_cib_part):
class AssertPcsEffectMixin(AssertPcsMixin):
- def assert_resources_xml_in_cib(self, expected_xml_resources):
- xml = get_cib_part(self.temp_cib)
+ def assert_resources_xml_in_cib(
+ self, expected_xml_resources, get_cib_part_func=None,
+ ):
+ self.temp_cib.seek(0)
+ if get_cib_part_func is not None:
+ xml = get_cib_part_func(self.temp_cib)
+ else:
+ xml = get_cib_part(self.temp_cib)
try:
assert_xml_equal(expected_xml_resources, xml.decode())
except AssertionError as e:
--
2.25.4

View File

@ -0,0 +1,80 @@
From 85f8cbca6af296a5b8e4d43e9f56daed0d7c195b Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Mon, 10 Aug 2020 12:17:01 +0200
Subject: [PATCH 1/2] rule: fix mixing 'and' and 'or' expressions
---
pcs/lib/cib/rule/parsed_to_cib.py | 5 +++++
pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py | 4 ++--
pcs_test/tier1/test_cib_options.py | 11 +++++++++--
3 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/pcs/lib/cib/rule/parsed_to_cib.py b/pcs/lib/cib/rule/parsed_to_cib.py
index 0fcae4f1..130663db 100644
--- a/pcs/lib/cib/rule/parsed_to_cib.py
+++ b/pcs/lib/cib/rule/parsed_to_cib.py
@@ -62,6 +62,11 @@ def __export_bool(
{
"id": create_subelement_id(parent_el, "rule", id_provider),
"boolean-op": boolean.operator.lower(),
+ # Score or score-attribute is required for nested rules, otherwise
+ # the CIB is not valid. Pacemaker doesn't use the score of nested
+ # rules. Score for the top rule, which is used by pacemaker, is
+ # supposed to be set in the export function above.
+ "score": "0",
},
)
for child in boolean.children:
diff --git a/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py b/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py
index f61fce99..fa639f7c 100644
--- a/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py
+++ b/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py
@@ -185,7 +185,7 @@ class Complex(Base):
),
"""
<rule id="X-rule" boolean-op="and" score="INFINITY">
- <rule id="X-rule-rule" boolean-op="or">
+ <rule id="X-rule-rule" boolean-op="or" score="0">
<rsc_expression id="X-rule-rule-rsc-ocf-pacemaker-Dummy"
class="ocf" provider="pacemaker" type="Dummy"
/>
@@ -197,7 +197,7 @@ class Complex(Base):
class="ocf" provider="heartbeat" type="Dummy"
/>
</rule>
- <rule id="X-rule-rule-1" boolean-op="or">
+ <rule id="X-rule-rule-1" boolean-op="or" score="0">
<op_expression id="X-rule-rule-1-op-monitor"
name="monitor" interval="30s"
/>
diff --git a/pcs_test/tier1/test_cib_options.py b/pcs_test/tier1/test_cib_options.py
index ba8f3515..92dbaed1 100644
--- a/pcs_test/tier1/test_cib_options.py
+++ b/pcs_test/tier1/test_cib_options.py
@@ -254,14 +254,21 @@ class OpDefaultsSetCreate(
self.assert_effect(
(
f"{self.cli_command} set create id=X meta nam1=val1 "
- "rule resource ::Dummy and op monitor"
+ "rule resource ::Dummy and (op start or op stop)"
),
f"""\
<{self.cib_tag}>
<meta_attributes id="X">
<rule id="X-rule" boolean-op="and" score="INFINITY">
<rsc_expression id="X-rule-rsc-Dummy" type="Dummy"/>
- <op_expression id="X-rule-op-monitor" name="monitor"/>
+ <rule id="X-rule-rule" boolean-op="or" score="0">
+ <op_expression id="X-rule-rule-op-start"
+ name="start"
+ />
+ <op_expression id="X-rule-rule-op-stop"
+ name="stop"
+ />
+ </rule>
</rule>
<nvpair id="X-nam1" name="nam1" value="val1"/>
</meta_attributes>
--
2.25.4

View File

@ -1,54 +0,0 @@
From 898cfe8212a5940dba6552196ddd243f912b5942 Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Tue, 11 Feb 2020 10:18:33 +0100
Subject: [PATCH 5/7] daemon: fix cookie options
---
pcs/daemon/app/session.py | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/pcs/daemon/app/session.py b/pcs/daemon/app/session.py
index b4d29add..dcbb4c23 100644
--- a/pcs/daemon/app/session.py
+++ b/pcs/daemon/app/session.py
@@ -4,10 +4,16 @@ from pcs.daemon.auth import check_user_groups, authorize_user
PCSD_SESSION = "pcsd.sid"
class Mixin:
- __session = None
"""
Mixin for tornado.web.RequestHandler
"""
+
+ __session = None
+ __cookie_options = {
+ "secure": True,
+ "httponly": True,
+ }
+
def initialize(self, session_storage: Storage):
self.__storage = session_storage
@@ -63,7 +69,7 @@ class Mixin:
"""
Write the session id into a response cookie.
"""
- self.set_cookie(PCSD_SESSION, self.session.sid)
+ self.set_cookie(PCSD_SESSION, self.session.sid, **self.__cookie_options)
def put_request_cookies_sid_to_response_cookies_sid(self):
"""
@@ -73,7 +79,9 @@ class Mixin:
#TODO this method should exist temporarily (for sinatra compatibility)
#pylint: disable=invalid-name
if self.__sid_from_client is not None:
- self.set_cookie(PCSD_SESSION, self.__sid_from_client)
+ self.set_cookie(
+ PCSD_SESSION, self.__sid_from_client, **self.__cookie_options
+ )
def was_sid_in_request_cookies(self):
return self.__sid_from_client is not None
--
2.21.1

View File

@ -1,4 +1,4 @@
From 10d13839883a96b35fc609eb51939ec97bc4aac6 Mon Sep 17 00:00:00 2001
From aaf5cbfcc661cedc49ae5d86c0d442502aa17231 Mon Sep 17 00:00:00 2001
From: Ivan Devat <idevat@redhat.com>
Date: Tue, 20 Nov 2018 15:03:56 +0100
Subject: [PATCH 2/2] do not support cluster setup with udp(u) transport
@ -10,10 +10,10 @@ Subject: [PATCH 2/2] do not support cluster setup with udp(u) transport
3 files changed, 6 insertions(+)
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index ff2ba0b0..7278c8dc 100644
index 3efc5bb2..20247774 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -283,6 +283,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable
@@ -376,6 +376,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable
Transports udp and udpu:
.br
@ -23,10 +23,10 @@ index ff2ba0b0..7278c8dc 100644
.br
Transport options are: ip_version, netmtu
diff --git a/pcs/usage.py b/pcs/usage.py
index 30c63964..60373d82 100644
index 0f3c95a3..51bc1196 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -689,6 +689,7 @@ Commands:
@@ -796,6 +796,7 @@ Commands:
hash=sha256. To disable encryption, set cipher=none and hash=none.
Transports udp and udpu:
@ -49,5 +49,5 @@ index b857cbae..b8d48d92 100644
#csetup-transport-options.knet .without-knet
{
--
2.21.1
2.25.4

View File

@ -1,39 +0,0 @@
From a6708c6bde467cfced3c4a950eadff0375908303 Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Thu, 23 Jan 2020 14:47:49 +0100
Subject: [PATCH 2/7] update a hint for 'resource create ... master'
---
pcs/cli/resource/parse_args.py | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/pcs/cli/resource/parse_args.py b/pcs/cli/resource/parse_args.py
index 92dddac9..86280edb 100644
--- a/pcs/cli/resource/parse_args.py
+++ b/pcs/cli/resource/parse_args.py
@@ -1,5 +1,5 @@
from pcs.cli.common.parse_args import group_by_keywords, prepare_options
-from pcs.cli.common.errors import CmdLineInputError, HINT_SYNTAX_CHANGE
+from pcs.cli.common.errors import CmdLineInputError, SEE_MAN_CHANGES
def parse_create_simple(arg_list):
@@ -51,7 +51,14 @@ def parse_create(arg_list):
# manpage.
# To be removed in the next significant version.
if e.message == "missing value of 'master' option":
- raise CmdLineInputError(message=e.message, hint=HINT_SYNTAX_CHANGE)
+ raise CmdLineInputError(
+ message=e.message,
+ hint=(
+ "Master/Slave resources have been renamed to promotable "
+ "clones, please use the 'promotable' keyword instead of "
+ "'master'. " + SEE_MAN_CHANGES
+ )
+ )
raise e
return parts
--
2.21.1

View File

@ -1,11 +1,17 @@
Name: pcs
Version: 0.10.4
Release: 6%{?dist}.1
Version: 0.10.6
Release: 4%{?dist}
# https://docs.fedoraproject.org/en-US/packaging-guidelines/LicensingGuidelines/
# https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses
# GPLv2: pcs
# ASL 2.0: tornado
# MIT: handlebars
License: GPLv2 and ASL 2.0 and MIT
# ASL 2.0: dataclasses, tornado
# MIT: handlebars, backports, dacite, daemons, ethon, mustermann, rack,
# rack-protection, rack-test, sinatra, tilt
# GPLv2 or Ruby: eventmachine, json
# (GPLv2 or Ruby) and BSD: thin
# BSD or Ruby: open4, ruby2_keywords
# BSD and MIT: ffi
License: GPLv2 and ASL 2.0 and MIT and BSD and (GPLv2 or Ruby) and (BSD or Ruby)
URL: https://github.com/ClusterLabs/pcs
Group: System Environment/Base
Summary: Pacemaker Configuration System
@ -18,27 +24,31 @@ ExclusiveArch: i686 x86_64 s390x ppc64le aarch64
%global pcs_source_name %{name}-%{version_or_commit}
# ui_commit can be determined by hash, tag or branch
%global ui_commit 0.1.2
%global ui_commit 0.1.4
%global ui_modules_version 0.1.3
%global ui_src_name pcs-web-ui-%{ui_commit}
%global pcs_snmp_pkg_name pcs-snmp
%global pyagentx_version 0.4.pcs.2
%global tornado_version 6.0.3
%global version_rubygem_backports 3.11.4
%global tornado_version 6.0.4
%global dataclasses_version 0.6
%global dacite_version 1.5.0
%global version_rubygem_backports 3.17.2
%global version_rubygem_daemons 1.3.1
%global version_rubygem_ethon 0.11.0
%global version_rubygem_ethon 0.12.0
%global version_rubygem_eventmachine 1.2.7
%global version_rubygem_ffi 1.9.25
%global version_rubygem_ffi 1.13.1
%global version_rubygem_json 2.3.0
%global version_rubygem_mustermann 1.0.3
%global version_rubygem_mustermann 1.1.1
%global version_rubygem_open4 1.3.4
%global version_rubygem_rack 2.0.6
%global version_rubygem_rack_protection 2.0.4
%global version_rubygem_rack_test 1.0.0
%global version_rubygem_sinatra 2.0.4
%global version_rubygem_rack 2.2.3
%global version_rubygem_rack_protection 2.0.8.1
%global version_rubygem_rack_test 1.1.0
%global version_rubygem_ruby2_keywords 0.0.2
%global version_rubygem_sinatra 2.0.8.1
%global version_rubygem_thin 1.7.2
%global version_rubygem_tilt 2.0.9
%global version_rubygem_tilt 2.0.10
# We do not use _libdir macro because upstream is not prepared for it.
# Pcs does not include binaries and thus it should live in /usr/lib. Tornado
@ -73,6 +83,8 @@ Source2: pcsd-bundle-config-2
Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}/pyagentx-%{pyagentx_version}.tar.gz
Source42: https://github.com/tornadoweb/tornado/archive/v%{tornado_version}/tornado-%{tornado_version}.tar.gz
Source43: https://github.com/ericvsmith/dataclasses/archive/%{dataclasses_version}/dataclasses-%{dataclasses_version}.tar.gz
Source44: https://github.com/konradhalas/dacite/archive/v%{dacite_version}/dacite-%{dacite_version}.tar.gz
Source81: https://rubygems.org/downloads/backports-%{version_rubygem_backports}.gem
Source82: https://rubygems.org/downloads/ethon-%{version_rubygem_ethon}.gem
@ -91,42 +103,36 @@ Source92: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem
Source93: https://rubygems.org/downloads/eventmachine-%{version_rubygem_eventmachine}.gem
Source94: https://rubygems.org/downloads/daemons-%{version_rubygem_daemons}.gem
Source95: https://rubygems.org/downloads/thin-%{version_rubygem_thin}.gem
Source96: https://rubygems.org/downloads/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem
Source100: https://github.com/idevat/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz
Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/pcs-web-ui-node-modules-%{ui_commit}.tar.xz
Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_modules_version}/pcs-web-ui-node-modules-%{ui_modules_version}.tar.xz
# Patches from upstream.
# They should come before downstream patches to avoid unnecessary conflicts.
# Z-streams are exception here: they can come from upstream but should be
# applied at the end to keep z-stream changes as straightforward as possible.
Patch1: bz1676431-01-Display-status-of-disaster-recovery.patch
Patch2: bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch
Patch3: bz1792946-01-tests-update-for-pacemaker-2.0.3-4.patch
Patch4: bz1781303-01-fix-safe-disabling-clones-groups-bundles.patch
Patch5: update-a-hint-for-resource-create-master.patch
Patch6: bz1793574-01-fix-detecting-fence-history-support.patch
Patch7: bz1750427-01-link-to-sbd-man-page-from-sbd-enable-doc.patch
Patch8: daemon-fix-cookie-options.patch
Patch9: bz1783106-01-fix-sinatra-wrapper-performance-issue.patch
Patch10: bz1783106-02-send-request-from-python-to-ruby-more-directly.patch
# Patch1: name.patch
Patch1: bz1817547-01-resource-and-operation-defaults.patch
Patch2: bz1805082-01-fix-resource-stonith-refresh-documentation.patch
Patch3: bz1843079-01-upgrade-CIB-schema-for-on-fail-demote.patch
Patch4: bz1857295-01-Fix-tag-removal-in-resource-unclone-ungroup-commands.patch
Patch5: bz1867516-01-rule-fix-mixing-and-and-or-expressions.patch
# Downstream patches do not come from upstream. They adapt pcs for specific
# RHEL needs.
Patch101: do-not-support-cluster-setup-with-udp-u-transport.patch
Patch102: bz1832914-01-fix-running-pcs-status-on-remote-nodes.patch
Patch103: bz1838084-01-fix-ruby-daemon-closing-connection-after-30s.patch
Patch104: bz1840158-01-fix-inability-to-create-colocation-const.-web-ui.patch
# git for patches
BuildRequires: git
#printf from coreutils is used in makefile
BuildRequires: coreutils
BuildRequires: execstack
# python for pcs
BuildRequires: platform-python
BuildRequires: python3-devel
BuildRequires: platform-python-setuptools
BuildRequires: python3-pycurl
BuildRequires: python3-pyparsing
# gcc for compiling custom rubygems
BuildRequires: gcc
BuildRequires: gcc-c++
@ -143,16 +149,6 @@ BuildRequires: systemd
# for tests
BuildRequires: python3-lxml
BuildRequires: python3-pyOpenSSL
BuildRequires: pacemaker-cli >= 2.0.0
# BuildRequires: fence-agents-all
BuildRequires: fence-agents-apc
BuildRequires: fence-agents-scsi
BuildRequires: fence-agents-ipmilan
# for tests
%ifarch i686 x86_64
BuildRequires: fence-virt
%endif
BuildRequires: booth-site
# pcsd fonts and font management tools for creating symlinks to fonts
BuildRequires: fontconfig
BuildRequires: liberation-sans-fonts
@ -169,6 +165,7 @@ Requires: python3-lxml
Requires: platform-python-setuptools
Requires: python3-clufter => 0.70.0
Requires: python3-pycurl
Requires: python3-pyparsing
# ruby and gems for pcsd
Requires: ruby >= 2.2.0
Requires: rubygems
@ -196,8 +193,12 @@ Requires: liberation-sans-fonts
Requires: overpass-fonts
# favicon Red Hat logo
Requires: redhat-logos
# needs logrotate for /etc/logrotate.d/pcsd
Requires: logrotate
Provides: bundled(tornado) = %{tornado_version}
Provides: bundled(dataclasses) = %{dataclasses_version}
Provides: bundled(dacite) = %{dacite_version}
Provides: bundled(backports) = %{version_rubygem_backports}
Provides: bundled(daemons) = %{version_rubygem_daemons}
Provides: bundled(ethon) = %{version_rubygem_ethon}
@ -207,8 +208,9 @@ Provides: bundled(json) = %{version_rubygem_json}
Provides: bundled(mustermann) = %{version_rubygem_mustermann}
Provides: bundled(open4) = %{version_rubygem_open4}
Provides: bundled(rack) = %{version_rubygem_rack}
Provides: bundled(rack) = %{version_rubygem_rack_protection}
Provides: bundled(rack) = %{version_rubygem_rack_test}
Provides: bundled(rack_protection) = %{version_rubygem_rack_protection}
Provides: bundled(rack_test) = %{version_rubygem_rack_test}
Provides: bundled(ruby2_keywords) = %{version_rubygem_ruby2_keywords}
Provides: bundled(sinatra) = %{version_rubygem_sinatra}
Provides: bundled(thin) = %{version_rubygem_thin}
Provides: bundled(tilt) = %{version_rubygem_tilt}
@ -258,7 +260,11 @@ update_times(){
unset file_list[0]
for fname in ${file_list[@]}; do
touch -r $reference_file $fname
# some files could be deleted by a patch therefore we test file for
# existence before touch to avoid exit with error: No such file or
# directory
# diffstat cannot create list of files without deleted files
test -e $fname && touch -r $reference_file $fname
done
}
@ -277,20 +283,13 @@ update_times_patch(){
update_times ${patch_file_name} `diffstat -p1 -l ${patch_file_name}`
}
# update_times_patch %%{PATCH1}
update_times_patch %{PATCH1}
update_times_patch %{PATCH2}
update_times_patch %{PATCH3}
update_times_patch %{PATCH4}
update_times_patch %{PATCH5}
update_times_patch %{PATCH6}
update_times_patch %{PATCH7}
update_times_patch %{PATCH8}
update_times_patch %{PATCH9}
update_times_patch %{PATCH10}
update_times_patch %{PATCH101}
update_times_patch %{PATCH102}
update_times_patch %{PATCH103}
update_times_patch %{PATCH104}
cp -f %SOURCE1 pcsd/public/images
# prepare dirs/files necessary for building web ui
@ -322,6 +321,7 @@ cp -f %SOURCE92 pcsd/vendor/cache
cp -f %SOURCE93 pcsd/vendor/cache
cp -f %SOURCE94 pcsd/vendor/cache
cp -f %SOURCE95 pcsd/vendor/cache
cp -f %SOURCE96 pcsd/vendor/cache
# 3) dir for python bundles
@ -342,6 +342,20 @@ update_times %SOURCE42 `find %{bundled_src_dir}/tornado -follow`
cp %{bundled_src_dir}/tornado/LICENSE tornado_LICENSE
cp %{bundled_src_dir}/tornado/README.rst tornado_README.rst
# 6) sources for python dataclasses
tar -xzf %SOURCE43 -C %{bundled_src_dir}
mv %{bundled_src_dir}/dataclasses-%{dataclasses_version} %{bundled_src_dir}/dataclasses
update_times %SOURCE43 `find %{bundled_src_dir}/dataclasses -follow`
cp %{bundled_src_dir}/dataclasses/LICENSE.txt dataclasses_LICENSE.txt
cp %{bundled_src_dir}/dataclasses/README.rst dataclasses_README.rst
# 7) sources for python dacite
tar -xzf %SOURCE44 -C %{bundled_src_dir}
mv %{bundled_src_dir}/dacite-%{dacite_version} %{bundled_src_dir}/dacite
update_times %SOURCE44 `find %{bundled_src_dir}/dacite -follow`
cp %{bundled_src_dir}/dacite/LICENSE dacite_LICENSE
cp %{bundled_src_dir}/dacite/README.md dacite_README.md
%build
%define debug_package %{nil}
@ -351,6 +365,11 @@ pwd
# build bundled rubygems (in main install it is disabled by BUILD_GEMS=false)
mkdir -p %{rubygem_bundle_dir}
# The '-g' cflags option is needed for generation of MiniDebugInfo for shared
# libraries from rubygem extensions
# Currently used rubygems with extensions: eventmachine, ffi, json, thin
# There was rpmdiff issue with missing .gnu_debugdata section
# see https://docs.engineering.redhat.com/display/HTD/rpmdiff-elf-stripping
gem install \
--force --verbose --no-rdoc --no-ri -l --no-user-install \
-i %{rubygem_bundle_dir} \
@ -365,22 +384,31 @@ gem install \
%{rubygem_cache_dir}/rack-protection-%{version_rubygem_rack_protection}.gem \
%{rubygem_cache_dir}/rack-test-%{version_rubygem_rack_test}.gem \
%{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \
%{rubygem_cache_dir}/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem \
%{rubygem_cache_dir}/sinatra-%{version_rubygem_sinatra}.gem \
%{rubygem_cache_dir}/thin-%{version_rubygem_thin}.gem \
%{rubygem_cache_dir}/tilt-%{version_rubygem_tilt}.gem \
-- '--with-ldflags="-Wl,-z,relro -Wl,-z,ibt -Wl,-z,now -Wl,--gc-sections"' \
'--with-cflags="-O2 -ffunction-sections"'
'--with-cflags="-g -O2 -ffunction-sections"'
# We can remove files required for gem compilation
rm -rf %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext
rm -rf %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext
# With this file there is "File is not stripped" problem during rpmdiff
# See https://docs.engineering.redhat.com/display/HTD/rpmdiff-elf-stripping
for fname in `find %{rubygem_bundle_dir}/extensions -type f -name "*.so"`; do
strip ${fname}
done
# prepare license files
# some rubygems do not have a license file (ruby2_keywords, thin)
mv %{rubygem_bundle_dir}/gems/backports-%{version_rubygem_backports}/LICENSE.txt backports_LICENSE.txt
mv %{rubygem_bundle_dir}/gems/daemons-%{version_rubygem_daemons}/LICENSE daemons_LICENSE
mv %{rubygem_bundle_dir}/gems/ethon-%{version_rubygem_ethon}/LICENSE ethon_LICENSE
mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/LICENSE eventmachine_LICENSE
mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/GNU eventmachine_GNU
mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/COPYING ffi_COPYING
mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE ffi_LICENSE
mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE.SPECS ffi_LICENSE.SPECS
mv %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/LICENSE json_LICENSE
mv %{rubygem_bundle_dir}/gems/mustermann-%{version_rubygem_mustermann}/LICENSE mustermann_LICENSE
mv %{rubygem_bundle_dir}/gems/open4-%{version_rubygem_open4}/LICENSE open4_LICENSE
mv %{rubygem_bundle_dir}/gems/rack-%{version_rubygem_rack}/MIT-LICENSE rack_MIT-LICENSE
mv %{rubygem_bundle_dir}/gems/rack-protection-%{version_rubygem_rack_protection}/License rack-protection_License
mv %{rubygem_bundle_dir}/gems/rack-test-%{version_rubygem_rack_test}/MIT-LICENSE.txt rack-test_MIT-LICENSE.txt
mv %{rubygem_bundle_dir}/gems/sinatra-%{version_rubygem_sinatra}/LICENSE sinatra_LICENSE
mv %{rubygem_bundle_dir}/gems/tilt-%{version_rubygem_tilt}/COPYING tilt_COPYING
# build web ui and put it to pcsd
make -C %{pcsd_public_dir}/%{ui_src_name} build
@ -398,24 +426,37 @@ make install \
BASH_COMPLETION_DIR=%{_datadir}/bash-completion/completions \
BUNDLE_PYAGENTX_SRC_DIR=`readlink -f %{bundled_src_dir}/pyagentx` \
BUNDLE_TORNADO_SRC_DIR=`readlink -f %{bundled_src_dir}/tornado` \
BUNDLE_DACITE_SRC_DIR=`readlink -f %{bundled_src_dir}/dacite` \
BUNDLE_DATACLASSES_SRC_DIR=`readlink -f %{bundled_src_dir}/dataclasses` \
BUILD_GEMS=false \
SYSTEMCTL_OVERRIDE=true \
hdrdir="%{_includedir}" \
rubyhdrdir="%{_includedir}" \
includedir="%{_includedir}"
# With this file there is "File is not stripped" problem during rpmdiff
# See https://docs.engineering.redhat.com/display/HTD/rpmdiff-elf-stripping
for fname in `find ${RPM_BUILD_ROOT}%{pcs_libdir}/pcs/bundled/packages/tornado/ -type f -name "*.so"`; do
strip ${fname}
done
# symlink favicon into pcsd directories
ln -fs /etc/favicon.png ${RPM_BUILD_ROOT}%{pcs_libdir}/%{pcsd_public_dir}/images/favicon.png
#after the ruby gem compilation we do not need ruby gems in the cache
rm -r -v $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_cache_dir}
# We are not building debug package for pcs but we need to add MiniDebuginfo
# to the bundled shared libraries from rubygem extensions in order to satisfy
# rpmdiff's binary stripping checker.
# Therefore we call find-debuginfo.sh script manually in order to strip
# binaries and add MiniDebugInfo with .gnu_debugdata section
/usr/lib/rpm/find-debuginfo.sh -j2 -m -i -S debugsourcefiles.list
# find-debuginfo.sh generated some files into /usr/lib/debug and
# /usr/src/debug/ that we don't want in the package
rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/debug
rm -rf $RPM_BUILD_ROOT%{_prefix}/src/debug
# We can remove files required for gem compilation
rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/ext
rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext
rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext
rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/thin-%{version_rubygem_thin}/ext
%check
# In the building environment LC_CTYPE is set to C which causes tests to fail
# due to python prints a warning about it to stderr. The following environment
@ -438,7 +479,7 @@ run_all_tests(){
# TODO: Investigate the issue
BUNDLED_LIB_LOCATION=$RPM_BUILD_ROOT%{pcs_libdir}/pcs/bundled/packages \
%{__python3} pcs_test/suite.py -v --vanilla --all-but \
%{__python3} pcs_test/suite.py --tier0 -v --vanilla --all-but \
pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe \
pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_get_not_locked \
pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_post_not_locked \
@ -504,8 +545,29 @@ remove_all_tests
%doc CHANGELOG.md
%doc README.md
%doc tornado_README.rst
%doc dacite_README.md
%doc dataclasses_README.rst
%license tornado_LICENSE
%license dacite_LICENSE
%license dataclasses_LICENSE.txt
%license COPYING
# rubygem licenses
%license backports_LICENSE.txt
%license daemons_LICENSE
%license ethon_LICENSE
%license eventmachine_LICENSE
%license eventmachine_GNU
%license ffi_COPYING
%license ffi_LICENSE
%license ffi_LICENSE.SPECS
%license json_LICENSE
%license mustermann_LICENSE
%license open4_LICENSE
%license rack_MIT-LICENSE
%license rack-protection_License
%license rack-test_MIT-LICENSE.txt
%license sinatra_LICENSE
%license tilt_COPYING
%{python3_sitelib}/pcs
%{python3_sitelib}/pcs-%{version}-py3.*.egg-info
%{_sbindir}/pcs
@ -514,11 +576,14 @@ remove_all_tests
%{pcs_libdir}/pcsd/*
%{pcs_libdir}/pcsd/.bundle/config
%{pcs_libdir}/pcs/bundled/packages/tornado*
%{pcs_libdir}/pcs/bundled/packages/dacite*
%{pcs_libdir}/pcs/bundled/packages/dataclasses*
%{pcs_libdir}/pcs/bundled/packages/__pycache__/dataclasses.cpython-36.pyc
%{_unitdir}/pcsd.service
%{_unitdir}/pcsd-ruby.service
%{_datadir}/bash-completion/completions/pcs
%{_sharedstatedir}/pcsd
%{_sysconfdir}/pam.d/pcsd
%config(noreplace) %{_sysconfdir}/pam.d/pcsd
%dir %{_var}/log/pcsd
%config(noreplace) %{_sysconfdir}/logrotate.d/pcsd
%config(noreplace) %{_sysconfdir}/sysconfig/pcsd
@ -558,12 +623,37 @@ remove_all_tests
%license pyagentx_LICENSE.txt
%changelog
* Wed May 27 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.4-6.el8_2.1
- Fixed running pcs status on remote nodes
- Fixed ruby daemon closing connection after 30s
- Fixed inability to create colocation constraint in webUI
- Updated bundled rubygem-json
- Resolves: rhbz#1832914 rhbz#1838084 rhbz#1840154 rhbz#1840158
* Tue Aug 11 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.6-4
- Fixed invalid CIB error caused by resource and operation defaults with mixed and-or rules
- Updated pcs-web-ui
- Resolves: rhbz#1867516
* Thu Jul 16 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.6-3
- Added Upgrade CIB if user specifies on-fail=demote
- Fixed rpmdiff issue with binary stripping checker
- Fixed removing non-empty tag by removing tagged resource group or clone
- Resolves: rhbz#1843079 rhbz#1857295
* Thu Jun 25 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.6-2
- Added resource and operation defaults that apply to specific resource/operation types
- Added Requires/BuildRequires: python3-pyparsing
- Added Requires: logrotate
- Fixed resource and stonith documentation
- Fixed rubygem licenses
- Fixed update_times()
- Updated rubygem rack to version 2.2.3
- Removed BuildRequires execstack (it is not needed)
- Resolves: rhbz#1805082 rhbz#1817547
* Thu Jun 11 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.6-1
- Rebased to latest upstream sources (see CHANGELOG.md)
- Added python bundled dependencies: dacite, dataclasses
- Added new bundled rubygem ruby2_keywords
- Updated rubygem bundled packages: backports, ethon, ffi, json, mustermann, rack, rack_protection, rack_test, sinatra, tilt
- Updated pcs-web-ui
- Updated test run, only tier0 tests are running during build
- Removed BuildRequires needed for tier1 tests which were removed for build (pacemaker-cli, fence_agents-*, fence_virt, booth-site)
- Resolves: rhbz#1387358 rhbz#1684676 rhbz#1722970 rhbz#1778672 rhbz#1782553 rhbz#1790460 rhbz#1805082 rhbz#1810017 rhbz#1817547 rhbz#1830552 rhbz#1832973 rhbz#1833114 rhbz#1833506 rhbz#1838853 rhbz#1839637
* Fri Mar 20 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.4-6
- Fixed communication between python and ruby daemons