import pcs-0.10.4-6.el8_2.1

CentOS Sources authored 2020-06-09 17:54:56 -04:00, committed by Andrew Lukoshko
parent 400a5be185
commit 3043f1d354
6 changed files with 403 additions and 4 deletions

.gitignore

@@ -4,7 +4,7 @@ SOURCES/daemons-1.3.1.gem
 SOURCES/ethon-0.11.0.gem
 SOURCES/eventmachine-1.2.7.gem
 SOURCES/ffi-1.9.25.gem
-SOURCES/json-2.1.0.gem
+SOURCES/json-2.3.0.gem
 SOURCES/mustermann-1.0.3.gem
 SOURCES/open4-1.3.4-1.gem
 SOURCES/pcs-0.10.4.tar.gz

.pcs.metadata

@@ -4,7 +4,7 @@ e28c1e78d1a6e34e80f4933b494f1e0501939dd3 SOURCES/daemons-1.3.1.gem
 3c921ceeb2847be8cfa25704be74923e233786bd SOURCES/ethon-0.11.0.gem
 7a5b2896e210fac9759c786ee4510f265f75b481 SOURCES/eventmachine-1.2.7.gem
 86fa011857f977254ccf39f507587310f9ade768 SOURCES/ffi-1.9.25.gem
-8b9e81a2a6ff57f97bec1f65940c61cc6b6d81be SOURCES/json-2.1.0.gem
+0230e8c5a37f1543982e5b04be503dd5f9004b47 SOURCES/json-2.3.0.gem
 2d090e7d3cd2a35efeaeacf006100fb83b828686 SOURCES/mustermann-1.0.3.gem
 41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem
 d2b649f271580b18d39efffa93f62b55291ef55d SOURCES/pcs-0.10.4.tar.gz

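For background, git.centos.org keeps only these SHA-1 sums under version control; the gem and tarball blobs themselves are fetched from the lookaside cache into SOURCES/ by the packaging tooling. A minimal sketch of the verification step, assuming the one-entry-per-line `<sha1>  <path>` format shown above (the helper name is illustrative):

```python
import hashlib
from pathlib import Path

def verify_metadata(metadata_file: str = ".pcs.metadata") -> bool:
    """Check each downloaded SOURCES/ blob against its recorded SHA-1 sum."""
    ok = True
    for line in Path(metadata_file).read_text().splitlines():
        if not line.strip():
            continue  # skip blank lines
        expected, path = line.split(None, 1)
        actual = hashlib.sha1(Path(path).read_bytes()).hexdigest()
        if actual != expected:
            print(f"checksum mismatch for {path}: {actual} != {expected}")
            ok = False
    return ok
```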
SOURCES/bz1832914-01-fix-running-pcs-status-on-remote-nodes.patch

@@ -0,0 +1,322 @@
From d88962d655257940a678724cc8d7bc1008ed3a46 Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Tue, 5 May 2020 11:02:36 +0200
Subject: [PATCH 1/3] fix running 'pcs status' on remote nodes
---
pcs/lib/commands/status.py | 24 +++-
pcs_test/tier0/lib/commands/test_status.py | 122 +++++++++++++++++++++
2 files changed, 141 insertions(+), 5 deletions(-)
diff --git a/pcs/lib/commands/status.py b/pcs/lib/commands/status.py
index 26332a65..84e3e046 100644
--- a/pcs/lib/commands/status.py
+++ b/pcs/lib/commands/status.py
@@ -1,3 +1,4 @@
+import os.path
from typing import (
Iterable,
List,
@@ -6,6 +7,7 @@ from typing import (
)
from xml.etree.ElementTree import Element
+from pcs import settings
from pcs.common import file_type_codes
from pcs.common.node_communicator import Communicator
from pcs.common.reports import (
@@ -17,7 +19,7 @@ from pcs.common.tools import (
indent,
)
from pcs.lib import reports
-from pcs.lib.cib import stonith
+from pcs.lib.cib import nvpair, stonith
from pcs.lib.cib.tools import get_crm_config, get_resources
from pcs.lib.communication.nodes import CheckReachability
from pcs.lib.communication.tools import run as run_communication
@@ -57,6 +59,7 @@ def full_cluster_status_plaintext(
"""
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
+ # pylint: disable=too-many-statements
# validation
if not env.is_cib_live and env.is_corosync_conf_live:
@@ -84,7 +87,11 @@ def full_cluster_status_plaintext(
status_text, warning_list = get_cluster_status_text(
runner, hide_inactive_resources, verbose
)
- corosync_conf = env.get_corosync_conf()
+ corosync_conf = None
+ # If we are live on a remote node, we have no corosync.conf.
+ # TODO Use the new file framework so the path is not exposed.
+ if not live or os.path.exists(settings.corosync_conf_file):
+ corosync_conf = env.get_corosync_conf()
cib = env.get_cib()
if verbose:
ticket_status_text, ticket_status_stderr, ticket_status_retval = (
@@ -97,7 +104,7 @@ def full_cluster_status_plaintext(
except LibraryError:
pass
local_services_status = _get_local_services_status(runner)
- if verbose:
+ if verbose and corosync_conf:
node_name_list, node_names_report_list = get_existing_nodes_names(
corosync_conf
)
@@ -117,8 +124,15 @@ def full_cluster_status_plaintext(
if report_processor.has_errors:
raise LibraryError()
+ cluster_name = (
+ corosync_conf.get_cluster_name()
+ if corosync_conf
+ else nvpair.get_value(
+ "cluster_property_set", get_crm_config(cib), "cluster-name", ""
+ )
+ )
parts = []
- parts.append(f"Cluster name: {corosync_conf.get_cluster_name()}")
+ parts.append(f"Cluster name: {cluster_name}")
if warning_list:
parts.extend(["", "WARNINGS:"] + warning_list + [""])
parts.append(status_text)
@@ -136,7 +150,7 @@ def full_cluster_status_plaintext(
else:
parts.extend(indent(ticket_status_text.splitlines()))
if live:
- if verbose:
+ if verbose and corosync_conf:
parts.extend(["", "PCSD Status:"])
parts.extend(indent(
_format_node_reachability(node_name_list, node_reachability)
diff --git a/pcs_test/tier0/lib/commands/test_status.py b/pcs_test/tier0/lib/commands/test_status.py
index 06878668..7d54d579 100644
--- a/pcs_test/tier0/lib/commands/test_status.py
+++ b/pcs_test/tier0/lib/commands/test_status.py
@@ -1,6 +1,7 @@
from textwrap import dedent
from unittest import TestCase
+from pcs import settings
from pcs.common import file_type_codes, report_codes
from pcs.lib.commands import status
from pcs_test.tools import fixture
@@ -9,16 +10,33 @@ from pcs_test.tools.misc import read_test_resource as rc_read
class FullClusterStatusPlaintext(TestCase):
+ # pylint: disable=too-many-public-methods
def setUp(self):
self.env_assist, self.config = get_env_tools(self)
self.node_name_list = ["node1", "node2", "node3"]
self.maxDiff = None
+ @staticmethod
+ def _fixture_xml_clustername(name):
+ return """
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair
+ id="cib-bootstrap-options-cluster-name"
+ name="cluster-name" value="{name}"
+ />
+ </cluster_property_set>
+ </crm_config>
+ """.format(
+ name=name
+ )
+
def _fixture_config_live_minimal(self):
(self.config
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load()
.runner.cib.load(resources="""
<resources>
@@ -30,6 +48,25 @@ class FullClusterStatusPlaintext(TestCase):
)
)
+ def _fixture_config_live_remote_minimal(self):
+ (
+ self.config.runner.pcmk.load_state_plaintext(
+ stdout="crm_mon cluster status",
+ )
+ .fs.exists(settings.corosync_conf_file, return_value=False)
+ .runner.cib.load(
+ optional_in_conf=self._fixture_xml_clustername("test-cib"),
+ resources="""
+ <resources>
+ <primitive id="S" class="stonith" type="fence_dummy" />
+ </resources>
+ """,
+ )
+ .runner.systemctl.is_active(
+ "sbd", is_active=False, name="runner.systemctl.is_active.sbd"
+ )
+ )
+
def _fixture_config_local_daemons(
self,
corosync_enabled=True, corosync_active=True,
@@ -150,6 +187,7 @@ class FullClusterStatusPlaintext(TestCase):
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load_content("invalid corosync conf")
)
self.env_assist.assert_raise_library_error(
@@ -170,6 +208,7 @@ class FullClusterStatusPlaintext(TestCase):
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load()
.runner.cib.load_content(
"some stdout", stderr="cib load error", returncode=1
@@ -214,6 +253,7 @@ class FullClusterStatusPlaintext(TestCase):
verbose=True,
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load(node_name_list=self.node_name_list)
.runner.cib.load(resources="""
<resources>
@@ -254,6 +294,82 @@ class FullClusterStatusPlaintext(TestCase):
)
)
+ def test_success_live_remote_node(self):
+ self._fixture_config_live_remote_minimal()
+ self._fixture_config_local_daemons(
+ corosync_enabled=False,
+ corosync_active=False,
+ pacemaker_enabled=False,
+ pacemaker_active=False,
+ pacemaker_remote_enabled=True,
+ pacemaker_remote_active=True,
+ )
+ self.assertEqual(
+ status.full_cluster_status_plaintext(self.env_assist.get_env()),
+ dedent(
+ """\
+ Cluster name: test-cib
+ crm_mon cluster status
+
+ Daemon Status:
+ corosync: inactive/disabled
+ pacemaker: inactive/disabled
+ pacemaker_remote: active/enabled
+ pcsd: active/enabled"""
+ ),
+ )
+
+ def test_success_live_remote_node_verbose(self):
+ (
+ self.config.runner.pcmk.can_fence_history_status(
+ stderr="not supported"
+ )
+ .runner.pcmk.load_state_plaintext(
+ verbose=True, stdout="crm_mon cluster status",
+ )
+ .fs.exists(settings.corosync_conf_file, return_value=False)
+ .runner.cib.load(
+ optional_in_conf=self._fixture_xml_clustername("test-cib"),
+ resources="""
+ <resources>
+ <primitive id="S" class="stonith" type="fence_dummy" />
+ </resources>
+ """,
+ )
+ .runner.pcmk.load_ticket_state_plaintext(stdout="ticket status")
+ .runner.systemctl.is_active(
+ "sbd", is_active=False, name="runner.systemctl.is_active.sbd"
+ )
+ )
+ self._fixture_config_local_daemons(
+ corosync_enabled=False,
+ corosync_active=False,
+ pacemaker_enabled=False,
+ pacemaker_active=False,
+ pacemaker_remote_enabled=True,
+ pacemaker_remote_active=True,
+ )
+
+ self.assertEqual(
+ status.full_cluster_status_plaintext(
+ self.env_assist.get_env(), verbose=True
+ ),
+ dedent(
+ """\
+ Cluster name: test-cib
+ crm_mon cluster status
+
+ Tickets:
+ ticket status
+
+ Daemon Status:
+ corosync: inactive/disabled
+ pacemaker: inactive/disabled
+ pacemaker_remote: active/enabled
+ pcsd: active/enabled"""
+ ),
+ )
+
def test_succes_mocked(self):
(self.config
.env.set_corosync_conf_data(rc_read("corosync.conf"))
@@ -316,6 +432,7 @@ class FullClusterStatusPlaintext(TestCase):
fence_history=True,
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load(node_name_list=self.node_name_list)
.runner.cib.load(resources="""
<resources>
@@ -365,6 +482,7 @@ class FullClusterStatusPlaintext(TestCase):
verbose=True,
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load(node_name_list=self.node_name_list)
.runner.cib.load(resources="""
<resources>
@@ -421,6 +539,7 @@ class FullClusterStatusPlaintext(TestCase):
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load()
.runner.cib.load()
.runner.systemctl.is_active(
@@ -453,6 +572,7 @@ class FullClusterStatusPlaintext(TestCase):
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load()
.runner.cib.load()
.runner.systemctl.is_active(
@@ -481,6 +601,7 @@ class FullClusterStatusPlaintext(TestCase):
.runner.pcmk.load_state_plaintext(
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load()
.runner.cib.load(resources="""
<resources>
@@ -539,6 +660,7 @@ class FullClusterStatusPlaintext(TestCase):
verbose=True,
stdout="crm_mon cluster status",
)
+ .fs.exists(settings.corosync_conf_file, return_value=True)
.corosync_conf.load(node_name_list=self.node_name_list)
.runner.cib.load(resources="""
<resources>
--
2.25.4

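The substance of the patch above: `pcs status` previously read corosync.conf unconditionally, which fails on Pacemaker remote nodes where that file does not exist. When the file is missing, the command now falls back to the `cluster-name` property stored in the CIB's crm_config section. A standalone sketch of that fallback lookup, using plain ElementTree instead of pcs's internal `nvpair` helper (the XML mirrors the test fixture in the patch):

```python
import os.path
import xml.etree.ElementTree as ET

# The default path checked by the patch via settings.corosync_conf_file.
COROSYNC_CONF = "/etc/corosync/corosync.conf"

CIB = """\
<cib>
  <configuration>
    <crm_config>
      <cluster_property_set id="cib-bootstrap-options">
        <nvpair id="cib-bootstrap-options-cluster-name"
                name="cluster-name" value="test-cib"/>
      </cluster_property_set>
    </crm_config>
  </configuration>
</cib>
"""

def cluster_name_from_cib(cib_xml: str, default: str = "") -> str:
    # Mirrors nvpair.get_value("cluster_property_set", crm_config,
    # "cluster-name", "") from the patch, in plain ElementTree.
    root = ET.fromstring(cib_xml)
    nvpair = root.find(
        ".//crm_config/cluster_property_set/nvpair[@name='cluster-name']"
    )
    return nvpair.get("value", default) if nvpair is not None else default

if os.path.exists(COROSYNC_CONF):
    print("full cluster node: read the name from corosync.conf")
else:
    print("remote node, name from CIB:", cluster_name_from_cib(CIB))
```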
SOURCES/bz1838084-01-fix-ruby-daemon-closing-connection-after-30s.patch

@@ -0,0 +1,39 @@
From 0cb9637f1962ad6be9e977b4b971b823af407c2d Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Thu, 14 May 2020 16:42:32 +0200
Subject: [PATCH 3/3] fix ruby daemon closing connection after 30s
---
pcs/daemon/ruby_pcsd.py | 2 +-
pcsd/rserver.rb | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/pcs/daemon/ruby_pcsd.py b/pcs/daemon/ruby_pcsd.py
index 53c53eaf..b640752d 100644
--- a/pcs/daemon/ruby_pcsd.py
+++ b/pcs/daemon/ruby_pcsd.py
@@ -127,7 +127,7 @@ class Wrapper:
def prepare_curl_callback(self, curl):
curl.setopt(pycurl.UNIX_SOCKET_PATH, self.__pcsd_ruby_socket)
- curl.setopt(pycurl.TIMEOUT, 70)
+ curl.setopt(pycurl.TIMEOUT, 0)
async def send_to_ruby(self, request: RubyDaemonRequest):
try:
diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb
index 4b58f252..08eceb79 100644
--- a/pcsd/rserver.rb
+++ b/pcsd/rserver.rb
@@ -63,7 +63,7 @@ use TornadoCommunicationMiddleware
require 'pcsd'
::Rack::Handler.get('thin').run(Sinatra::Application, {
- :Host => PCSD_RUBY_SOCKET,
+ :Host => PCSD_RUBY_SOCKET, :timeout => 0
}) do |server|
puts server.class
server.threaded = true
--
2.25.4

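Both sides of the Python-to-Ruby bridge enforced a timeout: pycurl aborted proxied requests after 70 seconds, and Thin closed connections after its default 30 seconds, which is what cut off long-running pcsd operations. The patch sets both to 0, which libcurl and Thin treat as "no timeout". A minimal sketch of the client side (the socket path is an assumption, not taken from this diff):

```python
import pycurl

PCSD_RUBY_SOCKET = "/var/run/pcsd-ruby.socket"  # assumed socket path

def prepare_curl_callback(curl: pycurl.Curl) -> None:
    # Talk to the ruby daemon over its UNIX socket rather than TCP.
    curl.setopt(pycurl.UNIX_SOCKET_PATH, PCSD_RUBY_SOCKET)
    # CURLOPT_TIMEOUT == 0 means "never time out the transfer", matching
    # the :timeout => 0 passed to Thin on the server side.
    curl.setopt(pycurl.TIMEOUT, 0)
```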
SOURCES/bz1840158-01-fix-inability-to-create-colocation-const.-web-ui.patch

@@ -0,0 +1,25 @@
From 5175507f22adffcb443f9f89bda9705599dd89e9 Mon Sep 17 00:00:00 2001
From: Ivan Devat <idevat@redhat.com>
Date: Thu, 7 May 2020 17:11:12 +0200
Subject: [PATCH 2/3] fix inability to create colocation const. (web ui)
---
pcsd/pcs.rb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 9a0efb46..59492d20 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -187,7 +187,7 @@ def add_colocation_constraint(
score = "INFINITY"
end
command = [
- PCS, "constraint", "colocation", "add", resourceA, resourceB, score
+ PCS, "constraint", "colocation", "add", resourceA, "with", resourceB, score
]
command << '--force' if force
stdout, stderr, retval = run_cmd(auth_user, *command)
--
2.25.4

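The one-word fix above restores the CLI syntax pcs expects: `pcs constraint colocation add <source> with <target> [score]`. Without the literal `with`, pcs cannot tell where the source resource specification ends, so the request built by the web UI failed. A hedged Python equivalent of the corrected Ruby helper:

```python
import subprocess

def add_colocation_constraint(resource_a: str, resource_b: str,
                              score: str = "INFINITY", force: bool = False):
    # pcs 0.10 syntax: constraint colocation add <src> with <tgt> <score>
    cmd = ["pcs", "constraint", "colocation", "add",
           resource_a, "with", resource_b, score]
    if force:
        cmd.append("--force")
    return subprocess.run(cmd, capture_output=True, text=True, check=False)
```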
SPECS/pcs.spec

@@ -1,6 +1,6 @@
 Name: pcs
 Version: 0.10.4
-Release: 6%{?dist}
+Release: 6%{?dist}.1
 # https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses
 # GPLv2: pcs
 # ASL 2.0: tornado
@@ -30,7 +30,7 @@ ExclusiveArch: i686 x86_64 s390x ppc64le aarch64
 %global version_rubygem_ethon 0.11.0
 %global version_rubygem_eventmachine 1.2.7
 %global version_rubygem_ffi 1.9.25
-%global version_rubygem_json 2.1.0
+%global version_rubygem_json 2.3.0
 %global version_rubygem_mustermann 1.0.3
 %global version_rubygem_open4 1.3.4
 %global version_rubygem_rack 2.0.6
@@ -113,6 +113,9 @@ Patch10: bz1783106-02-send-request-from-python-to-ruby-more-directly.patch
 # Downstream patches do not come from upstream. They adapt pcs for specific
 # RHEL needs.
 Patch101: do-not-support-cluster-setup-with-udp-u-transport.patch
+Patch102: bz1832914-01-fix-running-pcs-status-on-remote-nodes.patch
+Patch103: bz1838084-01-fix-ruby-daemon-closing-connection-after-30s.patch
+Patch104: bz1840158-01-fix-inability-to-create-colocation-const.-web-ui.patch

 # git for patches
 BuildRequires: git
@@ -285,6 +288,9 @@ update_times_patch %{PATCH8}
 update_times_patch %{PATCH9}
 update_times_patch %{PATCH10}
 update_times_patch %{PATCH101}
+update_times_patch %{PATCH102}
+update_times_patch %{PATCH103}
+update_times_patch %{PATCH104}

 cp -f %SOURCE1 pcsd/public/images
 # prepare dirs/files necessary for building web ui
@@ -552,6 +558,13 @@ remove_all_tests
 %license pyagentx_LICENSE.txt

 %changelog
+* Wed May 27 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.4-6.el8_2.1
+- Fixed running pcs status on remote nodes
+- Fixed ruby daemon closing connection after 30s
+- Fixed inability to create colocation constraint in webUI
+- Updated bundled rubygem-json
+- Resolves: rhbz#1832914 rhbz#1838084 rhbz#1840154 rhbz#1840158
+
 * Fri Mar 20 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.4-6
 - Fixed communication between python and ruby daemons
 - Resolves: rhbz#1783106
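With the three patches in SOURCES/ and registered as Patch102-Patch104, a quick sanity check is to run only the %prep stage so rpmbuild applies them against the pcs-0.10.4 tarball. A sketch, assuming the repository's SPECS/SOURCES layout and skipping build-dependency checks:

```python
import os
import subprocess

# rpmbuild -bp stops after %prep (source unpack plus patch application);
# --nodeps skips BuildRequires checks on a machine without them installed.
subprocess.run(
    ["rpmbuild", "-bp", "--nodeps",
     "--define", f"_topdir {os.getcwd()}",
     "SPECS/pcs.spec"],
    check=True,
)
```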