commit 2512d4a118e2934cae48314ef5f9c2c36d9e90ca Author: CentOS Sources Date: Tue Feb 16 02:41:01 2021 -0500 import resource-agents-4.1.1-68.el8_3.1 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9640a61 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +SOURCES/ClusterLabs-resource-agents-e711383f.tar.gz +SOURCES/aliyun-cli-2.1.10.tar.gz +SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz +SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz +SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz +SOURCES/colorama-0.3.3.tar.gz +SOURCES/google-cloud-sdk-241.0.0-linux-x86_64.tar.gz +SOURCES/httplib2-0.18.1.tar.gz +SOURCES/pycryptodome-3.6.4.tar.gz +SOURCES/pyroute2-0.4.13.tar.gz diff --git a/.resource-agents.metadata b/.resource-agents.metadata new file mode 100644 index 0000000..546b20f --- /dev/null +++ b/.resource-agents.metadata @@ -0,0 +1,10 @@ +0358e1cb7fe86b2105bd2646cbe86f3c0273844a SOURCES/ClusterLabs-resource-agents-e711383f.tar.gz +306e131d8908ca794276bfe3a0b55ccc3bbd482f SOURCES/aliyun-cli-2.1.10.tar.gz +0a56f6d9ed2014a363486d33b63eca094379be06 SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz +c2a98b9a1562d223a76514f05028488ca000c395 SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz +f14647a4d37a9a254c4e711b95a7654fc418e41e SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz +0fe5bd8bca54dd71223778a1e0bcca9af324abb1 SOURCES/colorama-0.3.3.tar.gz +876e2b0c0e3031c6e6101745acd08e4e9f53d6a9 SOURCES/google-cloud-sdk-241.0.0-linux-x86_64.tar.gz +c5d22ce6660999633154927684eb9b799123e569 SOURCES/httplib2-0.18.1.tar.gz +326a73f58a62ebee00c11a12cfdd838b196e0e8e SOURCES/pycryptodome-3.6.4.tar.gz +147149db11104c06d405fd077dcd2aa1c345f109 SOURCES/pyroute2-0.4.13.tar.gz diff --git a/SOURCES/1-configure-add-python-path-detection.patch b/SOURCES/1-configure-add-python-path-detection.patch new file mode 100644 index 0000000..f1ed530 --- /dev/null +++ b/SOURCES/1-configure-add-python-path-detection.patch @@ -0,0 +1,29 @@ +From 266e10a719a396a3a522e4b0ce4271a372e4f6f1 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 13 Jul 2018 08:59:45 +0200 +Subject: [PATCH 1/3] configure: add Python path detection + +--- + configure.ac | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/configure.ac b/configure.ac +index 90ed2453..bdf057d3 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -501,6 +501,12 @@ AC_SUBST(PING) + AC_SUBST(RM) + AC_SUBST(TEST) + ++AM_PATH_PYTHON ++if test -z "$PYTHON"; then ++ echo "*** Essential program python not found" 1>&2 ++ exit 1 ++fi ++ + AC_PATH_PROGS(ROUTE, route) + AC_DEFINE_UNQUOTED(ROUTE, "$ROUTE", path to route command) + +-- +2.17.1 + diff --git a/SOURCES/10-gcloud-support-info.patch b/SOURCES/10-gcloud-support-info.patch new file mode 100644 index 0000000..ef96ca5 --- /dev/null +++ b/SOURCES/10-gcloud-support-info.patch @@ -0,0 +1,25 @@ +diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py +--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 1980-01-01 09:00:00.000000000 +0100 ++++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 2019-04-04 11:59:47.592768577 +0200 +@@ -900,6 +900,9 @@ + return """\ + For detailed information on this command and its flags, run: + {command_path} --help ++ ++WARNING: {command_path} is only supported for "{command_path} init" and for use ++with the agents in resource-agents. 
+ """.format(command_path=' '.join(command.GetPath())) + + +diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py +--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 1980-01-01 09:00:00.000000000 +0100 ++++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 2019-04-04 12:00:23.991142694 +0200 +@@ -84,7 +84,7 @@ + + pkg_root = os.path.dirname(os.path.dirname(surface.__file__)) + loader = cli.CLILoader( +- name='gcloud', ++ name='gcloud-ra', + command_root_directory=os.path.join(pkg_root, 'surface'), + allow_non_existing_modules=True, + version_func=VersionFunc, diff --git a/SOURCES/2-ci-skip-python-agents-in-shellcheck.patch b/SOURCES/2-ci-skip-python-agents-in-shellcheck.patch new file mode 100644 index 0000000..fd891e0 --- /dev/null +++ b/SOURCES/2-ci-skip-python-agents-in-shellcheck.patch @@ -0,0 +1,24 @@ +From 059effc058758c1294d80f03741bf5c078f1498d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 13 Jul 2018 13:22:56 +0200 +Subject: [PATCH 2/3] CI: skip Python agents in shellcheck + +--- + ci/build.sh | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/ci/build.sh b/ci/build.sh +index 608387ad..c331e9ab 100755 +--- a/ci/build.sh ++++ b/ci/build.sh +@@ -58,6 +58,7 @@ check_all_executables() { + echo "Checking executables and .sh files..." + while read -r script; do + file --mime "$script" | grep 'charset=binary' >/dev/null 2>&1 && continue ++ file --mime "$script" | grep 'text/x-python' >/dev/null 2>&1 && continue + head=$(head -n1 "$script") + [[ "$head" =~ .*ruby.* ]] && continue + [[ "$head" =~ .*zsh.* ]] && continue +-- +2.17.1 + diff --git a/SOURCES/3-gcp-vpc-move-vip.patch b/SOURCES/3-gcp-vpc-move-vip.patch new file mode 100644 index 0000000..75beb19 --- /dev/null +++ b/SOURCES/3-gcp-vpc-move-vip.patch @@ -0,0 +1,646 @@ +From 92da4155d881e9ac2dce3a51c6953817349d164a Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Mon, 25 Jun 2018 11:03:51 -0300 +Subject: [PATCH 1/4] gcp-vpc-move-vip.in: manage ip alias + +Add a resource agent to manage ip alias in the cluster. + +start: + Check if any machine in hostlist has the alias_ip assigned and + disassociate it. + Assign alias_ip to the current machine. + +stop: + Disassociate the alias_ip from the current machine. + +status/monitor: + Check if alias_ip is assigned with the current machine. + +--- + +This is a port to the following bash script to python: +https://storage.googleapis.com/sapdeploy/pacemaker-gcp/alias + +The problem with the bash script is the use of gcloud whose command line +API is not stable. + +ocf-tester.in results: + + > sudo ./tools/ocf-tester.in -o alias_ip='10.128.1.0/32' -o stackdriver_logging=yes -n gcp-vpc-move-vip.in heartbeat/gcp-vpc-move-vip.in + Beginning tests for heartbeat/gcp-vpc-move-vip.in... + ./tools/ocf-tester.in: line 226: cd: @datadir@/resource-agents: No such file or directory + close failed in file object destructor: + sys.excepthook is missing + lost sys.stderr + * rc=1: Your agent produces meta-data which does not conform to ra-api-1.dtd + Tests failed: heartbeat/gcp-vpc-move-vip.in failed 1 tests + +The only test faillig is the meta-data, but all the agents that I tried +also fails on this. If this is a concern, could you please point me out +to a test which succeeds so I can check what I am doing differently? 
+ +This commit can also be viewed at: + https://github.com/collabora-gce/resource-agents/tree/alias + +Thanks +--- + configure.ac | 1 + + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/gcp-vpc-move-vip.in | 299 ++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 302 insertions(+) + create mode 100755 heartbeat/gcp-vpc-move-vip.in + +diff --git a/configure.ac b/configure.ac +index bdf057d33..3d8f9ca74 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -959,6 +959,7 @@ AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate]) + AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88]) + AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip]) ++AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip]) + AC_CONFIG_FILES([heartbeat/iSCSILogicalUnit], [chmod +x heartbeat/iSCSILogicalUnit]) + AC_CONFIG_FILES([heartbeat/iSCSITarget], [chmod +x heartbeat/iSCSITarget]) + AC_CONFIG_FILES([heartbeat/jira], [chmod +x heartbeat/jira]) +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index c59126d13..e9eaf369f 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -114,6 +114,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_galera.7 \ + ocf_heartbeat_garbd.7 \ + ocf_heartbeat_gcp-vpc-move-ip.7 \ ++ ocf_heartbeat_gcp-vpc-move-vip.7 \ + ocf_heartbeat_iSCSILogicalUnit.7 \ + ocf_heartbeat_iSCSITarget.7 \ + ocf_heartbeat_iface-bridge.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 4f5059e27..36b271956 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -111,6 +111,7 @@ ocf_SCRIPTS = AoEtarget \ + galera \ + garbd \ + gcp-vpc-move-ip \ ++ gcp-vpc-move-vip \ + iSCSILogicalUnit \ + iSCSITarget \ + ids \ +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +new file mode 100755 +index 000000000..4954e11df +--- /dev/null ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -0,0 +1,299 @@ ++#!/usr/bin/env python ++# --------------------------------------------------------------------- ++# Copyright 2016 Google Inc. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); ++# you may not use this file except in compliance with the License. ++# You may obtain a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, ++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++# See the License for the specific language governing permissions and ++# limitations under the License. ++# --------------------------------------------------------------------- ++# Description: Google Cloud Platform - Floating IP Address (Alias) ++# --------------------------------------------------------------------- ++ ++import json ++import logging ++import os ++import sys ++import time ++ ++import googleapiclient.discovery ++ ++if sys.version_info >= (3, 0): ++ # Python 3 imports. ++ import urllib.parse as urlparse ++ import urllib.request as urlrequest ++else: ++ # Python 2 imports. 
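++  # urllib supplies urlencode, and urllib2 supplies Request, build_opener
++  # and ProxyHandler, mirroring the Python 3 names imported above.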
++ import urllib as urlparse ++ import urllib2 as urlrequest ++ ++ ++CONN = None ++THIS_VM = None ++OCF_SUCCESS = 0 ++OCF_ERR_GENERIC = 1 ++OCF_ERR_CONFIGURED = 6 ++OCF_NOT_RUNNING = 7 ++METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' ++METADATA_HEADERS = {'Metadata-Flavor': 'Google'} ++METADATA = \ ++''' ++ ++ ++ 1.0 ++ Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance ++ Floating IP Address on Google Cloud Platform ++ ++ ++ List of hosts in the cluster ++ Host list ++ ++ ++ ++ If enabled (set to true), IP failover logs will be posted to stackdriver logging ++ Stackdriver-logging support ++ ++ ++ ++ IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ ++ ++ ++ Subnet name for the Alias IP2 ++ Subnet name for the Alias IP ++ ++ ++ ++ ++ ++ ++ ++ ++ ++''' ++ ++ ++def get_metadata(metadata_key, params=None, timeout=None): ++ """Performs a GET request with the metadata headers. ++ ++ Args: ++ metadata_key: string, the metadata to perform a GET request on. ++ params: dictionary, the query parameters in the GET request. ++ timeout: int, timeout in seconds for metadata requests. ++ ++ Returns: ++ HTTP response from the GET request. ++ ++ Raises: ++ urlerror.HTTPError: raises when the GET request fails. ++ """ ++ timeout = timeout or 60 ++ metadata_url = os.path.join(METADATA_SERVER, metadata_key) ++ params = urlparse.urlencode(params or {}) ++ url = '%s?%s' % (metadata_url, params) ++ request = urlrequest.Request(url, headers=METADATA_HEADERS) ++ request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) ++ return request_opener.open(request, timeout=timeout * 1.1).read() ++ ++ ++def get_instance(project, zone, instance): ++ request = CONN.instances().get( ++ project=project, zone=zone, instance=instance) ++ return request.execute() ++ ++ ++def get_network_ifaces(project, zone, instance): ++ return get_instance(project, zone, instance)['networkInterfaces'] ++ ++ ++def wait_for_operation(project, zone, operation): ++ while True: ++ result = CONN.zoneOperations().get( ++ project=project, ++ zone=zone, ++ operation=operation['name']).execute() ++ ++ if result['status'] == 'DONE': ++ if 'error' in result: ++ raise Exception(result['error']) ++ return ++ time.sleep(1) ++ ++ ++def set_alias(project, zone, instance, alias, alias_range_name=None): ++ fingerprint = get_network_ifaces(project, zone, instance)[0]['fingerprint'] ++ body = { ++ 'aliasIpRanges': [], ++ 'fingerprint': fingerprint ++ } ++ if alias: ++ obj = {'ipCidrRange': alias} ++ if alias_range_name: ++ obj['subnetworkRangeName'] = alias_range_name ++ body['aliasIpRanges'].append(obj) ++ ++ request = CONN.instances().updateNetworkInterface( ++ instance=instance, networkInterface='nic0', project=project, zone=zone, ++ body=body) ++ operation = request.execute() ++ wait_for_operation(project, zone, operation) ++ ++ ++def get_alias(project, zone, instance): ++ iface = get_network_ifaces(project, zone, instance) ++ try: ++ return iface[0]['aliasIpRanges'][0]['ipCidrRange'] ++ except KeyError: ++ return '' ++ ++ ++def get_localhost_alias(): ++ net_iface = get_metadata('instance/network-interfaces', {'recursive': True}) ++ net_iface = json.loads(net_iface.decode('utf-8')) ++ try: ++ return net_iface[0]['ipAliases'][0] ++ except (KeyError, IndexError): ++ return '' ++ ++ ++def get_zone(project, instance): ++ request = 
CONN.instances().aggregatedList(project=project) ++ while request is not None: ++ response = request.execute() ++ zones = response.get('items', {}) ++ for zone in zones.values(): ++ for inst in zone.get('instances', []): ++ if inst['name'] == instance: ++ return inst['zone'].split("/")[-1] ++ request = CONN.instances().aggregatedList_next( ++ previous_request=request, previous_response=response) ++ raise Exception("Unable to find instance %s" % (instance)) ++ ++ ++def gcp_alias_start(alias): ++ if not alias: ++ sys.exit(OCF_ERR_CONFIGURED) ++ my_alias = get_localhost_alias() ++ my_zone = get_metadata('instance/zone').split('/')[-1] ++ project = get_metadata('project/project-id') ++ ++ # If I already have the IP, exit. If it has an alias IP that isn't the VIP, ++ # then remove it ++ if my_alias == alias: ++ logging.info( ++ '%s already has %s attached. No action required' % (THIS_VM, alias)) ++ sys.exit(OCF_SUCCESS) ++ elif my_alias: ++ logging.info('Removing %s from %s' % (my_alias, THIS_VM)) ++ set_alias(project, my_zone, THIS_VM, '') ++ ++ # Loops through all hosts & remove the alias IP from the host that has it ++ hostlist = os.environ.get('OCF_RESKEY_hostlist', '') ++ hostlist.replace(THIS_VM, '') ++ for host in hostlist.split(): ++ host_zone = get_zone(project, host) ++ host_alias = get_alias(project, host_zone, host) ++ if alias == host_alias: ++ logging.info( ++ '%s is attached to %s - Removing all alias IP addresses from %s' % ++ (alias, host, host)) ++ set_alias(project, host_zone, host, '') ++ break ++ ++ # add alias IP to localhost ++ set_alias( ++ project, my_zone, THIS_VM, alias, ++ os.environ.get('OCF_RESKEY_alias_range_name')) ++ ++ # Check the IP has been added ++ my_alias = get_localhost_alias() ++ if alias == my_alias: ++ logging.info('Finished adding %s to %s' % (alias, THIS_VM)) ++ elif my_alias: ++ logging.error( ++ 'Failed to add IP. 
%s has an IP attached but it isn\'t %s' % ++ (THIS_VM, alias)) ++ sys.exit(OCF_ERR_GENERIC) ++ else: ++ logging.error('Failed to add IP address %s to %s' % (alias, THIS_VM)) ++ sys.exit(OCF_ERR_GENERIC) ++ ++ ++def gcp_alias_stop(alias): ++ if not alias: ++ sys.exit(OCF_ERR_CONFIGURED) ++ my_alias = get_localhost_alias() ++ my_zone = get_metadata('instance/zone').split('/')[-1] ++ project = get_metadata('project/project-id') ++ ++ if my_alias == alias: ++ logging.info('Removing %s from %s' % (my_alias, THIS_VM)) ++ set_alias(project, my_zone, THIS_VM, '') ++ ++ ++def gcp_alias_status(alias): ++ if not alias: ++ sys.exit(OCF_ERR_CONFIGURED) ++ my_alias = get_localhost_alias() ++ if alias == my_alias: ++ logging.info('%s has the correct IP address attached' % THIS_VM) ++ else: ++ sys.exit(OCF_NOT_RUNNING) ++ ++ ++def configure(): ++ global CONN ++ global THIS_VM ++ ++ # Populate global vars ++ CONN = googleapiclient.discovery.build('compute', 'v1') ++ THIS_VM = get_metadata('instance/name') ++ ++ # Prepare logging ++ logging.basicConfig( ++ format='gcp:alias - %(levelname)s - %(message)s', level=logging.INFO) ++ logging.getLogger('googleapiclient').setLevel(logging.WARN) ++ logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging') ++ if logging_env: ++ logging_env = logging_env.lower() ++ if any(x in logging_env for x in ['yes', 'true', 'enabled']): ++ try: ++ import google.cloud.logging.handlers ++ client = google.cloud.logging.Client() ++ handler = google.cloud.logging.handlers.CloudLoggingHandler( ++ client, name=THIS_VM) ++ handler.setLevel(logging.INFO) ++ formatter = logging.Formatter('gcp:alias "%(message)s"') ++ handler.setFormatter(formatter) ++ root_logger = logging.getLogger() ++ root_logger.addHandler(handler) ++ except ImportError: ++ logging.error('Couldn\'t import google.cloud.logging, ' ++ 'disabling Stackdriver-logging support') ++ ++ ++def main(): ++ configure() ++ ++ alias = os.environ.get('OCF_RESKEY_alias_ip') ++ if 'start' in sys.argv[1]: ++ gcp_alias_start(alias) ++ elif 'stop' in sys.argv[1]: ++ gcp_alias_stop(alias) ++ elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: ++ gcp_alias_status(alias) ++ elif 'meta-data' in sys.argv[1]: ++ print(METADATA) ++ else: ++ logging.error('gcp:alias - no such function %s' % str(sys.argv[1])) ++ ++ ++if __name__ == "__main__": ++ main() + +From 0e6ba4894a748664ac1d8ff5b9e8c271f0b04d93 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Thu, 12 Jul 2018 09:01:22 -0300 +Subject: [PATCH 2/4] gcp-vpc-move-vip.in: minor fixes + +- Get hostlist from the project if the parameter is not given +- Verify if alias is present out of each action function +- Don't call configure if 'meta-data' action is given +--- + heartbeat/gcp-vpc-move-vip.in | 40 ++++++++++++++++++++++++++++------------ + 1 file changed, 28 insertions(+), 12 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index 4954e11df..f3d117bda 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -50,7 +50,7 @@ METADATA = \ + Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance + Floating IP Address on Google Cloud Platform + +- ++ + List of hosts in the cluster + Host list + +@@ -177,9 +177,22 @@ def get_zone(project, instance): + raise Exception("Unable to find instance %s" % (instance)) + + ++def get_instances_list(project, exclude): ++ hostlist = [] ++ request = CONN.instances().aggregatedList(project=project) ++ 
while request is not None: ++ response = request.execute() ++ zones = response.get('items', {}) ++ for zone in zones.values(): ++ for inst in zone.get('instances', []): ++ if inst['name'] != exclude: ++ hostlist.append(inst['name']) ++ request = CONN.instances().aggregatedList_next( ++ previous_request=request, previous_response=response) ++ return hostlist ++ ++ + def gcp_alias_start(alias): +- if not alias: +- sys.exit(OCF_ERR_CONFIGURED) + my_alias = get_localhost_alias() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') +@@ -196,8 +209,11 @@ def gcp_alias_start(alias): + + # Loops through all hosts & remove the alias IP from the host that has it + hostlist = os.environ.get('OCF_RESKEY_hostlist', '') +- hostlist.replace(THIS_VM, '') +- for host in hostlist.split(): ++ if hostlist: ++ hostlist.replace(THIS_VM, '').split() ++ else: ++ hostlist = get_instances_list(project, THIS_VM) ++ for host in hostlist: + host_zone = get_zone(project, host) + host_alias = get_alias(project, host_zone, host) + if alias == host_alias: +@@ -227,8 +243,6 @@ def gcp_alias_start(alias): + + + def gcp_alias_stop(alias): +- if not alias: +- sys.exit(OCF_ERR_CONFIGURED) + my_alias = get_localhost_alias() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') +@@ -239,8 +253,6 @@ def gcp_alias_stop(alias): + + + def gcp_alias_status(alias): +- if not alias: +- sys.exit(OCF_ERR_CONFIGURED) + my_alias = get_localhost_alias() + if alias == my_alias: + logging.info('%s has the correct IP address attached' % THIS_VM) +@@ -280,17 +292,21 @@ def configure(): + + + def main(): +- configure() ++ if 'meta-data' in sys.argv[1]: ++ print(METADATA) ++ return + + alias = os.environ.get('OCF_RESKEY_alias_ip') ++ if not alias: ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ configure() + if 'start' in sys.argv[1]: + gcp_alias_start(alias) + elif 'stop' in sys.argv[1]: + gcp_alias_stop(alias) + elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: + gcp_alias_status(alias) +- elif 'meta-data' in sys.argv[1]: +- print(METADATA) + else: + logging.error('gcp:alias - no such function %s' % str(sys.argv[1])) + + +From 1f50c4bc80f23f561a8630c12076707366525899 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Thu, 12 Jul 2018 13:02:16 -0300 +Subject: [PATCH 3/4] gcp-vcp-move-vip.in: implement validate-all + +Also fix some return errors +--- + heartbeat/gcp-vpc-move-vip.in | 47 +++++++++++++++++++++++++++++++------------ + 1 file changed, 34 insertions(+), 13 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index f3d117bda..a90c2de8d 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -22,7 +22,10 @@ import os + import sys + import time + +-import googleapiclient.discovery ++try: ++ import googleapiclient.discovery ++except ImportError: ++ pass + + if sys.version_info >= (3, 0): + # Python 3 imports. 
+@@ -36,6 +39,7 @@ else: + + CONN = None + THIS_VM = None ++ALIAS = None + OCF_SUCCESS = 0 + OCF_ERR_GENERIC = 1 + OCF_ERR_CONFIGURED = 6 +@@ -210,7 +214,7 @@ def gcp_alias_start(alias): + # Loops through all hosts & remove the alias IP from the host that has it + hostlist = os.environ.get('OCF_RESKEY_hostlist', '') + if hostlist: +- hostlist.replace(THIS_VM, '').split() ++ hostlist = hostlist.replace(THIS_VM, '').split() + else: + hostlist = get_instances_list(project, THIS_VM) + for host in hostlist: +@@ -260,14 +264,31 @@ def gcp_alias_status(alias): + sys.exit(OCF_NOT_RUNNING) + + +-def configure(): ++def validate(): ++ global ALIAS + global CONN + global THIS_VM + + # Populate global vars +- CONN = googleapiclient.discovery.build('compute', 'v1') +- THIS_VM = get_metadata('instance/name') ++ try: ++ CONN = googleapiclient.discovery.build('compute', 'v1') ++ except Exception as e: ++ logging.error('Couldn\'t connect with google api: ' + str(e)) ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ try: ++ THIS_VM = get_metadata('instance/name') ++ except Exception as e: ++ logging.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) ++ sys.exit(OCF_ERR_CONFIGURED) + ++ ALIAS = os.environ.get('OCF_RESKEY_alias_ip') ++ if not ALIAS: ++ logging.error('Missing alias_ip parameter') ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ ++def configure_logs(): + # Prepare logging + logging.basicConfig( + format='gcp:alias - %(levelname)s - %(message)s', level=logging.INFO) +@@ -296,19 +317,19 @@ def main(): + print(METADATA) + return + +- alias = os.environ.get('OCF_RESKEY_alias_ip') +- if not alias: +- sys.exit(OCF_ERR_CONFIGURED) ++ validate() ++ if 'validate-all' in sys.argv[1]: ++ return + +- configure() ++ configure_logs() + if 'start' in sys.argv[1]: +- gcp_alias_start(alias) ++ gcp_alias_start(ALIAS) + elif 'stop' in sys.argv[1]: +- gcp_alias_stop(alias) ++ gcp_alias_stop(ALIAS) + elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: +- gcp_alias_status(alias) ++ gcp_alias_status(ALIAS) + else: +- logging.error('gcp:alias - no such function %s' % str(sys.argv[1])) ++ logging.error('no such function %s' % str(sys.argv[1])) + + + if __name__ == "__main__": + +From f11cb236bb348ebee74e962d0ded1cb2fc97bd5f Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Fri, 13 Jul 2018 08:01:02 -0300 +Subject: [PATCH 4/4] gcp-vpc-move-vip.in: minor fixes + +--- + heartbeat/gcp-vpc-move-vip.in | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index a90c2de8d..9fc87242f 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!@PYTHON@ -tt + # --------------------------------------------------------------------- + # Copyright 2016 Google Inc. 
+ # +@@ -59,7 +59,7 @@ METADATA = \ + Host list + + +- ++ + If enabled (set to true), IP failover logs will be posted to stackdriver logging + Stackdriver-logging support + +@@ -80,6 +80,7 @@ METADATA = \ + + + ++ + + ''' + diff --git a/SOURCES/4-gcp-vpc-move-route.patch b/SOURCES/4-gcp-vpc-move-route.patch new file mode 100644 index 0000000..ccd221e --- /dev/null +++ b/SOURCES/4-gcp-vpc-move-route.patch @@ -0,0 +1,632 @@ +From 0ee4c62105ee8f90a43fe0bf8a65bc9b9da2e7e0 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Wed, 18 Jul 2018 11:54:40 -0300 +Subject: [PATCH 1/4] gcp-vpc-move-route.in: python implementation of + gcp-vpc-move-ip.in + +gcloud api is not reliable and it is slow, add a python version of +gcp-vpc-move-ip.in +--- + configure.ac | 1 + + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/gcp-vpc-move-route.in | 441 ++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 444 insertions(+) + create mode 100644 heartbeat/gcp-vpc-move-route.in + +diff --git a/configure.ac b/configure.ac +index 3d8f9ca74..039b4942c 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -960,6 +960,7 @@ AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88]) + AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip]) ++AC_CONFIG_FILES([heartbeat/gcp-vpc-move-route], [chmod +x heartbeat/gcp-vpc-move-route]) + AC_CONFIG_FILES([heartbeat/iSCSILogicalUnit], [chmod +x heartbeat/iSCSILogicalUnit]) + AC_CONFIG_FILES([heartbeat/iSCSITarget], [chmod +x heartbeat/iSCSITarget]) + AC_CONFIG_FILES([heartbeat/jira], [chmod +x heartbeat/jira]) +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index e9eaf369f..3ac0569de 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -115,6 +115,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_garbd.7 \ + ocf_heartbeat_gcp-vpc-move-ip.7 \ + ocf_heartbeat_gcp-vpc-move-vip.7 \ ++ ocf_heartbeat_gcp-vpc-move-route.7 \ + ocf_heartbeat_iSCSILogicalUnit.7 \ + ocf_heartbeat_iSCSITarget.7 \ + ocf_heartbeat_iface-bridge.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 36b271956..d4750bf09 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -112,6 +112,7 @@ ocf_SCRIPTS = AoEtarget \ + garbd \ + gcp-vpc-move-ip \ + gcp-vpc-move-vip \ ++ gcp-vpc-move-route \ + iSCSILogicalUnit \ + iSCSITarget \ + ids \ +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +new file mode 100644 +index 000000000..5f4569baa +--- /dev/null ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -0,0 +1,441 @@ ++#!@PYTHON@ -tt ++# - *- coding: utf- 8 - *- ++# ++# ++# OCF resource agent to move an IP address within a VPC in GCP ++# ++# License: GNU General Public License (GPL) ++# Copyright (c) 2018 Hervé Werner (MFG Labs) ++# Copyright 2018 Google Inc. ++# Based on code from Markus Guertler (aws-vpc-move-ip) ++# All Rights Reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++ ++ ++####################################################################### ++ ++import atexit ++import logging ++import os ++import sys ++import time ++ ++try: ++ import googleapiclient.discovery ++ import pyroute2 ++except ImportError: ++ pass ++ ++if sys.version_info >= (3, 0): ++ # Python 3 imports. ++ import urllib.parse as urlparse ++ import urllib.request as urlrequest ++else: ++ # Python 2 imports. ++ import urllib as urlparse ++ import urllib2 as urlrequest ++ ++ ++OCF_SUCCESS = 0 ++OCF_ERR_GENERIC = 1 ++OCF_ERR_UNIMPLEMENTED = 3 ++OCF_ERR_PERM = 4 ++OCF_ERR_CONFIGURED = 6 ++OCF_NOT_RUNNING = 7 ++GCP_API_URL_PREFIX = 'https://www.googleapis.com/compute/v1' ++METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' ++METADATA_HEADERS = {'Metadata-Flavor': 'Google'} ++METADATA = \ ++''' ++ ++ ++1.0 ++ ++Resource Agent that can move a floating IP addresse within a GCP VPC by changing an ++entry in the routing table. This agent also configures the floating IP locally ++on the instance OS. ++Requirements : ++- IP forwarding must be enabled on all instances in order to be able to ++terminate the route ++- The floating IP address must be choosen so that it is outside all existing ++subnets in the VPC network ++- IAM permissions ++(see https://cloud.google.com/compute/docs/access/iam-permissions) : ++1) compute.routes.delete, compute.routes.get and compute.routes.update on the ++route ++2) compute.networks.updatePolicy on the network (to add a new route) ++3) compute.networks.get on the network (to check the VPC network existence) ++4) compute.routes.list on the project (to check conflicting routes) ++ ++Move IP within a GCP VPC ++ ++ ++ ++ ++If enabled (set to true), IP failover logs will be posted to stackdriver logging ++Stackdriver-logging support ++ ++ ++ ++ ++ ++Floating IP address. Note that this IP must be chosen outside of all existing ++subnet ranges ++ ++Floating IP ++ ++ ++ ++ ++ ++Name of the VPC network ++ ++VPC network ++ ++ ++ ++ ++ ++Name of the network interface ++ ++Network interface name ++ ++ ++ ++ ++ ++Route name ++ ++Route name ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++''' ++ ++ ++class Context(object): ++ __slots__ = 'conn', 'iface_idx', 'instance', 'instance_url', 'interface', \ ++ 'ip', 'iproute', 'project', 'route_name', 'vpc_network', \ ++ 'vpc_network_url', 'zone' ++ ++ ++def wait_for_operation(ctx, response): ++ """Blocks until operation completes. 
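++  Polls the matching zone or global operations endpoint once per second
++  and raises if the completed operation reports an error.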
++ Code from GitHub's GoogleCloudPlatform/python-docs-samples ++ ++ Args: ++ response: dict, a request's response ++ """ ++ def _OperationGetter(response): ++ operation = response[u'name'] ++ if response.get(u'zone'): ++ return ctx.conn.zoneOperations().get( ++ project=ctx.project, zone=ctx.zone, operation=operation) ++ else: ++ return ctx.conn.globalOperations().get( ++ project=ctx.project, operation=operation) ++ ++ while True: ++ result = _OperationGetter(response).execute() ++ ++ if result['status'] == 'DONE': ++ if 'error' in result: ++ raise Exception(result['error']) ++ return result ++ ++ time.sleep(1) ++ ++ ++def get_metadata(metadata_key, params=None, timeout=None): ++ """Performs a GET request with the metadata headers. ++ ++ Args: ++ metadata_key: string, the metadata to perform a GET request on. ++ params: dictionary, the query parameters in the GET request. ++ timeout: int, timeout in seconds for metadata requests. ++ ++ Returns: ++ HTTP response from the GET request. ++ ++ Raises: ++ urlerror.HTTPError: raises when the GET request fails. ++ """ ++ timeout = timeout or 60 ++ metadata_url = os.path.join(METADATA_SERVER, metadata_key) ++ params = urlparse.urlencode(params or {}) ++ url = '%s?%s' % (metadata_url, params) ++ request = urlrequest.Request(url, headers=METADATA_HEADERS) ++ request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) ++ return request_opener.open(request, timeout=timeout * 1.1).read() ++ ++ ++def validate(ctx): ++ if os.geteuid() != 0: ++ logging.error('You must run this agent as root') ++ sys.exit(OCF_ERR_PERM) ++ ++ try: ++ ctx.conn = googleapiclient.discovery.build('compute', 'v1') ++ except Exception as e: ++ logging.error('Couldn\'t connect with google api: ' + str(e)) ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ ctx.ip = os.environ.get('OCF_RESKEY_ip') ++ if not ctx.ip: ++ logging.error('Missing ip parameter') ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ try: ++ ctx.instance = get_metadata('instance/name') ++ ctx.zone = get_metadata('instance/zone').split('/')[-1] ++ ctx.project = get_metadata('project/project-id') ++ except Exception as e: ++ logging.error( ++ 'Instance information not found. 
Is this a GCE instance ?: %s', str(e)) ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ ctx.instance_url = '%s/projects/%s/zones/%s/instances/%s' % ( ++ GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance) ++ ctx.vpc_network = os.environ.get('OCF_RESKEY_vpc_network', 'default') ++ ctx.vpc_network_url = '%s/projects/%s/global/networks/%s' % ( ++ GCP_API_URL_PREFIX, ctx.project, ctx.vpc_network) ++ ctx.interface = os.environ.get('OCF_RESKEY_interface', 'eth0') ++ ctx.route_name = os.environ.get( ++ 'OCF_RESKEY_route_name', 'ra-%s' % os.environ['__SCRIPT_NAME']) ++ ctx.iproute = pyroute2.IPRoute() ++ atexit.register(ctx.iproute.close) ++ idxs = ctx.iproute.link_lookup(ifname=ctx.interface) ++ if not idxs: ++ logging.error('Network interface not found') ++ sys.exit(OCF_ERR_CONFIGURED) ++ ctx.iface_idx = idxs[0] ++ ++ ++def check_conflicting_routes(ctx): ++ fl = '(destRange = "%s*") AND (network = "%s") AND (name != "%s")' % ( ++ ctx.ip, ctx.vpc_network_url, ctx.route_name) ++ request = ctx.conn.routes().list(project=ctx.project, filter=fl) ++ response = request.execute() ++ route_list = response.get('items', None) ++ if route_list: ++ logging.error( ++ 'Conflicting unnmanaged routes for destination %s/32 in VPC %s found : %s', ++ ctx.ip, ctx.vpc_network, str(route_list)) ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ ++def route_release(ctx): ++ request = ctx.conn.routes().delete(project=ctx.project, route=ctx.route_name) ++ wait_for_operation(ctx, request.execute()) ++ ++ ++def ip_monitor(ctx): ++ logging.info('IP monitor: checking local network configuration') ++ ++ def address_filter(addr): ++ for attr in addr['attrs']: ++ if attr[0] == 'IFA_LOCAL': ++ if attr[1] == ctx.ip: ++ return True ++ else: ++ return False ++ ++ route = ctx.iproute.get_addr( ++ index=ctx.iface_idx, match=address_filter) ++ if not route: ++ logging.warn( ++ 'The floating IP %s is not locally configured on this instance (%s)', ++ ctx.ip, ctx.instance) ++ return OCF_NOT_RUNNING ++ ++ logging.debug( ++ 'The floating IP %s is correctly configured on this instance (%s)', ++ ctx.ip, ctx.instance) ++ return OCF_SUCCESS ++ ++ ++def ip_release(ctx): ++ ctx.iproute.addr('del', index=ctx.iface_idx, address=ctx.ip, mask=32) ++ ++ ++def ip_and_route_start(ctx): ++ logging.info('Bringing up the floating IP %s', ctx.ip) ++ ++ # Add a new entry in the routing table ++ # If the route entry exists and is pointing to another instance, take it over ++ ++ # Ensure that there is no route that we are not aware of that is also handling our IP ++ check_conflicting_routes(ctx) ++ ++ # There is no replace API, We need to first delete the existing route if any ++ try: ++ request = ctx.conn.routes().get(project=ctx.project, route=ctx.route_name) ++ request.execute() ++ # TODO: check specific exception for 404 ++ except googleapiclient.errors.HttpError as e: ++ if e.resp.status != 404: ++ raise ++ else: ++ route_release(ctx) ++ ++ route_body = { ++ 'name': ctx.route_name, ++ 'network': ctx.vpc_network_url, ++ 'destRange': '%s/32' % ctx.ip, ++ 'nextHopInstance': ctx.instance_url, ++ } ++ try: ++ request = ctx.conn.routes().insert(project=ctx.project, body=route_body) ++ wait_for_operation(ctx, request.execute()) ++ except googleapiclient.errors.HttpError: ++ try: ++ request = ctx.conn.networks().get( ++ project=ctx.project, network=ctx.vpc_network) ++ request.execute() ++ except googleapiclient.errors.HttpError as e: ++ if e.resp.status == 404: ++ logging.error('VPC network not found') ++ sys.exit(OCF_ERR_CONFIGURED) ++ else: ++ raise ++ else: ++ raise ++ ++ # 
Configure the IP address locally ++ # We need to release the IP first ++ if ip_monitor(ctx) == OCF_SUCCESS: ++ ip_release(ctx) ++ ++ ctx.iproute.addr('add', index=ctx.iface_idx, address=ctx.ip, mask=32) ++ ctx.iproute.link('set', index=ctx.iface_idx, state='up') ++ logging.info('Successfully brought up the floating IP %s', ctx.ip) ++ ++ ++def route_monitor(ctx): ++ logging.info('GCP route monitor: checking route table') ++ ++ # Ensure that there is no route that we are not aware of that is also handling our IP ++ check_conflicting_routes ++ ++ try: ++ request = ctx.conn.routes().get(project=ctx.project, route=ctx.route_name) ++ response = request.execute() ++ except googleapiclient.errors.HttpError as e: ++ if 'Insufficient Permission' in e.content: ++ return OCF_ERR_PERM ++ elif e.resp.status == 404: ++ return OCF_NOT_RUNNING ++ else: ++ raise ++ ++ routed_to_instance = response.get('nextHopInstance', '') ++ instance_url = '%s/projects/%s/zones/%s/instances/%s' % ( ++ GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance) ++ if routed_to_instance != instance_url: ++ logging.warn( ++ 'The floating IP %s is not routed to this instance (%s) but to instance %s', ++ ctx.ip, ctx.instance, routed_to_instance.split('/')[-1]) ++ return OCF_NOT_RUNNING ++ ++ logging.debug( ++ 'The floating IP %s is correctly routed to this instance (%s)', ++ ctx.ip, ctx.instance) ++ return OCF_SUCCESS ++ ++ ++def ip_and_route_stop(ctx): ++ logging.info('Bringing down the floating IP %s', ctx.ip) ++ ++ # Delete the route entry ++ # If the route entry exists and is pointing to another instance, don't touch it ++ if route_monitor(ctx) == OCF_NOT_RUNNING: ++ logging.info( ++ 'The floating IP %s is already not routed to this instance (%s)', ++ ctx.ip, ctx.instance) ++ else: ++ route_release(ctx) ++ ++ if ip_monitor(ctx) == OCF_NOT_RUNNING: ++ logging.info('The floating IP %s is already down', ctx.ip) ++ else: ++ ip_release(ctx) ++ ++ ++def configure_logs(ctx): ++ # Prepare logging ++ logging.basicConfig( ++ format='gcp:route - %(levelname)s - %(message)s', level=logging.INFO) ++ logging.getLogger('googleapiclient').setLevel(logging.WARN) ++ logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging') ++ if logging_env: ++ logging_env = logging_env.lower() ++ if any(x in logging_env for x in ['yes', 'true', 'enabled']): ++ try: ++ import google.cloud.logging.handlers ++ client = google.cloud.logging.Client() ++ handler = google.cloud.logging.handlers.CloudLoggingHandler( ++ client, name=ctx.instance) ++ handler.setLevel(logging.INFO) ++ formatter = logging.Formatter('gcp:route "%(message)s"') ++ handler.setFormatter(formatter) ++ root_logger = logging.getLogger() ++ root_logger.addHandler(handler) ++ except ImportError: ++ logging.error('Couldn\'t import google.cloud.logging, ' ++ 'disabling Stackdriver-logging support') ++ ++ ++def main(): ++ if 'meta-data' in sys.argv[1]: ++ print(METADATA) ++ return ++ ++ ctx = Context() ++ ++ validate(ctx) ++ if 'validate-all' in sys.argv[1]: ++ return ++ ++ configure_logs(ctx) ++ if 'start' in sys.argv[1]: ++ ip_and_route_start(ctx) ++ elif 'stop' in sys.argv[1]: ++ ip_and_route_stop(ctx) ++ elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: ++ sys.exit(ip_monitor(ctx)) ++ else: ++ usage = 'usage: $0 {start|stop|monitor|status|meta-data|validate-all}' ++ logging.error(usage) ++ sys.exit(OCF_ERR_UNIMPLEMENTED) ++ ++ ++if __name__ == "__main__": ++ main() + +From 6590c99f462403808854114ec1031755e5ce6b36 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Thu, 19 Jul 2018 
12:33:44 -0300 +Subject: [PATCH 2/4] gcp-vpc-move-ip.in: add deprecation message + +--- + heartbeat/gcp-vpc-move-ip.in | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in +index 4a6c343a8..3b8d998b3 100755 +--- a/heartbeat/gcp-vpc-move-ip.in ++++ b/heartbeat/gcp-vpc-move-ip.in +@@ -348,6 +348,8 @@ ip_and_route_stop() { + # + ############################################################################### + ++ocf_log warn "gcp-vpc-move-ip is deprecated, prefer to use gcp-vpc-move-route instead" ++ + case $__OCF_ACTION in + meta-data) metadata + exit $OCF_SUCCESS + +From 73608196d21068c6c2d5fb9f77e3d40179c85fee Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Fri, 20 Jul 2018 08:26:17 -0300 +Subject: [PATCH 3/4] gcp-vpc-move-route.in: move stackdriver parameter + +Move stackdriver parameter to the bottom of metadata list +--- + heartbeat/gcp-vpc-move-route.in | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 5f4569baa..8d5bfff36 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -90,12 +90,6 @@ route + + + +- +-If enabled (set to true), IP failover logs will be posted to stackdriver logging +-Stackdriver-logging support +- +- +- + + + Floating IP address. Note that this IP must be chosen outside of all existing +@@ -128,6 +122,12 @@ Route name + Route name + + ++ ++ ++If enabled (set to true), IP failover logs will be posted to stackdriver logging ++Stackdriver-logging support ++ ++ + + + + +From e54565ec69f809b28337c0471ad0a9b26a64f8bf Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Fri, 20 Jul 2018 08:45:53 -0300 +Subject: [PATCH 4/4] gcp-vpc-move-route.in: minor fixes + +--- + heartbeat/gcp-vpc-move-route.in | 13 +++++++------ + 1 file changed, 7 insertions(+), 6 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 8d5bfff36..566a70f86 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -104,7 +104,7 @@ subnet ranges + Name of the VPC network + + VPC network +- ++ + + + +@@ -112,7 +112,7 @@ Name of the VPC network + Name of the network interface + + Network interface name +- ++ + + + +@@ -120,7 +120,7 @@ Name of the network interface + Route name + + Route name +- ++ + + + +@@ -138,7 +138,7 @@ Route name + + + +-''' ++''' % os.path.basename(sys.argv[0]) + + + class Context(object): +@@ -229,7 +229,7 @@ def validate(ctx): + GCP_API_URL_PREFIX, ctx.project, ctx.vpc_network) + ctx.interface = os.environ.get('OCF_RESKEY_interface', 'eth0') + ctx.route_name = os.environ.get( +- 'OCF_RESKEY_route_name', 'ra-%s' % os.environ['__SCRIPT_NAME']) ++ 'OCF_RESKEY_route_name', 'ra-%s' % os.path.basename(sys.argv[0])) + ctx.iproute = pyroute2.IPRoute() + atexit.register(ctx.iproute.close) + idxs = ctx.iproute.link_lookup(ifname=ctx.interface) +@@ -432,7 +432,8 @@ def main(): + elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: + sys.exit(ip_monitor(ctx)) + else: +- usage = 'usage: $0 {start|stop|monitor|status|meta-data|validate-all}' ++ usage = 'usage: %s {start|stop|monitor|status|meta-data|validate-all}' % \ ++ os.path.basename(sys.argv[0]) + logging.error(usage) + sys.exit(OCF_ERR_UNIMPLEMENTED) + diff --git a/SOURCES/5-python-library.patch b/SOURCES/5-python-library.patch new file mode 100644 index 0000000..0066119 --- /dev/null +++ b/SOURCES/5-python-library.patch @@ -0,0 +1,600 @@ +From 
13ae97dec5754642af4d0d0edc03d9290e792e7f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 19 Jul 2018 16:12:35 +0200 +Subject: [PATCH 1/5] Add Python library + +--- + heartbeat/Makefile.am | 3 +- + heartbeat/ocf.py | 136 ++++++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 138 insertions(+), 1 deletion(-) + create mode 100644 heartbeat/ocf.py + +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index d4750bf09..1333f8feb 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -185,7 +185,8 @@ ocfcommon_DATA = ocf-shellfuncs \ + ora-common.sh \ + mysql-common.sh \ + nfsserver-redhat.sh \ +- findif.sh ++ findif.sh \ ++ ocf.py + + # Legacy locations + hbdir = $(sysconfdir)/ha.d +diff --git a/heartbeat/ocf.py b/heartbeat/ocf.py +new file mode 100644 +index 000000000..12be7a2a4 +--- /dev/null ++++ b/heartbeat/ocf.py +@@ -0,0 +1,136 @@ ++# ++# Copyright (c) 2016 Red Hat, Inc, Oyvind Albrigtsen ++# All Rights Reserved. ++# ++# ++# This library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# This library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with this library; if not, write to the Free Software ++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++# ++ ++import sys, os, logging, syslog ++ ++argv=sys.argv ++env=os.environ ++ ++# ++# Common variables for the OCF Resource Agents supplied by ++# heartbeat. ++# ++ ++OCF_SUCCESS=0 ++OCF_ERR_GENERIC=1 ++OCF_ERR_ARGS=2 ++OCF_ERR_UNIMPLEMENTED=3 ++OCF_ERR_PERM=4 ++OCF_ERR_INSTALLED=5 ++OCF_ERR_CONFIGURED=6 ++OCF_NOT_RUNNING=7 ++ ++# Non-standard values. ++# ++# OCF does not include the concept of master/slave resources so we ++# need to extend it so we can discover a resource's complete state. ++# ++# OCF_RUNNING_MASTER: ++# The resource is in "master" mode and fully operational ++# OCF_FAILED_MASTER: ++# The resource is in "master" mode but in a failed state ++# ++# The extra two values should only be used during a probe. ++# ++# Probes are used to discover resources that were started outside of ++# the CRM and/or left behind if the LRM fails. 
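++# (A probe is a one-shot monitor operation invoked with interval 0.)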
++# ++# They can be identified in RA scripts by checking for: ++# [ "${__OCF_ACTION}" = "monitor" -a "${OCF_RESKEY_CRM_meta_interval}" = "0" ] ++# ++# Failed "slaves" should continue to use: OCF_ERR_GENERIC ++# Fully operational "slaves" should continue to use: OCF_SUCCESS ++# ++OCF_RUNNING_MASTER=8 ++OCF_FAILED_MASTER=9 ++ ++ ++## Own logger handler that uses old-style syslog handler as otherwise ++## everything is sourced from /dev/syslog ++class SyslogLibHandler(logging.StreamHandler): ++ """ ++ A handler class that correctly push messages into syslog ++ """ ++ def emit(self, record): ++ syslog_level = { ++ logging.CRITICAL:syslog.LOG_CRIT, ++ logging.ERROR:syslog.LOG_ERR, ++ logging.WARNING:syslog.LOG_WARNING, ++ logging.INFO:syslog.LOG_INFO, ++ logging.DEBUG:syslog.LOG_DEBUG, ++ logging.NOTSET:syslog.LOG_DEBUG, ++ }[record.levelno] ++ ++ msg = self.format(record) ++ ++ # syslog.syslog can not have 0x00 character inside or exception ++ # is thrown ++ syslog.syslog(syslog_level, msg.replace("\x00","\n")) ++ return ++ ++ ++OCF_RESOURCE_INSTANCE = env.get("OCF_RESOURCE_INSTANCE") ++ ++HA_DEBUG = env.get("HA_debug", 0) ++HA_DATEFMT = env.get("HA_DATEFMT", "%b %d %T ") ++HA_LOGFACILITY = env.get("HA_LOGFACILITY") ++HA_LOGFILE = env.get("HA_LOGFILE") ++HA_DEBUGLOG = env.get("HA_DEBUGLOG") ++ ++log = logging.getLogger(os.path.basename(argv[0])) ++log.setLevel(logging.DEBUG) ++ ++## add logging to stderr ++if sys.stdout.isatty(): ++ seh = logging.StreamHandler(stream=sys.stderr) ++ if HA_DEBUG == 0: ++ seh.setLevel(logging.WARNING) ++ sehformatter = logging.Formatter('%(filename)s(%(OCF_RESOURCE_INSTANCE)s)[%(process)s]:\t%(asctime)s%(levelname)s: %(message)s', datefmt=HA_DATEFMT) ++ seh.setFormatter(sehformatter) ++ log.addHandler(seh) ++ ++## add logging to syslog ++if HA_LOGFACILITY: ++ slh = SyslogLibHandler() ++ if HA_DEBUG == 0: ++ slh.setLevel(logging.WARNING) ++ slhformatter = logging.Formatter('%(levelname)s: %(message)s') ++ slh.setFormatter(slhformatter) ++ log.addHandler(slh) ++ ++## add logging to file ++if HA_LOGFILE: ++ lfh = logging.FileHandler(HA_LOGFILE) ++ if HA_DEBUG == 0: ++ lfh.setLevel(logging.WARNING) ++ lfhformatter = logging.Formatter('%(filename)s(%(OCF_RESOURCE_INSTANCE)s)[%(process)s]:\t%(asctime)s%(levelname)s: %(message)s', datefmt=HA_DATEFMT) ++ lfh.setFormatter(lfhformatter) ++ log.addHandler(lfh) ++ ++## add debug logging to file ++if HA_DEBUGLOG and HA_LOGFILE != HA_DEBUGLOG: ++ dfh = logging.FileHandler(HA_DEBUGLOG) ++ if HA_DEBUG == 0: ++ dfh.setLevel(logging.WARNING) ++ dfhformatter = logging.Formatter('%(filename)s(%(OCF_RESOURCE_INSTANCE)s)[%(process)s]:\t%(asctime)s%(levelname)s: %(message)s', datefmt=HA_DATEFMT) ++ dfh.setFormatter(dfhformatter) ++ log.addHandler(dfh) ++ ++logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) + +From 2ade8dbf1f6f6d3889dd1ddbf40858edf10fbdc7 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 19 Jul 2018 16:20:39 +0200 +Subject: [PATCH 2/5] gcp-vpc-move-vip: use Python library + +--- + heartbeat/gcp-vpc-move-vip.in | 42 +++++++++++++++++++++--------------------- + 1 file changed, 21 insertions(+), 21 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index af2080502..eb5bce6a8 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -22,6 +22,11 @@ import os + import sys + import time + ++OCF_FUNCTIONS_DIR="%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++sys.path.append(OCF_FUNCTIONS_DIR) ++ ++from ocf import 
* ++ + try: + import googleapiclient.discovery + except ImportError: +@@ -40,10 +45,6 @@ else: + CONN = None + THIS_VM = None + ALIAS = None +-OCF_SUCCESS = 0 +-OCF_ERR_GENERIC = 1 +-OCF_ERR_CONFIGURED = 6 +-OCF_NOT_RUNNING = 7 + METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' + METADATA_HEADERS = {'Metadata-Flavor': 'Google'} + METADATA = \ +@@ -206,11 +207,11 @@ def gcp_alias_start(alias): + # If I already have the IP, exit. If it has an alias IP that isn't the VIP, + # then remove it + if my_alias == alias: +- logging.info( ++ logger.info( + '%s already has %s attached. No action required' % (THIS_VM, alias)) + sys.exit(OCF_SUCCESS) + elif my_alias: +- logging.info('Removing %s from %s' % (my_alias, THIS_VM)) ++ logger.info('Removing %s from %s' % (my_alias, THIS_VM)) + set_alias(project, my_zone, THIS_VM, '') + + # Loops through all hosts & remove the alias IP from the host that has it +@@ -223,7 +224,7 @@ def gcp_alias_start(alias): + host_zone = get_zone(project, host) + host_alias = get_alias(project, host_zone, host) + if alias == host_alias: +- logging.info( ++ logger.info( + '%s is attached to %s - Removing all alias IP addresses from %s' % + (alias, host, host)) + set_alias(project, host_zone, host, '') +@@ -237,14 +238,14 @@ def gcp_alias_start(alias): + # Check the IP has been added + my_alias = get_localhost_alias() + if alias == my_alias: +- logging.info('Finished adding %s to %s' % (alias, THIS_VM)) ++ logger.info('Finished adding %s to %s' % (alias, THIS_VM)) + elif my_alias: +- logging.error( ++ logger.error( + 'Failed to add IP. %s has an IP attached but it isn\'t %s' % + (THIS_VM, alias)) + sys.exit(OCF_ERR_GENERIC) + else: +- logging.error('Failed to add IP address %s to %s' % (alias, THIS_VM)) ++ logger.error('Failed to add IP address %s to %s' % (alias, THIS_VM)) + sys.exit(OCF_ERR_GENERIC) + + +@@ -254,14 +255,14 @@ def gcp_alias_stop(alias): + project = get_metadata('project/project-id') + + if my_alias == alias: +- logging.info('Removing %s from %s' % (my_alias, THIS_VM)) ++ logger.info('Removing %s from %s' % (my_alias, THIS_VM)) + set_alias(project, my_zone, THIS_VM, '') + + + def gcp_alias_status(alias): + my_alias = get_localhost_alias() + if alias == my_alias: +- logging.info('%s has the correct IP address attached' % THIS_VM) ++ logger.info('%s has the correct IP address attached' % THIS_VM) + else: + sys.exit(OCF_NOT_RUNNING) + +@@ -275,25 +276,24 @@ def validate(): + try: + CONN = googleapiclient.discovery.build('compute', 'v1') + except Exception as e: +- logging.error('Couldn\'t connect with google api: ' + str(e)) ++ logger.error('Couldn\'t connect with google api: ' + str(e)) + sys.exit(OCF_ERR_CONFIGURED) + + try: + THIS_VM = get_metadata('instance/name') + except Exception as e: +- logging.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) ++ logger.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) + sys.exit(OCF_ERR_CONFIGURED) + + ALIAS = os.environ.get('OCF_RESKEY_alias_ip') + if not ALIAS: +- logging.error('Missing alias_ip parameter') ++ logger.error('Missing alias_ip parameter') + sys.exit(OCF_ERR_CONFIGURED) + + + def configure_logs(): + # Prepare logging +- logging.basicConfig( +- format='gcp:alias - %(levelname)s - %(message)s', level=logging.INFO) ++ global logger + logging.getLogger('googleapiclient').setLevel(logging.WARN) + logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging') + if logging_env: +@@ -307,10 +307,10 @@ def configure_logs(): + 
handler.setLevel(logging.INFO) + formatter = logging.Formatter('gcp:alias "%(message)s"') + handler.setFormatter(formatter) +- root_logger = logging.getLogger() +- root_logger.addHandler(handler) ++ log.addHandler(handler) ++ logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) + except ImportError: +- logging.error('Couldn\'t import google.cloud.logging, ' ++ logger.error('Couldn\'t import google.cloud.logging, ' + 'disabling Stackdriver-logging support') + + +@@ -331,7 +331,7 @@ def main(): + elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: + gcp_alias_status(ALIAS) + else: +- logging.error('no such function %s' % str(sys.argv[1])) ++ logger.error('no such function %s' % str(sys.argv[1])) + + + if __name__ == "__main__": + +From 9e9ea17c42df27d4c13fed9badba295df48437f2 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 20 Jul 2018 13:27:42 +0200 +Subject: [PATCH 3/5] gcp-vpc-move-vip: moved alias-parameters to top of + metadata + +--- + heartbeat/gcp-vpc-move-vip.in | 20 ++++++++++---------- + 1 file changed, 10 insertions(+), 10 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index eb5bce6a8..ba61193b6 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -55,6 +55,16 @@ METADATA = \ + Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance + Floating IP Address on Google Cloud Platform + ++ ++ IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ ++ ++ ++ Subnet name for the Alias IP ++ Subnet name for the Alias IP ++ ++ + + List of hosts in the cluster + Host list +@@ -65,16 +75,6 @@ METADATA = \ + Stackdriver-logging support + + +- +- IP Address to be added including CIDR. E.g 192.168.0.1/32 +- IP Address to be added including CIDR. 
E.g 192.168.0.1/32 +- +- +- +- Subnet name for the Alias IP2 +- Subnet name for the Alias IP +- +- + + + + +From 716d69040dba7a769efb5a60eca934fdd65585f2 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 23 Jul 2018 11:17:00 +0200 +Subject: [PATCH 4/5] gcp-vpc-move-route: use Python library + +--- + heartbeat/gcp-vpc-move-route.in | 58 ++++++++++++++++++++--------------------- + 1 file changed, 28 insertions(+), 30 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 566a70f86..125289d86 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -39,6 +39,11 @@ import os + import sys + import time + ++OCF_FUNCTIONS_DIR="%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++sys.path.append(OCF_FUNCTIONS_DIR) ++ ++from ocf import * ++ + try: + import googleapiclient.discovery + import pyroute2 +@@ -55,12 +60,6 @@ else: + import urllib2 as urlrequest + + +-OCF_SUCCESS = 0 +-OCF_ERR_GENERIC = 1 +-OCF_ERR_UNIMPLEMENTED = 3 +-OCF_ERR_PERM = 4 +-OCF_ERR_CONFIGURED = 6 +-OCF_NOT_RUNNING = 7 + GCP_API_URL_PREFIX = 'https://www.googleapis.com/compute/v1' + METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' + METADATA_HEADERS = {'Metadata-Flavor': 'Google'} +@@ -199,18 +198,18 @@ def get_metadata(metadata_key, params=None, timeout=None): + + def validate(ctx): + if os.geteuid() != 0: +- logging.error('You must run this agent as root') ++ logger.error('You must run this agent as root') + sys.exit(OCF_ERR_PERM) + + try: + ctx.conn = googleapiclient.discovery.build('compute', 'v1') + except Exception as e: +- logging.error('Couldn\'t connect with google api: ' + str(e)) ++ logger.error('Couldn\'t connect with google api: ' + str(e)) + sys.exit(OCF_ERR_CONFIGURED) + + ctx.ip = os.environ.get('OCF_RESKEY_ip') + if not ctx.ip: +- logging.error('Missing ip parameter') ++ logger.error('Missing ip parameter') + sys.exit(OCF_ERR_CONFIGURED) + + try: +@@ -218,7 +217,7 @@ def validate(ctx): + ctx.zone = get_metadata('instance/zone').split('/')[-1] + ctx.project = get_metadata('project/project-id') + except Exception as e: +- logging.error( ++ logger.error( + 'Instance information not found. 
Is this a GCE instance ?: %s', str(e)) + sys.exit(OCF_ERR_CONFIGURED) + +@@ -234,7 +233,7 @@ def validate(ctx): + atexit.register(ctx.iproute.close) + idxs = ctx.iproute.link_lookup(ifname=ctx.interface) + if not idxs: +- logging.error('Network interface not found') ++ logger.error('Network interface not found') + sys.exit(OCF_ERR_CONFIGURED) + ctx.iface_idx = idxs[0] + +@@ -246,7 +245,7 @@ def check_conflicting_routes(ctx): + response = request.execute() + route_list = response.get('items', None) + if route_list: +- logging.error( ++ logger.error( + 'Conflicting unnmanaged routes for destination %s/32 in VPC %s found : %s', + ctx.ip, ctx.vpc_network, str(route_list)) + sys.exit(OCF_ERR_CONFIGURED) +@@ -258,7 +257,7 @@ def route_release(ctx): + + + def ip_monitor(ctx): +- logging.info('IP monitor: checking local network configuration') ++ logger.info('IP monitor: checking local network configuration') + + def address_filter(addr): + for attr in addr['attrs']: +@@ -271,12 +270,12 @@ def ip_monitor(ctx): + route = ctx.iproute.get_addr( + index=ctx.iface_idx, match=address_filter) + if not route: +- logging.warn( ++ logger.warning( + 'The floating IP %s is not locally configured on this instance (%s)', + ctx.ip, ctx.instance) + return OCF_NOT_RUNNING + +- logging.debug( ++ logger.debug( + 'The floating IP %s is correctly configured on this instance (%s)', + ctx.ip, ctx.instance) + return OCF_SUCCESS +@@ -287,7 +286,7 @@ def ip_release(ctx): + + + def ip_and_route_start(ctx): +- logging.info('Bringing up the floating IP %s', ctx.ip) ++ logger.info('Bringing up the floating IP %s', ctx.ip) + + # Add a new entry in the routing table + # If the route entry exists and is pointing to another instance, take it over +@@ -322,7 +321,7 @@ def ip_and_route_start(ctx): + request.execute() + except googleapiclient.errors.HttpError as e: + if e.resp.status == 404: +- logging.error('VPC network not found') ++ logger.error('VPC network not found') + sys.exit(OCF_ERR_CONFIGURED) + else: + raise +@@ -336,11 +335,11 @@ def ip_and_route_start(ctx): + + ctx.iproute.addr('add', index=ctx.iface_idx, address=ctx.ip, mask=32) + ctx.iproute.link('set', index=ctx.iface_idx, state='up') +- logging.info('Successfully brought up the floating IP %s', ctx.ip) ++ logger.info('Successfully brought up the floating IP %s', ctx.ip) + + + def route_monitor(ctx): +- logging.info('GCP route monitor: checking route table') ++ logger.info('GCP route monitor: checking route table') + + # Ensure that there is no route that we are not aware of that is also handling our IP + check_conflicting_routes +@@ -360,39 +359,38 @@ def route_monitor(ctx): + instance_url = '%s/projects/%s/zones/%s/instances/%s' % ( + GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance) + if routed_to_instance != instance_url: +- logging.warn( ++ logger.warning( + 'The floating IP %s is not routed to this instance (%s) but to instance %s', + ctx.ip, ctx.instance, routed_to_instance.split('/')[-1]) + return OCF_NOT_RUNNING + +- logging.debug( ++ logger.debug( + 'The floating IP %s is correctly routed to this instance (%s)', + ctx.ip, ctx.instance) + return OCF_SUCCESS + + + def ip_and_route_stop(ctx): +- logging.info('Bringing down the floating IP %s', ctx.ip) ++ logger.info('Bringing down the floating IP %s', ctx.ip) + + # Delete the route entry + # If the route entry exists and is pointing to another instance, don't touch it + if route_monitor(ctx) == OCF_NOT_RUNNING: +- logging.info( ++ logger.info( + 'The floating IP %s is already not routed to this instance 
(%s)', + ctx.ip, ctx.instance) + else: + route_release(ctx) + + if ip_monitor(ctx) == OCF_NOT_RUNNING: +- logging.info('The floating IP %s is already down', ctx.ip) ++ logger.info('The floating IP %s is already down', ctx.ip) + else: + ip_release(ctx) + + + def configure_logs(ctx): + # Prepare logging +- logging.basicConfig( +- format='gcp:route - %(levelname)s - %(message)s', level=logging.INFO) ++ global logger + logging.getLogger('googleapiclient').setLevel(logging.WARN) + logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging') + if logging_env: +@@ -406,10 +404,10 @@ def configure_logs(ctx): + handler.setLevel(logging.INFO) + formatter = logging.Formatter('gcp:route "%(message)s"') + handler.setFormatter(formatter) +- root_logger = logging.getLogger() +- root_logger.addHandler(handler) ++ log.addHandler(handler) ++ logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) + except ImportError: +- logging.error('Couldn\'t import google.cloud.logging, ' ++ logger.error('Couldn\'t import google.cloud.logging, ' + 'disabling Stackdriver-logging support') + + +@@ -434,7 +432,7 @@ def main(): + else: + usage = 'usage: %s {start|stop|monitor|status|meta-data|validate-all}' % \ + os.path.basename(sys.argv[0]) +- logging.error(usage) ++ logger.error(usage) + sys.exit(OCF_ERR_UNIMPLEMENTED) + + + +From 6ec7e87693a51cbb16a1822e6d15f1dbfc11f8e6 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 23 Jul 2018 15:55:48 +0200 +Subject: [PATCH 5/5] Python: add logging.basicConfig() to support background + logging + +--- + heartbeat/ocf.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/heartbeat/ocf.py b/heartbeat/ocf.py +index 12be7a2a4..36e7ccccd 100644 +--- a/heartbeat/ocf.py ++++ b/heartbeat/ocf.py +@@ -94,6 +94,7 @@ def emit(self, record): + HA_LOGFILE = env.get("HA_LOGFILE") + HA_DEBUGLOG = env.get("HA_DEBUGLOG") + ++logging.basicConfig() + log = logging.getLogger(os.path.basename(argv[0])) + log.setLevel(logging.DEBUG) + diff --git a/SOURCES/6-gcp-move-vip-filter-aggregatedlist.patch b/SOURCES/6-gcp-move-vip-filter-aggregatedlist.patch new file mode 100644 index 0000000..69ac757 --- /dev/null +++ b/SOURCES/6-gcp-move-vip-filter-aggregatedlist.patch @@ -0,0 +1,25 @@ +From 6bd66e337922403cb2dd3a8715ae401def8c0437 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Thu, 19 Jul 2018 13:00:58 -0300 +Subject: [PATCH] gcp-vpc-move-vip.in: filter call to aggregatedList + +Don't list all the instances in the project, filter only the one we are +interested in. 
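For reference, a minimal standalone sketch of the filtered lookup this patch introduces (assuming application-default credentials are available; the helper name find_zone is illustrative — the agent's own function is get_zone):

import googleapiclient.discovery

def find_zone(project, instance):
    # Build a Compute Engine client (assumes default credentials).
    conn = googleapiclient.discovery.build('compute', 'v1')
    # Filter server-side so the API returns only the named instance
    # instead of paging through every instance in the project.
    fl = 'name="%s"' % instance
    request = conn.instances().aggregatedList(project=project, filter=fl)
    while request is not None:
        response = request.execute()
        # 'items' maps 'zones/<zone>' keys to per-zone results.
        for zone, data in response.get('items', {}).items():
            for inst in data.get('instances', []):
                if inst['name'] == instance:
                    return zone.split('/')[-1]
        request = conn.instances().aggregatedList_next(
            previous_request=request, previous_response=response)
    return None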
+--- + heartbeat/gcp-vpc-move-vip.in | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index 9fc87242f..af2080502 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -169,7 +169,8 @@ def get_localhost_alias(): + + + def get_zone(project, instance): +- request = CONN.instances().aggregatedList(project=project) ++ fl = 'name="%s"' % instance ++ request = CONN.instances().aggregatedList(project=project, filter=fl) + while request is not None: + response = request.execute() + zones = response.get('items', {}) diff --git a/SOURCES/7-gcp-bundled.patch b/SOURCES/7-gcp-bundled.patch new file mode 100644 index 0000000..b1b8a50 --- /dev/null +++ b/SOURCES/7-gcp-bundled.patch @@ -0,0 +1,35 @@ +diff -uNr a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in +--- a/heartbeat/gcp-vpc-move-ip.in 2019-04-05 09:20:26.164739897 +0200 ++++ b/heartbeat/gcp-vpc-move-ip.in 2019-04-05 09:21:01.331139742 +0200 +@@ -36,7 +36,7 @@ + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + + # Defaults +-OCF_RESKEY_gcloud_default="/usr/bin/gcloud" ++OCF_RESKEY_gcloud_default="/usr/bin/gcloud-ra" + OCF_RESKEY_configuration_default="default" + OCF_RESKEY_vpc_network_default="default" + OCF_RESKEY_interface_default="eth0" +diff -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +--- a/heartbeat/gcp-vpc-move-vip.in 2020-06-25 13:21:42.090334894 +0200 ++++ b/heartbeat/gcp-vpc-move-vip.in 2020-06-25 13:14:16.668092817 +0200 +@@ -28,6 +28,7 @@ + from ocf import * + + try: ++ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp/google-cloud-sdk/lib/third_party') + import googleapiclient.discovery + except ImportError: + pass +diff -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +--- a/heartbeat/gcp-vpc-move-route.in 2020-06-25 13:22:03.216301380 +0200 ++++ b/heartbeat/gcp-vpc-move-route.in 2020-06-25 13:13:19.864183380 +0200 +@@ -45,6 +45,8 @@ + from ocf import * + + try: ++ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp') ++ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp/google-cloud-sdk/lib/third_party') + import googleapiclient.discovery + import pyroute2 + except ImportError: diff --git a/SOURCES/7-gcp-stackdriver-logging-note.patch b/SOURCES/7-gcp-stackdriver-logging-note.patch new file mode 100644 index 0000000..b714513 --- /dev/null +++ b/SOURCES/7-gcp-stackdriver-logging-note.patch @@ -0,0 +1,28 @@ +diff -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +--- a/heartbeat/gcp-vpc-move-route.in 2018-07-30 16:56:23.486368292 +0200 ++++ b/heartbeat/gcp-vpc-move-route.in 2018-07-30 17:11:54.189715666 +0200 +@@ -125,8 +125,8 @@ + + + +-If enabled (set to true), IP failover logs will be posted to stackdriver logging +-Stackdriver-logging support ++If enabled (set to true), IP failover logs will be posted to stackdriver logging. Using stackdriver logging requires additional libraries (google-cloud-logging). ++Stackdriver-logging support. Requires additional libraries (google-cloud-logging). 
+ + + +diff -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +--- a/heartbeat/gcp-vpc-move-vip.in 2018-07-30 16:56:23.486368292 +0200 ++++ b/heartbeat/gcp-vpc-move-vip.in 2018-07-30 17:06:17.260686483 +0200 +@@ -72,8 +72,8 @@ + + + +- If enabled (set to true), IP failover logs will be posted to stackdriver logging +- Stackdriver-logging support ++ If enabled (set to true), IP failover logs will be posted to stackdriver logging. Using stackdriver logging requires additional libraries (google-cloud-logging). ++ Stackdriver-logging support. Requires additional libraries (google-cloud-logging). + + + diff --git a/SOURCES/8-google-cloud-sdk-fixes.patch b/SOURCES/8-google-cloud-sdk-fixes.patch new file mode 100644 index 0000000..d734d82 --- /dev/null +++ b/SOURCES/8-google-cloud-sdk-fixes.patch @@ -0,0 +1,12 @@ +diff -uNr a/bundled/gcp/google-cloud-sdk/bin/gcloud b/bundled/gcp/google-cloud-sdk/bin/gcloud +--- a/bundled/gcp/google-cloud-sdk/bin/gcloud 2018-06-18 14:30:10.000000000 +0200 ++++ b/bundled/gcp/google-cloud-sdk/bin/gcloud 2018-06-25 13:12:56.057000620 +0200 +@@ -64,6 +64,8 @@ + } + CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0") + ++CLOUDSDK_PYTHON_SITEPACKAGES=1 ++ + # if CLOUDSDK_PYTHON is empty + if [ -z "$CLOUDSDK_PYTHON" ]; then + # if python2 exists then plain python may point to a version != 2 diff --git a/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch b/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch new file mode 100644 index 0000000..de378c4 --- /dev/null +++ b/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch @@ -0,0 +1,129 @@ +diff -uNr a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py +--- a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 1980-01-01 09:00:00.000000000 +0100 ++++ b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 2019-04-04 11:56:00.292677044 +0200 +@@ -19,8 +19,14 @@ + certificates. + """ + ++from pyasn1.codec.der import decoder + from pyasn1_modules import pem +-import rsa ++from pyasn1_modules.rfc2459 import Certificate ++from pyasn1_modules.rfc5208 import PrivateKeyInfo ++from cryptography.hazmat.primitives import serialization, hashes ++from cryptography.hazmat.primitives.asymmetric import padding ++from cryptography import x509 ++from cryptography.hazmat.backends import default_backend + import six + + from oauth2client import _helpers +@@ -40,7 +46,7 @@ + '-----END RSA PRIVATE KEY-----') + _PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----', + '-----END PRIVATE KEY-----') +-_PKCS8_SPEC = None ++_PKCS8_SPEC = PrivateKeyInfo() + + + def _bit_list_to_bytes(bit_list): +@@ -67,7 +73,8 @@ + """ + + def __init__(self, pubkey): +- self._pubkey = pubkey ++ self._pubkey = serialization.load_pem_public_key(pubkey, ++ backend=default_backend()) + + def verify(self, message, signature): + """Verifies a message against a signature. 
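The following hunk catches InvalidSignature, which the import block added at the top of this patch does not pull in (it lives in cryptography.exceptions). For reference, a minimal standalone sketch of PKCS#1 v1.5 verification with the cryptography primitives this patch switches to (independent of the patched oauth2client code; the function name is illustrative):

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding

def verify_pkcs1v15(pem_public_key, message, signature):
    # Load a PEM-encoded RSA public key (expects bytes).
    pubkey = serialization.load_pem_public_key(
        pem_public_key, backend=default_backend())
    try:
        # verify() returns None on success and raises InvalidSignature
        # on a bad signature; malformed input raises ValueError/TypeError.
        pubkey.verify(signature, message, padding.PKCS1v15(), hashes.SHA256())
        return True
    except (InvalidSignature, ValueError, TypeError):
        return False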
+@@ -84,8 +91,9 @@ + """ + message = _helpers._to_bytes(message, encoding='utf-8') + try: +- return rsa.pkcs1.verify(message, signature, self._pubkey) +- except (ValueError, rsa.pkcs1.VerificationError): ++ return self._pubkey.verify(signature, message, padding.PKCS1v15(), ++ hashes.SHA256()) ++ except (ValueError, TypeError, InvalidSignature): + return False + + @classmethod +@@ -109,19 +117,18 @@ + """ + key_pem = _helpers._to_bytes(key_pem) + if is_x509_cert: +- from pyasn1.codec.der import decoder +- from pyasn1_modules import rfc2459 +- +- der = rsa.pem.load_pem(key_pem, 'CERTIFICATE') +- asn1_cert, remaining = decoder.decode(der, asn1Spec=rfc2459.Certificate()) ++ der = x509.load_pem_x509_certificate(pem_data, default_backend()) ++ asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate()) + if remaining != b'': + raise ValueError('Unused bytes', remaining) + + cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo'] + key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey']) +- pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER') ++ pubkey = serialization.load_der_public_key(decoded_key, ++ backend=default_backend()) + else: +- pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM') ++ pubkey = serialization.load_pem_public_key(decoded_key, ++ backend=default_backend()) + return cls(pubkey) + + +@@ -134,6 +141,8 @@ + + def __init__(self, pkey): + self._key = pkey ++ self._pubkey = serialization.load_pem_private_key(pkey, ++ backend=default_backend()) + + def sign(self, message): + """Signs a message. +@@ -145,7 +154,7 @@ + string, The signature of the message for the given key. + """ + message = _helpers._to_bytes(message, encoding='utf-8') +- return rsa.pkcs1.sign(message, self._key, 'SHA-256') ++ return self._key.sign(message, padding.PKCS1v15(), hashes.SHA256()) + + @classmethod + def from_string(cls, key, password='notasecret'): +@@ -163,27 +172,24 @@ + ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in + PEM format. 
+ """ +- global _PKCS8_SPEC + key = _helpers._from_bytes(key) # pem expects str in Py3 + marker_id, key_bytes = pem.readPemBlocksFromFile( + six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER) + + if marker_id == 0: +- pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes, +- format='DER') +- elif marker_id == 1: +- from pyasn1.codec.der import decoder +- from pyasn1_modules import rfc5208 ++ pkey = serialization.load_der_private_key( ++ key_bytes, password=None, ++ backend=default_backend()) + +- if _PKCS8_SPEC is None: +- _PKCS8_SPEC = rfc5208.PrivateKeyInfo() ++ elif marker_id == 1: + key_info, remaining = decoder.decode( + key_bytes, asn1Spec=_PKCS8_SPEC) + if remaining != b'': + raise ValueError('Unused bytes', remaining) + pkey_info = key_info.getComponentByName('privateKey') +- pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(), +- format='DER') ++ pkey = serialization.load_der_private_key( ++ pkey_info.asOctets(), password=None, ++ backend=default_backend()) + else: + raise ValueError('No key could be detected.') + diff --git a/SOURCES/IPaddr2-monitor_retries.patch b/SOURCES/IPaddr2-monitor_retries.patch new file mode 100644 index 0000000..6f2629a --- /dev/null +++ b/SOURCES/IPaddr2-monitor_retries.patch @@ -0,0 +1,66 @@ +diff -uNr a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +--- a/heartbeat/IPaddr2 2018-06-27 10:29:08.000000000 +0200 ++++ b/heartbeat/IPaddr2 2018-06-29 16:01:50.538797379 +0200 +@@ -80,6 +80,7 @@ + OCF_RESKEY_arp_bg_default=true + OCF_RESKEY_run_arping_default=false + OCF_RESKEY_preferred_lft_default="forever" ++OCF_RESKEY_monitor_retries="1" + + : ${OCF_RESKEY_lvs_support=${OCF_RESKEY_lvs_support_default}} + : ${OCF_RESKEY_lvs_ipv6_addrlabel=${OCF_RESKEY_lvs_ipv6_addrlabel_default}} +@@ -92,6 +93,7 @@ + : ${OCF_RESKEY_arp_bg=${OCF_RESKEY_arp_bg_default}} + : ${OCF_RESKEY_run_arping=${OCF_RESKEY_run_arping_default}} + : ${OCF_RESKEY_preferred_lft=${OCF_RESKEY_preferred_lft_default}} ++: ${OCF_RESKEY_monitor_retries=${OCF_RESKEY_monitor_retries_default}} + ####################################################################### + + SENDARP=$HA_BIN/send_arp +@@ -368,6 +370,18 @@ + + + ++ ++ ++Set number of retries to find interface in monitor-action. ++ ++ONLY INCREASE IF THE AGENT HAS ISSUES FINDING YOUR NIC DURING THE ++MONITOR-ACTION. A HIGHER SETTING MAY LEAD TO DELAYS IN DETECTING ++A FAILURE. 
++ ++Number of retries to find interface in monitor-action ++ ++ ++ + + + +@@ -536,15 +550,26 @@ + find_interface() { + local ipaddr="$1" + local netmask="$2" ++ local iface="" + + # + # List interfaces but exclude FreeS/WAN ipsecN virtual interfaces + # +- local iface="`$IP2UTIL -o -f $FAMILY addr show \ ++ for i in $(seq 1 $OCF_RESKEY_monitor_retries); do ++ iface="`$IP2UTIL -o -f $FAMILY addr show \ + | grep "\ $ipaddr/$netmask" \ + | cut -d ' ' -f2 \ + | grep -v '^ipsec[0-9][0-9]*$'`" + ++ if [ -n "$iface" ]; then ++ break ++ fi ++ ++ if [ $OCF_RESKEY_monitor_retries -gt 1 ]; then ++ sleep 1 ++ fi ++ done ++ + echo "$iface" + return 0 + } diff --git a/SOURCES/LVM-activate-1-warn-vg_access_mode.patch b/SOURCES/LVM-activate-1-warn-vg_access_mode.patch new file mode 100644 index 0000000..3471524 --- /dev/null +++ b/SOURCES/LVM-activate-1-warn-vg_access_mode.patch @@ -0,0 +1,42 @@ +From 12ef5a343158bbfaa5233468a0506074fceaac81 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 21 Aug 2018 12:14:49 +0200 +Subject: [PATCH] LVM-activate: return OCF_ERR_CONFIGURED for incorrect + vg_access_mode + +--- + heartbeat/LVM-activate | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index fbd058288..55e36a2d2 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -448,7 +448,7 @@ lvm_validate() { + ;; + *) + ocf_exit_reason "You specified an invalid value for vg_access_mode: $VG_access_mode" +- exit $OCF_ERR_ARGS ++ exit $OCF_ERR_CONFIGURED + ;; + esac + +@@ -771,7 +771,6 @@ lvm_stop() { + return $OCF_SUCCESS + fi + +- lvm_validate + ocf_log info "Deactivating ${vol}" + + case ${VG_access_mode} in +@@ -788,8 +787,8 @@ lvm_stop() { + tagging_deactivate + ;; + *) +- ocf_exit_reason "VG [${VG}] is not properly configured in cluster. It's unsafe!" +- exit $OCF_ERR_CONFIGURED ++ ocf_log err "VG [${VG}] is not properly configured in cluster. It's unsafe!" 
++ exit $OCF_SUCCESS + ;; + esac + diff --git a/SOURCES/LVM-activate-2-parameters-access-mode-fixes.patch b/SOURCES/LVM-activate-2-parameters-access-mode-fixes.patch new file mode 100644 index 0000000..ae1fe65 --- /dev/null +++ b/SOURCES/LVM-activate-2-parameters-access-mode-fixes.patch @@ -0,0 +1,137 @@ +From 792077bf2994e2e582ccfb0768f3186517de9025 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 21 Sep 2018 12:00:07 +0200 +Subject: [PATCH] LVM-activate: fixes + +- read parameters for start/stop/monitor-actions +- fail during monitor-action when run with incorrect access_mode +--- + heartbeat/LVM-activate | 44 ++++++++++++++++++++++++++---------------- + 1 file changed, 27 insertions(+), 17 deletions(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 55e36a2d2..f46932c1c 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -56,6 +56,7 @@ LV=${OCF_RESKEY_lvname} + # 3: vg has system_id (new) + # 4: vg has tagging (old) + VG_access_mode=${OCF_RESKEY_vg_access_mode} ++VG_access_mode_num=0 + + # Activate LV(s) with "shared" lock for cluster fs + # or "exclusive" lock for local fs +@@ -176,7 +177,9 @@ END + # 2: vg is clustered - clvmd (old) + # 3: vg has system_id (new) + # 4: vg has tagging (old) +-get_VG_access_mode() { ++ ++get_VG_access_mode_num() ++{ + local access_mode + local vg_locktype + local vg_clustered +@@ -415,11 +418,8 @@ tagging_check() + return $OCF_SUCCESS + } + +-lvm_validate() { +- local lv_count +- local mode +- +- # Parameters checking ++read_parameters() ++{ + if [ -z "$VG" ] + then + ocf_exit_reason "You must identify the volume group name!" +@@ -435,22 +435,30 @@ lvm_validate() { + # Convert VG_access_mode from string to index + case ${VG_access_mode} in + lvmlockd) +- VG_access_mode=1 ++ VG_access_mode_num=1 + ;; + clvmd) +- VG_access_mode=2 ++ VG_access_mode_num=2 + ;; + system_id) +- VG_access_mode=3 ++ VG_access_mode_num=3 + ;; + tagging) +- VG_access_mode=4 ++ VG_access_mode_num=4 + ;; + *) ++ # dont exit with error-code here or nodes will get fenced on ++ # e.g. "pcs resource create" + ocf_exit_reason "You specified an invalid value for vg_access_mode: $VG_access_mode" +- exit $OCF_ERR_CONFIGURED + ;; + esac ++} ++ ++lvm_validate() { ++ local lv_count ++ local mode ++ ++ read_parameters + + check_binary pgrep + # Every LVM command is just symlink to lvm binary +@@ -471,9 +479,9 @@ lvm_validate() { + # Get the access mode from VG metadata and check if it matches the input + # value. Skip to check "tagging" mode because there's no reliable way to + # automatically check if "tagging" mode is being used. +- get_VG_access_mode ++ get_VG_access_mode_num + mode=$? +- if [ $VG_access_mode -ne 4 ] && [ $mode -ne $VG_access_mode ]; then ++ if [ $VG_access_mode_num -ne 4 ] && [ $mode -ne $VG_access_mode_num ]; then + ocf_exit_reason "The specified vg_access_mode doesn't match the lock_type on VG metadata!" 
+ exit $OCF_ERR_ARGS + fi +@@ -495,8 +503,8 @@ lvm_validate() { + fi + fi + +- # VG_access_mode specific checking goes here +- case ${VG_access_mode} in ++ # VG_access_mode_num specific checking goes here ++ case ${VG_access_mode_num} in + 1) + lvmlockd_check + ;; +@@ -731,7 +739,7 @@ lvm_start() { + [ -z ${LV} ] && vol=${VG} || vol=${VG}/${LV} + ocf_log info "Activating ${vol}" + +- case ${VG_access_mode} in ++ case ${VG_access_mode_num} in + 1) + lvmlockd_activate + ;; +@@ -773,7 +781,7 @@ lvm_stop() { + + ocf_log info "Deactivating ${vol}" + +- case ${VG_access_mode} in ++ case ${VG_access_mode_num} in + 1) + lvmlockd_deactivate + ;; +@@ -811,9 +819,11 @@ start) + lvm_start + ;; + stop) ++ read_parameters + lvm_stop + ;; + monitor) ++ lvm_validate + lvm_status + ;; + validate-all) diff --git a/SOURCES/LVM-activate-fix-issue-with-dashes.patch b/SOURCES/LVM-activate-fix-issue-with-dashes.patch new file mode 100644 index 0000000..7f3b996 --- /dev/null +++ b/SOURCES/LVM-activate-fix-issue-with-dashes.patch @@ -0,0 +1,54 @@ +From 2c219dd000d7f5edd3765a1c6bc5f3e6efb7208b Mon Sep 17 00:00:00 2001 +From: Paul Mezzanini +Date: Fri, 1 Jun 2018 11:58:06 -0400 +Subject: [PATCH] Volume groups and logical volumes "-" in their name get + mangled with double dashes in dmsetup. Switching to wc and just counting + lines while depending on the vgname + lvname filter in the dmsetup call gets + around the issue with dmsetup outputting correctly but grep failing due to + the name mangle. + +Logic for both test cases and dmsetup calls changed so they match too. No reason +to not have matching tests. + +This is AllBad but there isn't a better way that I'm aware of yet. +--- + heartbeat/LVM-activate | 17 ++++++++++++----- + 1 file changed, 12 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 60e656178..fbd058288 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -692,20 +692,27 @@ tagging_deactivate() { + # lvs/vgs when the metadata is somehow inconsistent. + # + # So, we have to make compromise that the VG is assumably active if any LV of the VG is active. ++# ++# Paul: ++# VGS + LVS with "-" in their name get mangled with double dashes in dmsetup. ++# Switching to wc and just counting lines while depending on the vgname + lvname filter ++# in dmsetup gets around the issue with dmsetup reporting correctly but grep failing. ++# ++# Logic for both test cases and dmsetup calls changed so they match too. ++# ++# This is AllBad but there isn't a better way that I'm aware of yet. + lvm_status() { + local dm_count + + if [ -n "${LV}" ]; then + # dmsetup ls? It cannot accept device name. It's + # too heavy to list all DM devices. +- dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" \ +- | grep -Eq "${VG}-+${LV}" ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" | wc -l ) + else +- dm_count=$(dmsetup --noheadings info -c -S "vgname=${VG}" 2>/dev/null | grep -c "${VG}-") +- test $dm_count -gt 0 ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" 2>/dev/null | wc -l ) + fi + +- if [ $? 
-ne 0 ]; then ++ if [ $dm_count -eq 0 ]; then + return $OCF_NOT_RUNNING + fi + diff --git a/SOURCES/LVM-fix-missing-dash.patch b/SOURCES/LVM-fix-missing-dash.patch new file mode 100644 index 0000000..0e24f5f --- /dev/null +++ b/SOURCES/LVM-fix-missing-dash.patch @@ -0,0 +1,22 @@ +From 5a664525a20d3d5094912322be4faac668e4920e Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 13 Aug 2018 14:30:50 +0200 +Subject: [PATCH] LVM: fix missing dash + +--- + heartbeat/lvm-tag.sh | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/lvm-tag.sh b/heartbeat/lvm-tag.sh +index 71f53b20b..fe17e0f21 100644 +--- a/heartbeat/lvm-tag.sh ++++ b/heartbeat/lvm-tag.sh +@@ -147,7 +147,7 @@ lvm_init() { + if [ -n "$OCF_RESKEY_tag" ]; then + OUR_TAG=$OCF_RESKEY_tag + fi +- vgchange_activate_options="aly --config activation{volume_list=[\"@${OUR_TAG}\"]}" ++ vgchange_activate_options="-aly --config activation{volume_list=[\"@${OUR_TAG}\"]}" + vgchange_deactivate_options="-aln" + } + diff --git a/SOURCES/LVM-volume_group_check_only.patch b/SOURCES/LVM-volume_group_check_only.patch new file mode 100644 index 0000000..505c66a --- /dev/null +++ b/SOURCES/LVM-volume_group_check_only.patch @@ -0,0 +1,72 @@ +From c414259728610f95243d9e34289fefd596b0ac8b Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 22 Jun 2018 15:37:36 +0200 +Subject: [PATCH] LVM: add "volume_group_check_only" parameter to avoid + timeouts in some cases + +--- + heartbeat/LVM | 10 ++++++++++ + heartbeat/lvm-tag.sh | 24 +++++++++++++----------- + 2 files changed, 23 insertions(+), 11 deletions(-) + +diff --git a/heartbeat/LVM b/heartbeat/LVM +index d3cd1a14..10f7186d 100755 +--- a/heartbeat/LVM ++++ b/heartbeat/LVM +@@ -103,6 +103,16 @@ logical volumes. + + + ++ ++ ++If set, only the volume group will be checked during monitoring. ++ ++WARNING: ONLY USE IF YOU HAVE ISSUES WITH TIMEOUTS! ++ ++Only check volume group during monitoring ++ ++ ++ + + + +diff --git a/heartbeat/lvm-tag.sh b/heartbeat/lvm-tag.sh +index 71f53b20..170426e8 100644 +--- a/heartbeat/lvm-tag.sh ++++ b/heartbeat/lvm-tag.sh +@@ -160,19 +160,21 @@ lvm_validate_all() { + lvm_status() { + local rc=0 + +- # If vg is running, make sure the correct tag is present. Otherwise we +- # can not guarantee exclusive activation. +- if ! check_tags; then +- ocf_exit_reason "WARNING: $OCF_RESKEY_volgrpname is active without the cluster tag, \"$OUR_TAG\"" +- rc=$OCF_ERR_GENERIC +- fi ++ if ! ocf_is_true "$OCF_RESKEY_volume_group_check_only"; then ++ # If vg is running, make sure the correct tag is present. Otherwise we ++ # can not guarantee exclusive activation. ++ if ! check_tags; then ++ ocf_exit_reason "WARNING: $OCF_RESKEY_volgrpname is active without the cluster tag, \"$OUR_TAG\"" ++ rc=$OCF_ERR_GENERIC ++ fi + +- # make sure the environment for tags activation is still valid +- if ! verify_tags_environment; then +- rc=$OCF_ERR_GENERIC ++ # make sure the environment for tags activation is still valid ++ if ! verify_tags_environment; then ++ rc=$OCF_ERR_GENERIC ++ fi ++ # let the user know if their initrd is older than lvm.conf. ++ check_initrd_warning + fi +- # let the user know if their initrd is older than lvm.conf. 
+- check_initrd_warning + + return $rc + } +-- +2.17.1 + diff --git a/SOURCES/VirtualDomain-stateless-support.patch b/SOURCES/VirtualDomain-stateless-support.patch new file mode 100644 index 0000000..9d79622 --- /dev/null +++ b/SOURCES/VirtualDomain-stateless-support.patch @@ -0,0 +1,126 @@ +diff -uNr a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain +--- a/heartbeat/VirtualDomain 2018-06-29 14:05:02.000000000 +0200 ++++ b/heartbeat/VirtualDomain 2018-07-03 14:01:25.892705351 +0200 +@@ -26,6 +26,9 @@ + OCF_RESKEY_CRM_meta_timeout_default=90000 + OCF_RESKEY_save_config_on_stop_default=false + OCF_RESKEY_sync_config_on_stop_default=false ++OCF_RESKEY_backingfile_default="" ++OCF_RESKEY_stateless_default="false" ++OCF_RESKEY_copyindirs_default="" + + : ${OCF_RESKEY_migration_downtime=${OCF_RESKEY_migration_downtime_default}} + : ${OCF_RESKEY_migration_speed=${OCF_RESKEY_migration_speed_default}} +@@ -36,6 +39,9 @@ + : ${OCF_RESKEY_CRM_meta_timeout=${OCF_RESKEY_CRM_meta_timeout_default}} + : ${OCF_RESKEY_save_config_on_stop=${OCF_RESKEY_save_config_on_stop_default}} + : ${OCF_RESKEY_sync_config_on_stop=${OCF_RESKEY_sync_config_on_stop_default}} ++: ${OCF_RESKEY_backingfile=${OCF_RESKEY_backingfile_default}} ++: ${OCF_RESKEY_stateless=${OCF_RESKEY_stateless_default}} ++: ${OCF_RESKEY_copyindirs=${OCF_RESKEY_copyindirs_default}} + + if ocf_is_true ${OCF_RESKEY_sync_config_on_stop}; then + OCF_RESKEY_save_config_on_stop="true" +@@ -271,6 +277,35 @@ + + + ++ ++ ++When the VM is used in Copy-On-Write mode, this is the backing file to use (with its full path). ++The VMs image will be created based on this backing file. ++This backing file will never be changed during the life of the VM. ++ ++If the VM is wanted to work with Copy-On-Write mode, this is the backing file to use (with its full path) ++ ++ ++ ++ ++ ++If set to true and backingfile is defined, the start of the VM will systematically create a new qcow2 based on ++the backing file, therefore the VM will always be stateless. If set to false, the start of the VM will use the ++COW (<vmname>.qcow2) file if it exists, otherwise the first start will create a new qcow2 based on the backing ++file given as backingfile. ++ ++If set to true, the (<vmname>.qcow2) file will be re-created at each start, based on the backing file (if defined) ++ ++ ++ ++ ++ ++List of directories for the virt-copy-in before booting the VM. Used only in stateless mode. ++ ++List of directories for the virt-copy-in before booting the VM stateless mode. ++ ++ ++ + + + virsh shutdown method to use. Please verify that it is supported by your virsh toolsed with 'virsh help shutdown' +@@ -545,11 +580,49 @@ + # is restored to an 'undefined' state before creating. + verify_undefined + +- virsh $VIRSH_OPTIONS create ${OCF_RESKEY_config} +- rc=$? +- if [ $rc -ne 0 ]; then +- ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}." +- return $OCF_ERR_GENERIC ++ if [ -z "${OCF_RESKEY_backingfile}" ]; then ++ virsh $VIRSH_OPTIONS create ${OCF_RESKEY_config} ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}." ++ return $OCF_ERR_GENERIC ++ fi ++ else ++ if ocf_is_true "${OCF_RESKEY_stateless}" || [ ! -s "${OCF_RESKEY_config%%.*}.qcow2" ]; then ++ # Create the Stateless image ++ dirconfig=`dirname ${OCF_RESKEY_config}` ++ qemu-img create -f qcow2 -b ${OCF_RESKEY_backingfile} ${OCF_RESKEY_config%%.*}.qcow2 ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed qemu-img create ${DOMAIN_NAME} with backing file ${OCF_RESKEY_backingfile}." 
++ return $OCF_ERR_GENERIC ++ fi ++ ++ virsh define ${OCF_RESKEY_config} ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to define virtual domain ${DOMAIN_NAME}." ++ return $OCF_ERR_GENERIC ++ fi ++ ++ if [ -n "${OCF_RESKEY_copyindirs}" ]; then ++ # Inject copyindirs directories and files ++ virt-copy-in -d ${DOMAIN_NAME} ${OCF_RESKEY_copyindirs} / ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed on virt-copy-in command ${DOMAIN_NAME}." ++ return $OCF_ERR_GENERIC ++ fi ++ fi ++ else ++ virsh define ${OCF_RESKEY_config} ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to define virtual domain ${DOMAIN_NAME}." ++ return $OCF_ERR_GENERIC ++ fi ++ fi ++ ++ virsh $VIRSH_OPTIONS start ${DOMAIN_NAME} ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}." ++ return $OCF_ERR_GENERIC ++ fi + fi + + while ! VirtualDomain_monitor; do +@@ -926,6 +999,11 @@ + ocf_exit_reason "migration_downtime has to be a decimal value" + return $OCF_ERR_CONFIGURED + fi ++ ++ if ocf_is_true "${OCF_RESKEY_stateless}" && [ -z "${OCF_RESKEY_backingfile}" ]; then ++ ocf_exit_reason "Stateless functionality can't be achieved without a backing file." ++ return $OCF_ERR_CONFIGURED ++ fi + } + + VirtualDomain_getconfig() { diff --git a/SOURCES/aliyun-vpc-move-ip-1.patch b/SOURCES/aliyun-vpc-move-ip-1.patch new file mode 100644 index 0000000..ab948dc --- /dev/null +++ b/SOURCES/aliyun-vpc-move-ip-1.patch @@ -0,0 +1,275 @@ +From e45d0ca9ccc3d5fbe94372f40bedb7559dc9530a Mon Sep 17 00:00:00 2001 +From: "feng.changf1" +Date: Tue, 24 Jul 2018 15:08:45 +0800 +Subject: [PATCH] Add Aliyun vpc-move-ip agent. + +--- + heartbeat/aliyun-vpc-move-ip | 258 +++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 258 insertions(+) + create mode 100644 heartbeat/aliyun-vpc-move-ip + +diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +new file mode 100644 +index 000000000..bc97822a8 +--- /dev/null ++++ b/heartbeat/aliyun-vpc-move-ip +@@ -0,0 +1,258 @@ ++#!/bin/bash ++# ++# OCF resource agent to move an IP address within a VPC in the Aliyun ++# Based on code of Markus Guertler (GitHub AWS-VPC-move-IP) ++# Based on code of Adam Gandelman (GitHub ec2-resource-agents/elasticip) ++# ++ ++############################################################################### ++# For testing purposes delete OCF_ROOT after testing ++OCF_ROOT=/usr/lib/ocf/ ++# ++# INIT ++#: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat} ++#if [ -f ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs ]; then ++# . ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs ++#fi ++ ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs} ++. 
${OCF_FUNCTIONS} ++: ${__OCF_ACTION=$1} ++export HOME=/root ++####################################################################### ++ ++ ++USAGE="usage: $0 {start|stop|status|meta-data}"; ++############################################################################### ++ ++ ++############################################################################### ++# ++# Functions ++# ++############################################################################### ++ ++ ++metadata() { ++cat < ++ ++ ++2.0 ++ ++Resource Agent to move IP addresses within a VPC of the Aliyun Webservices ECS ++by changing an entry in an specific routing table ++ ++Move IP within a APC of the Aliyun ECS ++ ++ ++ ++VPC private IP address ++ ++vpc ip ++ ++ ++ ++ ++Name of the routing table, where the route for the IP address should be changed, i.e. rtb-... ++ ++routing table name ++ ++ ++ ++ ++Name of the network interfacen, i.e. eth0 ++ ++network interface name ++ ++ ++ ++ ++Valid Aliyun CLI profile name ++ ++profile name ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++debugger() { ++ ocf_log info "DEBUG: $1" ++} ++ ++ecs_ip_validate() { ++ debugger "function: validate" ++ ++ # IP address ++ [[ -z "$OCF_RESKEY_address" ]] && ocf_log error "IP address parameter not set $OCF_RESKEY_ADDRESS!" && exit $OCF_ERR_CONFIGURED ++ ++ # Network Interface ++ [[ -z "$OCF_RESKEY_interface" ]] && ocf_log error "Network interface parameter not set $OCF_RESKEY_INTERFACE!" && exit $OCF_ERR_CONFIGURED ++ ++ # Routing Table ++ [[ -z "$OCF_RESKEY_routing_table" ]] && ocf_log error "Routing table parameter not set $OCF_RESKEY_ROUTING_TABLE!" && exit $OCF_ERR_CONFIGURED ++ ++ ECS_INSTANCE_ID="$(curl -s http://100.100.100.200/latest/meta-data/instance-id)" ++ ++ if [ -z "${ECS_INSTANCE_ID}" ]; then ++ ocf_exit_reason "Instance ID not found. Is this a ECS instance?" ++ return $OCF_ERR_GENERIC ++ fi ++ ++ return $OCF_SUCCESS ++} ++ ++ecs_ip_monitor() { ++ ecs_ip_validate ++ debugger "function: ecsip_monitor: check routing table" ++ cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" ++ debugger "executing command: $cmd" ++ ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ if [ -z "$ROUTE_TO_INSTANCE" ]; then ++ ROUTE_TO_INSTANCE="" ++ fi ++ ++ [[ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]] && debugger "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" && return $OCF_NOT_RUNNING ++ cmd="ping -W 1 -c 1 $OCF_RESKEY_address" ++ debugger "executing command: $cmd" ++ $cmd > /dev/null ++ [[ $? -gt 0 ]] && debugger "IP $OCF_RESKEY_address not locally reachable via ping on this system" && return $OCF_NOT_RUNNING ++ debugger "routed in VPC and locally reachable" ++ return $OCF_SUCCESS ++} ++ ++ ++ecs_ip_drop() { ++ debugger "function: ecsip_drop" ++ cmd="ip addr delete ${OCF_RESKEY_address}/32 dev $OCF_RESKEY_interface" ++ debugger "executing command: $cmd" ++ $cmd ++ rc=$? ++ [[ $rc -gt 2 ]] && debugger "command failed, rc $rc" && return $OCF_ERR_GENERIC ++ debugger "command succeeded" ++ return $OCF_SUCCESS ++} ++ ++wait_for_deleted() { ++ while [ ! 
-z "$ROUTE_TO_INSTANCE" ]; do ++ sleep 1 ++ cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" ++ debugger "executing command: $cmd" ++ ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ done ++ sleep 5 ++} ++ ++wait_for_started() { ++ cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" ++ debugger "executing command: $cmd" ++ ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ ++ while [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; do ++ sleep 1 ++ cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" ++ debugger "executing command: $cmd" ++ ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ done ++ sleep 5 ++} ++ ++ecs_ip_get_and_configure() { ++ debugger "function: ecsip_get_and_configure" ++ ++ if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then ++ ++ if [ $ROUTE_TO_INSTANCE != "" ]; then ++ # Adjusting the routing table ++ cmd="aliyuncli vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text" ++ debugger "executing command: $cmd" ++ $cmd ++ rc=$? ++ [[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC ++ #wait_for_deleted ++ sleep 3 ++ fi ++ ++ cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" ++ debugger "executing command: $cmd" ++ $cmd ++ rc=$? ++ #[[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC ++ while [ $rc != 0 ]; do ++ sleep 2 ++ cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" ++ debugger "executing command: $cmd" ++ $cmd ++ rc=$? ++ done ++ wait_for_started ++ fi ++ ++ ++ # Reconfigure the local ip address ++ ecs_ip_drop ++ ip addr add "${OCF_RESKEY_address}/32" dev $OCF_RESKEY_interface ++ rc=$? ++ [[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC ++ debugger "-success" ++ return $OCF_SUCCESS ++} ++ ++ecs_ip_stop() { ++ ocf_log info "ECS: Bringing down IP address $OCF_RESKEY_address" ++ ecs_ip_validate ++ ecs_ip_monitor ++ [[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Address $OCF_RESKEY_address already down" && return $OCF_SUCCESS ++ ecs_ip_drop ++ [[ $? != $OCF_SUCCESS ]] && return $OCF_ERR_GENERIC ++ ecs_ip_monitor ++ [[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Successfully brought down $OCF_RESKEY_address" && return $OCF_SUCCESS ++ ocf_log error "ECS: Couldn't bring down IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface." ++ return $OCF_ERR_GENERIC ++} ++ ++ecs_ip_start() { ++ ocf_log info "ECS: Moving IP address $OCF_RESKEY_address to this host by adjusting routing table $OCF_RESKEY_routing_table" ++ ecs_ip_validate ++ ecs_ip_monitor ++ [[ $? == $OCF_SUCCESS ]] && ocf_log info "ECS: $OCF_RESKEY_address already started" && return $OCF_SUCCESS ++ ocf_log info "ECS: Adjusting routing table and locally configuring IP address" ++ ecs_ip_get_and_configure ++ [[ $? != 0 ]] && ocf_log error "Received $? from 'aliyun cli'" && return $OCF_ERR_GENERIC ++ return $OCF_SUCCESS ++ ecs_ip_monitor ++ [[ $? == $OCF_SUCCESS ]] && return $? 
++ ocf_log error "ECS: IP address couldn't be configured on this host (IP: $OCF_RESKEY_address, Interface: $OCF_RESKEY_interface)" ++ return $OCF_ERR_GENERIC ++} ++ ++############################################################################### ++# ++# MAIN ++# ++############################################################################### ++ ++case $__OCF_ACTION in ++ meta-data) metadata ++ exit $OCF_SUCCESS;; ++ monitor) ++ ecs_ip_monitor;; ++ stop) ++ ecs_ip_stop;; ++ validate-all) ecs_ip_validate;; ++ start) ++ ecs_ip_start;; ++ *) exit $OCF_ERR_UNIMPLEMENTED;; ++esac +\ No newline at end of file diff --git a/SOURCES/aliyun-vpc-move-ip-2-fixes.patch b/SOURCES/aliyun-vpc-move-ip-2-fixes.patch new file mode 100644 index 0000000..7c5db4c --- /dev/null +++ b/SOURCES/aliyun-vpc-move-ip-2-fixes.patch @@ -0,0 +1,451 @@ +From db3df55a6f7097e1da7d77eb361e9e7560f13353 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 24 Jul 2018 13:57:08 +0200 +Subject: [PATCH] aliyun-vpc-move-ip: fixes + +--- + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/aliyun-vpc-move-ip | 336 ++++++++++++++++++++++++------------------- + 3 files changed, 189 insertions(+), 149 deletions(-) + mode change 100644 => 100755 heartbeat/aliyun-vpc-move-ip + +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index 3ac0569de..fc9a67161 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -93,6 +93,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_WinPopup.7 \ + ocf_heartbeat_Xen.7 \ + ocf_heartbeat_Xinetd.7 \ ++ ocf_heartbeat_aliyun-vpc-move-ip.7 \ + ocf_heartbeat_anything.7 \ + ocf_heartbeat_apache.7 \ + ocf_heartbeat_asterisk.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index d4750bf09..6adc6bc3c 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -90,6 +90,7 @@ ocf_SCRIPTS = AoEtarget \ + Xen \ + Xinetd \ + ZFS \ ++ aliyun-vpc-move-ip \ + anything \ + apache \ + asterisk \ +diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +old mode 100644 +new mode 100755 +index bc97822a8..108feb247 +--- a/heartbeat/aliyun-vpc-move-ip ++++ b/heartbeat/aliyun-vpc-move-ip +@@ -1,30 +1,19 @@ +-#!/bin/bash ++#!/bin/sh + # + # OCF resource agent to move an IP address within a VPC in the Aliyun + # Based on code of Markus Guertler (GitHub AWS-VPC-move-IP) + # Based on code of Adam Gandelman (GitHub ec2-resource-agents/elasticip) + # + +-############################################################################### +-# For testing purposes delete OCF_ROOT after testing +-OCF_ROOT=/usr/lib/ocf/ +-# +-# INIT +-#: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat} +-#if [ -f ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs ]; then +-# . ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs +-#fi +- + ####################################################################### + # Initialization: +- +-: ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs} +-. ${OCF_FUNCTIONS} +-: ${__OCF_ACTION=$1} +-export HOME=/root ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ####################################################################### + +- ++# aliyuncli doesnt work without HOME parameter ++export HOME="/root" ++ + USAGE="usage: $0 {start|stop|status|meta-data}"; + ############################################################################### + +@@ -36,8 +25,96 @@ USAGE="usage: $0 {start|stop|status|meta-data}"; + ############################################################################### + + +-metadata() { +-cat < + + +@@ -74,8 +151,8 @@ Name of the network interfacen, i.e. eth0 + Valid Aliyun CLI profile name + + profile name +- +- ++ ++ + + + +@@ -88,171 +165,132 @@ Valid Aliyun CLI profile name + END + } + +-debugger() { +- ocf_log info "DEBUG: $1" +-} +- + ecs_ip_validate() { +- debugger "function: validate" +- ++ ocf_log debug "function: validate" ++ + # IP address +- [[ -z "$OCF_RESKEY_address" ]] && ocf_log error "IP address parameter not set $OCF_RESKEY_ADDRESS!" && exit $OCF_ERR_CONFIGURED +- ++ if [ -z "$OCF_RESKEY_address" ]; then ++ ocf_log err "IP address parameter not set $OCF_RESKEY_ADDRESS!" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ + # Network Interface +- [[ -z "$OCF_RESKEY_interface" ]] && ocf_log error "Network interface parameter not set $OCF_RESKEY_INTERFACE!" && exit $OCF_ERR_CONFIGURED +- ++ if [ -z "$OCF_RESKEY_interface" ]; then ++ ocf_log err "Network interface parameter not set $OCF_RESKEY_INTERFACE!" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ + # Routing Table +- [[ -z "$OCF_RESKEY_routing_table" ]] && ocf_log error "Routing table parameter not set $OCF_RESKEY_ROUTING_TABLE!" && exit $OCF_ERR_CONFIGURED +- +- ECS_INSTANCE_ID="$(curl -s http://100.100.100.200/latest/meta-data/instance-id)" ++ if [ -z "$OCF_RESKEY_routing_table" ]; then ++ ocf_log err "Routing table parameter not set $OCF_RESKEY_ROUTING_TABLE!" ++ exit $OCF_ERR_CONFIGURED ++ fi + + if [ -z "${ECS_INSTANCE_ID}" ]; then + ocf_exit_reason "Instance ID not found. Is this a ECS instance?" + return $OCF_ERR_GENERIC + fi +- +- return $OCF_SUCCESS +-} + +-ecs_ip_monitor() { +- ecs_ip_validate +- debugger "function: ecsip_monitor: check routing table" +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- debugger "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- if [ -z "$ROUTE_TO_INSTANCE" ]; then +- ROUTE_TO_INSTANCE="" +- fi +- +- [[ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]] && debugger "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" && return $OCF_NOT_RUNNING +- cmd="ping -W 1 -c 1 $OCF_RESKEY_address" +- debugger "executing command: $cmd" +- $cmd > /dev/null +- [[ $? -gt 0 ]] && debugger "IP $OCF_RESKEY_address not locally reachable via ping on this system" && return $OCF_NOT_RUNNING +- debugger "routed in VPC and locally reachable" +- return $OCF_SUCCESS +-} +- +- +-ecs_ip_drop() { +- debugger "function: ecsip_drop" +- cmd="ip addr delete ${OCF_RESKEY_address}/32 dev $OCF_RESKEY_interface" +- debugger "executing command: $cmd" +- $cmd +- rc=$? +- [[ $rc -gt 2 ]] && debugger "command failed, rc $rc" && return $OCF_ERR_GENERIC +- debugger "command succeeded" + return $OCF_SUCCESS + } + +-wait_for_deleted() { +- while [ ! 
-z "$ROUTE_TO_INSTANCE" ]; do +- sleep 1 +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- debugger "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- done +- sleep 5 +-} ++ecs_ip_start() { ++ ocf_log info "ECS: Moving IP address $OCF_RESKEY_address to this host by adjusting routing table $OCF_RESKEY_routing_table" + +-wait_for_started() { +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- debugger "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- +- while [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; do +- sleep 1 +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- debugger "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- done +- sleep 5 +-} ++ ecs_ip_monitor ++ if [ $? = $OCF_SUCCESS ]; then ++ ocf_log info "ECS: $OCF_RESKEY_address already started" ++ return $OCF_SUCCESS ++ fi + +-ecs_ip_get_and_configure() { +- debugger "function: ecsip_get_and_configure" +- +- if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then +- +- if [ $ROUTE_TO_INSTANCE != "" ]; then +- # Adjusting the routing table +- cmd="aliyuncli vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text" +- debugger "executing command: $cmd" +- $cmd +- rc=$? +- [[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC +- #wait_for_deleted +- sleep 3 +- fi +- +- cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" +- debugger "executing command: $cmd" +- $cmd +- rc=$? +- #[[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC +- while [ $rc != 0 ]; do +- sleep 2 +- cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" +- debugger "executing command: $cmd" +- $cmd +- rc=$? +- done +- wait_for_started ++ ocf_log info "ECS: Adjusting routing table and locally configuring IP address" ++ ip_get_and_configure ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "Received $rc from 'aliyun cli'" ++ return $OCF_ERR_GENERIC + fi +- +- +- # Reconfigure the local ip address +- ecs_ip_drop +- ip addr add "${OCF_RESKEY_address}/32" dev $OCF_RESKEY_interface ++ ++ ecs_ip_monitor + rc=$? +- [[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC +- debugger "-success" ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ ocf_log err "IP address couldn't be configured on this host (IP: $OCF_RESKEY_address, Interface: $OCF_RESKEY_interface)" ++ return $rc ++ fi ++ + return $OCF_SUCCESS + } + + ecs_ip_stop() { + ocf_log info "ECS: Bringing down IP address $OCF_RESKEY_address" +- ecs_ip_validate ++ + ecs_ip_monitor +- [[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Address $OCF_RESKEY_address already down" && return $OCF_SUCCESS +- ecs_ip_drop +- [[ $? != $OCF_SUCCESS ]] && return $OCF_ERR_GENERIC ++ if [ $? = $OCF_NOT_RUNNING ]; then ++ ocf_log info "ECS: Address $OCF_RESKEY_address already down" ++ return $OCF_SUCCESS ++ fi ++ ++ ip_drop ++ if [ $? 
-ne $OCF_SUCCESS ]; then ++ ocf_log err "ECS: Couldn't drop IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface." ++ return $OCF_ERR_GENERIC ++ fi ++ + ecs_ip_monitor +- [[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Successfully brought down $OCF_RESKEY_address" && return $OCF_SUCCESS +- ocf_log error "ECS: Couldn't bring down IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface." ++ if [ $? = $OCF_NOT_RUNNING ]; then ++ ocf_log info "ECS: Successfully brought down $OCF_RESKEY_address" ++ return $OCF_SUCCESS ++ fi ++ ++ ocf_log err "ECS: Couldn't bring down IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface." + return $OCF_ERR_GENERIC + } + +-ecs_ip_start() { +- ocf_log info "ECS: Moving IP address $OCF_RESKEY_address to this host by adjusting routing table $OCF_RESKEY_routing_table" +- ecs_ip_validate +- ecs_ip_monitor +- [[ $? == $OCF_SUCCESS ]] && ocf_log info "ECS: $OCF_RESKEY_address already started" && return $OCF_SUCCESS +- ocf_log info "ECS: Adjusting routing table and locally configuring IP address" +- ecs_ip_get_and_configure +- [[ $? != 0 ]] && ocf_log error "Received $? from 'aliyun cli'" && return $OCF_ERR_GENERIC +- return $OCF_SUCCESS +- ecs_ip_monitor +- [[ $? == $OCF_SUCCESS ]] && return $? +- ocf_log error "ECS: IP address couldn't be configured on this host (IP: $OCF_RESKEY_address, Interface: $OCF_RESKEY_interface)" +- return $OCF_ERR_GENERIC ++ecs_ip_monitor() { ++ ocf_log debug "function: ecsip_monitor: check routing table" ++ cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" ++ ocf_log debug "executing command: $cmd" ++ ++ ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ ++ if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then ++ ocf_log debug "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" ++ return $OCF_NOT_RUNNING ++ fi ++ ++ cmd="ping -W 1 -c 1 $OCF_RESKEY_address" ++ ocf_log debug "executing command: $cmd" ++ $cmd > /dev/null ++ if [ $? 
-ne 0 ]; then ++ ocf_log debug "IP $OCF_RESKEY_address not locally reachable via ping on this system" ++ return $OCF_NOT_RUNNING ++ fi ++ ocf_log debug "routed in VPC and locally reachable" ++ return $OCF_SUCCESS + } + ++ + ############################################################################### + # + # MAIN + # + ############################################################################### + +-case $__OCF_ACTION in +- meta-data) metadata ++case $__OCF_ACTION in ++ meta-data) ecs_ip_metadata + exit $OCF_SUCCESS;; +- monitor) +- ecs_ip_monitor;; +- stop) +- ecs_ip_stop;; + validate-all) ecs_ip_validate;; ++esac ++ ++ECS_INSTANCE_ID="$(curl -s http://100.100.100.200/latest/meta-data/instance-id)" ++ ++case $__OCF_ACTION in + start) ++ ecs_ip_validate + ecs_ip_start;; ++ stop) ++ ecs_ip_stop;; ++ monitor) ++ ecs_ip_monitor;; + *) exit $OCF_ERR_UNIMPLEMENTED;; +-esac +\ No newline at end of file ++esac diff --git a/SOURCES/aliyun-vpc-move-ip-3-fix-manpage.patch b/SOURCES/aliyun-vpc-move-ip-3-fix-manpage.patch new file mode 100644 index 0000000..619b721 --- /dev/null +++ b/SOURCES/aliyun-vpc-move-ip-3-fix-manpage.patch @@ -0,0 +1,22 @@ +From ee081df601f914079f111eec10cb81ab212130a9 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 25 Jul 2018 11:22:39 +0200 +Subject: [PATCH] aliyun-vpc-move-ip: fix manpage + +--- + heartbeat/aliyun-vpc-move-ip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +index 108feb247..e27952adb 100755 +--- a/heartbeat/aliyun-vpc-move-ip ++++ b/heartbeat/aliyun-vpc-move-ip +@@ -117,7 +117,7 @@ ecs_ip_metadata() { + cat < + +- ++ + 2.0 + + Resource Agent to move IP addresses within a VPC of the Aliyun Webservices ECS diff --git a/SOURCES/aliyun-vpc-move-ip-4-bundled.patch b/SOURCES/aliyun-vpc-move-ip-4-bundled.patch new file mode 100644 index 0000000..29a92b9 --- /dev/null +++ b/SOURCES/aliyun-vpc-move-ip-4-bundled.patch @@ -0,0 +1,15 @@ +--- a/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:45:38.432860930 +0200 ++++ b/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:51:06.341211557 +0200 +@@ -35,10 +35,10 @@ + USAGE="usage: $0 {start|stop|status|meta-data}"; + + if [ "${OCF_RESKEY_aliyuncli}" = "detect" ]; then +- OCF_RESKEY_aliyuncli="$(which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)" ++ OCF_RESKEY_aliyuncli="$(which aliyuncli-ra 2> /dev/null || which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)" + fi + +-if [[ "${OCF_RESKEY_aliyuncli##*/}" == 'aliyuncli' ]]; then ++if [[ "${OCF_RESKEY_aliyuncli##*/}" == 'aliyuncli-ra' ]] || [[ "${OCF_RESKEY_aliyuncli##*/}" == 'aliyuncli' ]]; then + OUTPUT="text" + EXECUTING='{ print $3 }' + IFS_=" " diff --git a/SOURCES/aliyun-vpc-move-ip-5-improve-metadata-manpage.patch b/SOURCES/aliyun-vpc-move-ip-5-improve-metadata-manpage.patch new file mode 100644 index 0000000..872158c --- /dev/null +++ b/SOURCES/aliyun-vpc-move-ip-5-improve-metadata-manpage.patch @@ -0,0 +1,49 @@ +From fc497e888afcb88babbc21a59883556335c070fa Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 31 Aug 2018 11:41:32 +0200 +Subject: [PATCH] aliyun-vpc-move-ip: improve metadata and manpage + +--- + heartbeat/aliyun-vpc-move-ip | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +index e27952adb..c004d26fc 100755 +--- a/heartbeat/aliyun-vpc-move-ip ++++ b/heartbeat/aliyun-vpc-move-ip +@@ -123,7 +123,7 @@ ecs_ip_metadata() { + Resource Agent to move IP 
addresses within a VPC of the Aliyun Webservices ECS + by changing an entry in an specific routing table + +-Move IP within a APC of the Aliyun ECS ++Move IP within a VPC of the Aliyun ECS + + + +@@ -134,21 +134,23 @@ VPC private IP address + + + +-Name of the routing table, where the route for the IP address should be changed, i.e. rtb-... ++Name of the routing table, where the route for the IP address should be changed, i.e. vtb-... + + routing table name + + + + +-Name of the network interfacen, i.e. eth0 ++Name of the network interface, i.e. eth0 + + network interface name + + + + +-Valid Aliyun CLI profile name ++Valid Aliyun CLI profile name (see 'aliyuncli-ra configure'). ++ ++See https://www.alibabacloud.com/help/doc-detail/43039.htm?spm=a2c63.p38356.b99.16.38a914abRZtOU3 for more information about aliyuncli-ra. + + profile name + diff --git a/SOURCES/aliyuncli-python3-fixes.patch b/SOURCES/aliyuncli-python3-fixes.patch new file mode 100644 index 0000000..22be4e1 --- /dev/null +++ b/SOURCES/aliyuncli-python3-fixes.patch @@ -0,0 +1,398 @@ +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 12:08:17.331785393 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 14:40:39.656330971 +0100 +@@ -13,7 +13,7 @@ + + def getFileName(self,keyValues): + filename = None +- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: ++ if '--filename' in keyValues and len(keyValues['--filename']) > 0: + filename = keyValues['--filename'][0] + else: + return filename, "A file name is needed! please use \'--filename\' and add the file name." +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 12:08:17.331785393 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 14:41:48.927128430 +0100 +@@ -13,7 +13,7 @@ + + def getFileName(self,keyValues): + filename = None +- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: ++ if '--filename' in keyValues and len(keyValues['--filename']) > 0: + filename = keyValues['--filename'][0] + else: + print("A profile is needed! 
please use \'--filename\' and add the profile name.") +@@ -21,7 +21,7 @@ + + def getInstanceCount(self,keyValues): + count = 1 +- if keyValues.has_key('--instancecount') and len(keyValues['--instancecount']) > 0: ++ if '--instancecount' in keyValues and len(keyValues['--instancecount']) > 0: + if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0: + count = keyValues['--instancecount'][0] + else: +@@ -113,7 +113,7 @@ + + def isAllocatePublicIpAddress(self,keyValues): + _publicIp = False +- if keyValues.has_key('--allocatepublicip') and len(keyValues['--allocatepublicip']) > 0: ++ if '--allocatepublicip' in keyValues and len(keyValues['--allocatepublicip']) > 0: + if keyValues['--allocatepublicip'][0] == "yes": + _publicIp = True + return _publicIp +@@ -125,7 +125,7 @@ + ''' + data = json.loads(jsonbody) + ''' +- if data.has_key('InstanceId') and len(data['InstanceId']) > 0: ++ if 'InstanceId' in data and len(data['InstanceId']) > 0: + instanceId = data['InstanceId'] + except Exception as e: + pass +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 12:08:17.331785393 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 14:42:11.772731833 +0100 +@@ -38,7 +38,7 @@ + + def getFileName(self,keyValues): + filename = None +- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: ++ if '--filename' in keyValues and len(keyValues['--filename']) > 0: + filename = keyValues['--filename'][0] + else: + return filename, "A file name is needed! please use \'--filename\' and add the file name." +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 12:08:17.331785393 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 14:39:09.247900469 +0100 +@@ -13,7 +13,7 @@ + + def getFileName(self,keyValues): + filename = None +- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: ++ if '--filename' in keyValues and len(keyValues['--filename']) > 0: + filename = keyValues['--filename'][0] + else: + return filename, "A filename is needed! please use \'--filename\' and add the file name." 
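For illustration, a minimal standalone Python sketch of the migration these hunks apply (the sample keyValues content is hypothetical, not taken from aliyuncli): dict.has_key() was removed in Python 3, while the `in` membership operator behaves identically on Python 2 and 3.

    # dict.has_key() exists only in Python 2; calling it on Python 3
    # raises AttributeError. The `in` operator works on both.
    keyValues = {'--filename': ['profile.json']}  # hypothetical input

    # Python 2-only form, as removed by this patch:
    #   if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:

    # Portable form, as added by this patch:
    if '--filename' in keyValues and len(keyValues['--filename']) > 0:
        filename = keyValues['--filename'][0]
        print(filename)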
+@@ -21,7 +21,7 @@ + def getInstanceCount(self,keyValues): + count = 1 + import_count = "--count" +- if keyValues.has_key(import_count) and len(keyValues[import_count]) > 0: ++ if import_count in keyValues and len(keyValues[import_count]) > 0: + if keyValues[import_count][0].isdigit() and int(keyValues[import_count][0]) >= 0: + count = keyValues[import_count][0] + else: +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2019-02-19 11:01:46.116653274 +0100 +@@ -17,37 +17,37 @@ + + def getConfigHandlerOptions(self): + return [ConfigCmd.name] +- ++ + def showConfig(self): + _credentialsPath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.credentials) + _configurePath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.configure) + config = dict() + configContent = dict() +- credentialsContent = dict () +- if os.path.exists(_configurePath): ++ credentialsContent = dict () ++ if os.path.exists(_configurePath): + for line in open(_configurePath): + line = line.strip('\n') + if line.find('=') > 0: + list = line.split("=",1) +- configContent[list[0]] = list[1] +- else: +- pass +- config['configure'] = configContent +- if os.path.exists(_credentialsPath): +- for line in open(_credentialsPath): ++ configContent[list[0]] = list[1] ++ else: ++ pass ++ config['configure'] = configContent ++ if os.path.exists(_credentialsPath): ++ for line in open(_credentialsPath): + line = line.strip('\n') + if line.find('=') > 0: + list = line.split("=",1) +- credentialsContent[list[0]] = list[1] +- else: +- pass +- config ['credentials'] = credentialsContent +- response.display_response("showConfigure",config,'table') ++ credentialsContent[list[0]] = list[1] ++ else: ++ pass ++ config ['credentials'] = credentialsContent ++ response.display_response("showConfigure",config,'table') + def importConfig(): + pass + def exportConfig(): + pass +- ++ + + + if __name__ == "__main__": +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 12:08:17.332785376 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 14:40:12.267806439 +0100 +@@ -20,7 +20,7 @@ + def handleProfileCmd(self, cmd, keyValues): + if cmd.lower() == ProfileCmd.useProfile.lower(): # confirm command is right + #check --name is valid +- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0: ++ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0: + _value = keyValues[ProfileCmd.name][0] # use the first value + self.extensionCliHandler.setUserProfile(_value) + else: +@@ -34,7 +34,7 @@ + newProfileName = '' + if cmd.lower() == ProfileCmd.addProfile.lower(): # confirm command is right + #check --name is valid +- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0: ++ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0: + _value = keyValues[ProfileCmd.name][0] # check the first value + # only input key and secret + newProfileName = _value +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 
b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 12:08:17.332785376 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 14:35:32.009660989 +0100 +@@ -137,9 +137,9 @@ + values.append(self.args[index]) + index = index + 1 + keyValues[currentValue] = values +- if keyValues.has_key(keystr) and keyValues[keystr].__len__() > 0: ++ if keystr in keyValues and keyValues[keystr].__len__() > 0: + _key = keyValues[keystr][0] +- if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0: ++ if secretstr in keyValues and keyValues[secretstr].__len__() > 0: + _secret = keyValues[secretstr][0] + #print("accesskeyid: ", _key , "accesskeysecret: ",_secret) + return _key, _secret +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2019-02-19 13:35:35.738680413 +0100 +@@ -19,8 +19,9 @@ + ''' + + import sys +-reload(sys) +-sys.setdefaultencoding('utf-8') ++if sys.version_info[0] < 3: ++ reload(sys) ++ sys.setdefaultencoding('utf-8') + __author__ = 'xixi.xxx' + import aliyunCliMain + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 12:08:17.332785376 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 11:15:19.920089641 +0100 +@@ -18,7 +18,7 @@ + ''' + + import aliyunCliConfiugre +-import urllib2 ++import urllib3 + import re + import os + import platform +@@ -151,7 +151,7 @@ + # this functino will get the latest version + def _getLatestTimeFromServer(self): + try: +- f = urllib2.urlopen(self.configure.server_url,data=None,timeout=5) ++ f = urllib3.urlopen(self.configure.server_url,data=None,timeout=5) + s = f.read() + return s + except Exception as e: +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 12:08:17.332785376 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 14:37:28.221649497 +0100 +@@ -26,7 +26,7 @@ + import aliyunSdkConfigure + import json + import cliError +-import urllib2 ++import urllib3 + import handleEndPoint + + from __init__ import __version__ +@@ -259,7 +259,7 @@ + def changeEndPoint(self, classname, keyValues): + endpoint = "Endpoint" + try: +- if keyValues.has_key(endpoint) and keyValues[endpoint].__len__() > 0: ++ if endpoint in keyValues and keyValues[endpoint].__len__() > 0: + classname._RestApi__domain = keyValues[endpoint][0] + except Exception as e: + pass +@@ -444,10 +444,10 @@ + + def getTempVersion(self,keyValues): + key='--version' +- if keyValues is not None and keyValues.has_key(key): ++ if keyValues is not None and key in keyValues: + return keyValues.get(key) + key = 'version' +- if keyValues is not None and keyValues.has_key(key): ++ if keyValues is not None and key in keyValues: + return keyValues.get(key) + + def getVersionFromFile(self,cmd): +@@ -513,7 +513,7 @@ + self.checkForServer(response,cmd,operation) + def getRequestId(self,response): + try: +- if response.has_key('RequestId') and len(response['RequestId']) > 0: ++ if 'RequestId' in response and len(response['RequestId']) > 0: + 
requestId = response['RequestId'] + return requestId + except Exception: +@@ -532,7 +532,7 @@ + ua = "" + url = configure.server_url + "?requesId=" + requestId + "&ak=" + ak +"&ua="+ua+"&cmd="+cmd+"&operation="+operation + try: +- f = urllib2.urlopen(url,data=None,timeout=5) ++ f = urllib3.urlopen(url,data=None,timeout=5) + s = f.read() + return s + except Exception : +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 12:08:17.333785359 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 14:38:04.032029661 +0100 +@@ -39,7 +39,7 @@ + + def sdkConfigure(self,cmd,operation): + keyValues = self.parser._getKeyValues() +- if keyValues.has_key('--version') and len(keyValues['--version']) > 0: ++ if '--version' in keyValues and len(keyValues['--version']) > 0: + version=keyValues['--version'][0] + filename=self.fileName + self.writeCmdVersionToFile(cmd,version,filename) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 12:08:17.333785359 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 11:12:58.670708353 +0100 +@@ -23,6 +23,8 @@ + import aliyunCliParser + import platform + ++if sys.version_info[0] > 2: ++ raw_input = input + + OSS_CREDS_FILENAME = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~') + OSS_CONFIG_SECTION = 'OSSCredentials' +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 12:08:17.333785359 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 11:14:58.926181598 +0100 +@@ -19,7 +19,7 @@ + #/usr/bin/env python + #!-*- coding:utf-8 -*- + import os +-import urllib2 ++import urllib3 + import cliError + + +@@ -64,9 +64,9 @@ + print(e) + def _getParamFromUrl(prefix,value,mode): + +- req = urllib2.Request(value) ++ req = urllib3.Request(value) + try: +- response=urllib2.urlopen(req) ++ response=urllib3.urlopen(req) + if response.getcode() == 200: + return response.read() + else: +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/six.py b/bundled/aliyun/aliyun-cli/aliyuncli/six.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2019-02-19 11:14:40.505262286 +0100 +@@ -340,8 +340,8 @@ + + + _urllib_error_moved_attributes = [ +- MovedAttribute("URLError", "urllib2", "urllib.error"), +- MovedAttribute("HTTPError", "urllib2", "urllib.error"), ++ MovedAttribute("URLError", "urllib3", "urllib.error"), ++ MovedAttribute("HTTPError", "urllib3", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), + ] + for attr in _urllib_error_moved_attributes: +@@ -359,34 +359,34 @@ + + + _urllib_request_moved_attributes = [ +- MovedAttribute("urlopen", "urllib2", "urllib.request"), +- MovedAttribute("install_opener", "urllib2", "urllib.request"), +- MovedAttribute("build_opener", "urllib2", "urllib.request"), ++ MovedAttribute("urlopen", "urllib3", "urllib.request"), ++ MovedAttribute("install_opener", "urllib3", "urllib.request"), ++ MovedAttribute("build_opener", "urllib3", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", 
"urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), +- MovedAttribute("Request", "urllib2", "urllib.request"), +- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), +- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), +- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), +- MovedAttribute("BaseHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), +- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), +- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), +- MovedAttribute("FileHandler", "urllib2", "urllib.request"), +- MovedAttribute("FTPHandler", "urllib2", "urllib.request"), +- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), +- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), ++ MovedAttribute("Request", "urllib3", "urllib.request"), ++ MovedAttribute("OpenerDirector", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPDefaultErrorHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPRedirectHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPCookieProcessor", "urllib3", "urllib.request"), ++ MovedAttribute("ProxyHandler", "urllib3", "urllib.request"), ++ MovedAttribute("BaseHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPPasswordMgr", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib3", "urllib.request"), ++ MovedAttribute("AbstractBasicAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPBasicAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("ProxyBasicAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("AbstractDigestAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPDigestAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("ProxyDigestAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPSHandler", "urllib3", "urllib.request"), ++ MovedAttribute("FileHandler", "urllib3", "urllib.request"), ++ MovedAttribute("FTPHandler", "urllib3", "urllib.request"), ++ MovedAttribute("CacheFTPHandler", "urllib3", "urllib.request"), ++ MovedAttribute("UnknownHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPErrorProcessor", "urllib3", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), +diff -uNr a/bundled/aliyun/aliyun-cli/setup.py b/bundled/aliyun/aliyun-cli/setup.py +--- a/bundled/aliyun/aliyun-cli/setup.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/setup.py 2019-02-19 13:33:29.069848394 +0100 +@@ 
-24,7 +24,7 @@
+
+ install_requires = [
+ 'colorama>=0.2.5,<=0.3.3',
+- 'jmespath>=0.7.0,<=0.7.1',
++ 'jmespath>=0.7.0',
+ ]
+ def main():
+ setup(
diff --git a/SOURCES/aws-vpc-move-ip-1-avoid-false-positive-monitor.patch b/SOURCES/aws-vpc-move-ip-1-avoid-false-positive-monitor.patch
new file mode 100644
index 0000000..9c23ffa
--- /dev/null
+++ b/SOURCES/aws-vpc-move-ip-1-avoid-false-positive-monitor.patch
@@ -0,0 +1,39 @@
+From 7632a85bcf642b484df52a25dbffbfa0031421bc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michal=20Koutn=C3=BD?=
+Date: Mon, 6 Aug 2018 18:04:52 +0200
+Subject: [PATCH] aws-vpc-move-ip: Use ip utility to check address
+
+When pinging the assigned address during the initial monitor (probe) on one
+node, we may actually ping the reachable address while the resource is
+running on another node. This yields a false-positive monitor result on
+the pinging node. Avoid this by merely checking the assignment of the
+address to an interface.
+---
+ heartbeat/aws-vpc-move-ip | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index cefa38e03..3bbbed474 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -167,15 +167,15 @@ ec2ip_monitor() {
+ ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call"
+ fi
+
+- cmd="ping -W 1 -c 1 $OCF_RESKEY_ip"
++ cmd="ip addr show to '$OCF_RESKEY_ip' up"
+ ocf_log debug "executing command: $cmd"
+- $cmd > /dev/null
+- if [ "$?" -gt 0 ]; then
+- ocf_log warn "IP $OCF_RESKEY_ip not locally reachable via ping on this system"
++ RESULT=$($cmd | grep '$OCF_RESKEY_ip')
++ if [ -z "$RESULT" ]; then
++ ocf_log warn "IP $OCF_RESKEY_ip not assigned to running interface"
+ return $OCF_NOT_RUNNING
+ fi
+
+- ocf_log debug "route in VPC and locally reachable"
++ ocf_log debug "route in VPC and address assigned"
+ return $OCF_SUCCESS
+ }
+
diff --git a/SOURCES/aws-vpc-move-ip-2-avoid-false-positive-monitor.patch b/SOURCES/aws-vpc-move-ip-2-avoid-false-positive-monitor.patch
new file mode 100644
index 0000000..4d1cbf9
--- /dev/null
+++ b/SOURCES/aws-vpc-move-ip-2-avoid-false-positive-monitor.patch
@@ -0,0 +1,31 @@
+From 42dccdd20aff3ebf134c8041f79ab0a658975e69 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michal=20Koutn=C3=BD?=
+Date: Thu, 30 Aug 2018 18:02:51 +0200
+Subject: [PATCH] aws-vpc-move-ip: Fix broken shell quoting
+
+The 4th argument to `ip` is passed with single quotes around it, which
+cannot be parsed as a valid IP address. Furthermore, $OCF_RESKEY_ip needs
+to be expanded for grep. This breaks correct detection of the assigned
+address.
+
+Fixes 7632a85bcf642b484df52a25dbffbfa0031421bc.
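For illustration, a minimal standalone shell sketch of the quoting pitfall described above (the address value is hypothetical; this snippet is not part of the patch):

    #!/bin/sh
    OCF_RESKEY_ip=10.0.0.10   # hypothetical address

    # Broken form: the variable expands inside the double-quoted string,
    # but the literal single quotes travel with it, so `ip` is asked about
    # '10.0.0.10' (quotes included) and fails to parse it. The single-quoted
    # grep pattern never expands and matches the literal text $OCF_RESKEY_ip.
    cmd="ip addr show to '$OCF_RESKEY_ip' up"
    RESULT=$($cmd | grep '$OCF_RESKEY_ip')

    # Fixed form from this patch: no stray quotes for `ip`, and double
    # quotes so grep sees the expanded address.
    cmd="ip addr show to $OCF_RESKEY_ip up"
    RESULT=$($cmd | grep "$OCF_RESKEY_ip")
    [ -n "$RESULT" ] && echo "IP assigned to a running interface"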
+--- + heartbeat/aws-vpc-move-ip | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 3bbbed474..ce3fc6b9a 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,9 +167,9 @@ ec2ip_monitor() { + ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call" + fi + +- cmd="ip addr show to '$OCF_RESKEY_ip' up" ++ cmd="ip addr show to $OCF_RESKEY_ip up" + ocf_log debug "executing command: $cmd" +- RESULT=$($cmd | grep '$OCF_RESKEY_ip') ++ RESULT=$($cmd | grep "$OCF_RESKEY_ip") + if [ -z "$RESULT" ]; then + ocf_log warn "IP $OCF_RESKEY_ip not assigned to running interface" + return $OCF_NOT_RUNNING diff --git a/SOURCES/build-add-missing-manpages.patch b/SOURCES/build-add-missing-manpages.patch new file mode 100644 index 0000000..6ac1c2d --- /dev/null +++ b/SOURCES/build-add-missing-manpages.patch @@ -0,0 +1,43 @@ +diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am +--- a/doc/man/Makefile.am 2018-07-27 13:05:09.611188363 +0200 ++++ b/doc/man/Makefile.am 2018-07-27 13:05:43.981806873 +0200 +@@ -97,6 +97,7 @@ + ocf_heartbeat_WinPopup.7 \ + ocf_heartbeat_Xen.7 \ + ocf_heartbeat_Xinetd.7 \ ++ ocf_heartbeat_ZFS.7 \ + ocf_heartbeat_aliyun-vpc-move-ip.7 \ + ocf_heartbeat_anything.7 \ + ocf_heartbeat_apache.7 \ +@@ -136,6 +137,7 @@ + ocf_heartbeat_lxd-info.7 \ + ocf_heartbeat_machine-info.7 \ + ocf_heartbeat_mariadb.7 \ ++ ocf_heartbeat_minio.7 \ + ocf_heartbeat_mysql.7 \ + ocf_heartbeat_mysql-proxy.7 \ + ocf_heartbeat_nagios.7 \ +@@ -150,6 +152,7 @@ + ocf_heartbeat_oracle.7 \ + ocf_heartbeat_oralsnr.7 \ + ocf_heartbeat_ovsmonitor.7 \ ++ ocf_heartbeat_pgagent.7 \ + ocf_heartbeat_pgsql.7 \ + ocf_heartbeat_pingd.7 \ + ocf_heartbeat_portblock.7 \ +@@ -158,6 +161,7 @@ + ocf_heartbeat_proftpd.7 \ + ocf_heartbeat_rabbitmq-cluster.7 \ + ocf_heartbeat_redis.7 \ ++ ocf_heartbeat_rkt.7 \ + ocf_heartbeat_rsyncd.7 \ + ocf_heartbeat_rsyslog.7 \ + ocf_heartbeat_scsi2reservation.7 \ +@@ -172,6 +176,7 @@ + ocf_heartbeat_varnish.7 \ + ocf_heartbeat_vdo-vol.7 \ + ocf_heartbeat_vmware.7 \ ++ ocf_heartbeat_vsftpd.7 \ + ocf_heartbeat_zabbixserver.7 + + if USE_IPV6ADDR_AGENT diff --git a/SOURCES/bz1552330-vdo-vol.patch b/SOURCES/bz1552330-vdo-vol.patch new file mode 100644 index 0000000..2aa093d --- /dev/null +++ b/SOURCES/bz1552330-vdo-vol.patch @@ -0,0 +1,285 @@ +From 8b07d095acbbb1069c1fb44142ccfdd0aeed075f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 16 May 2018 14:10:49 +0200 +Subject: [PATCH] vdo-vol: new resource agent + +--- + doc/man/Makefile.am | 3 +- + heartbeat/Makefile.am | 1 + + heartbeat/vdo-vol | 234 ++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 237 insertions(+), 1 deletion(-) + create mode 100755 heartbeat/vdo-vol + +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index c59126d1..8d94c10c 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -158,11 +158,12 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_slapd.7 \ + ocf_heartbeat_sybaseASE.7 \ + ocf_heartbeat_sg_persist.7 \ +- ocf_heartbeat_mpathpersist.7 \ ++ ocf_heartbeat_mpathpersist.7 \ + ocf_heartbeat_symlink.7 \ + ocf_heartbeat_syslog-ng.7 \ + ocf_heartbeat_tomcat.7 \ + ocf_heartbeat_varnish.7 \ ++ ocf_heartbeat_vdo-vol.7 \ + ocf_heartbeat_vmware.7 \ + ocf_heartbeat_zabbixserver.7 + +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 4f5059e2..a68fa978 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -162,6 +162,7 @@ ocf_SCRIPTS = 
AoEtarget \ + syslog-ng \ + tomcat \ + varnish \ ++ vdo-vol \ + vmware \ + vsftpd \ + zabbixserver +diff --git a/heartbeat/vdo-vol b/heartbeat/vdo-vol +new file mode 100755 +index 00000000..074339db +--- /dev/null ++++ b/heartbeat/vdo-vol +@@ -0,0 +1,234 @@ ++#!/bin/sh ++# ++# License: GNU General Public License (GPL) ++# (c) 2018 O. Albrigtsen ++# and Linux-HA contributors ++# ++# ----------------------------------------------------------------------------- ++# O C F R E S O U R C E S C R I P T S P E C I F I C A T I O N ++# ----------------------------------------------------------------------------- ++# ++# NAME ++# vdo-vol : OCF resource agent script for VDO (Virtual Data Optimizer) ++# ++ ++# Initialization: ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++# Defaults ++OCF_RESKEY_volume_default="" ++ ++: ${OCF_RESKEY_volume=${OCF_RESKEY_volume_default}} ++ ++ ++vdo_usage() { ++ cat < ++ ++ ++0.75 ++ ++ ++OCF Resource script for VDO (Virtual Data Optimizer) volume(s). It manages VDO volume(s) as a HA resource. ++ ++The configuration file needs to be synced to all nodes, and the systemd vdo service must be disabled when ++using this agent. ++ ++VDO resource agent ++ ++ ++ ++ ++ Configuration file ++ Config file ++ ++ ++ ++ ++ VDO Volume (leave empty for all) ++ Volume (empty for all) ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++ ++rebuild() { ++ ocf_log warn "${OCF_RESKEY_volume} is in $MODE mode, starting in rebuild mode" ++ ++ vdo stop $OPTIONS ++ ++ while vdo_monitor skiprocheck; do ++ sleep 1 ++ done ++ ++ vdo start $OPTIONS --forceRebuild ++ ++ while ! vdo_monitor; do ++ sleep 1 ++ done ++ ++ return $? ++} ++ ++vdo_start() { ++ # if resource is already running,no need to continue code after this. ++ if vdo_monitor; then ++ ocf_log info "VDO volume(s): ${OCF_RESKEY_volume} is already active" ++ return $OCF_SUCCESS ++ fi ++ ++ vdo activate $OPTIONS ++ vdo start $OPTIONS ++ ++ while ! vdo_monitor skiprocheck; do ++ sleep 1 ++ done ++ ++ MODE=$(vdostats --verbose ${OCF_RESKEY_volume} | grep "operating mode" | awk '{print $NF}') ++ if [ $(echo "$MODE" | grep -v "normal" | wc -l) -gt 0 ]; then ++ rebuild ++ fi ++ ++ if [ $? -eq $OCF_SUCCESS ]; then ++ ocf_log info "VDO volume(s): ${OCF_RESKEY_volume} activated" ++ return ${OCF_SUCCESS} ++ fi ++ ++ return $? ++} ++ ++vdo_stop() { ++ vdo_monitor skiprocheck ++ if [ $? -ne $OCF_SUCCESS ]; then ++ # Currently not running. Nothing to do. ++ ocf_log info "VDO volume(s): ${OCF_RESKEY_volume} already deactivated" ++ ++ return $OCF_SUCCESS ++ fi ++ ++ vdo stop $OPTIONS ++ vdo deactivate $OPTIONS ++ ++ # Wait for process to stop ++ while vdo_monitor skiprocheck; do ++ sleep 1 ++ done ++ ++ return $OCF_SUCCESS ++} ++ ++vdo_monitor(){ ++ status=$(vdo status $OPTIONS 2>&1) ++ MODE=$(vdostats vdo_vol --verbose | grep "operating mode" | awk '{print $NF}') ++ ++ case "$status" in ++ *"Device mapper status: not available"*) ++ return $OCF_NOT_RUNNING ++ ;; ++ *"Device mapper status: "*online*) ++ if [ "$MODE" = "read-only" ] && [ "$1" != "skiprocheck" ]; then ++ ocf_log err "VDO volume(s): ${OCF_RESKEY_volume} is in $MODE mode." 
++ return $OCF_ERR_GENERIC ++ else ++ return $OCF_SUCCESS ++ fi ++ ;; ++ *) ++ ocf_log err "VDO volume(s): ${OCF_RESKEY_volume} failed\n$status" ++ return $OCF_ERR_GENERIC;; ++ esac ++} ++ ++vdo_validate_all(){ ++ check_binary "vdo" ++ ++ if systemctl is-enabled vdo > /dev/null 2>&1; then ++ ocf_exit_reason "systemd service vdo needs to be disabled" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ if [ -n "${OCF_RESKEY_config}" ] && [ ! -f "${OCF_RESKEY_config}" ]; then ++ ocf_exit_reason "Configuration file: ${OCF_RESKEY_config} not found" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ return $OCF_SUCCESS ++} ++ ++ ++# **************************** MAIN SCRIPT ************************************ ++ ++# Make sure meta-data and usage always succeed ++case $__OCF_ACTION in ++ meta-data) ++ vdo_meta_data ++ exit $OCF_SUCCESS ++ ;; ++ usage|help) ++ vdo_usage ++ exit $OCF_SUCCESS ++ ;; ++esac ++ ++# This OCF agent script need to be run as root user. ++if ! ocf_is_root; then ++ echo "$0 agent script need to be run as root user." ++ ocf_log debug "$0 agent script need to be run as root user." ++ exit $OCF_ERR_GENERIC ++fi ++ ++if [ -z "${OCF_RESKEY_volume}" ]; then ++ OPTIONS="-a" ++else ++ OPTIONS="-n ${OCF_RESKEY_volume}" ++fi ++ ++if [ -n "${OCF_RESKEY_config}" ]; then ++ OPTIONS="$OPTIONS -f ${OCF_RESKEY_config}" ++fi ++ ++# Translate each action into the appropriate function call ++case $__OCF_ACTION in ++ start) ++ vdo_validate_all ++ vdo_start;; ++ stop) ++ vdo_stop;; ++ status|monitor) ++ vdo_monitor;; ++ validate-all) ++ ;; ++ *) ++ vdo_usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac ++ ++exit $? ++ ++# End of this script +-- +2.17.1 + diff --git a/SOURCES/bz1607607-podman.patch b/SOURCES/bz1607607-podman.patch new file mode 100644 index 0000000..572e761 --- /dev/null +++ b/SOURCES/bz1607607-podman.patch @@ -0,0 +1,538 @@ +From 07d283a6e20b8e559257c9694f7e36e155075014 Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Sun, 22 Jul 2018 17:54:29 +0200 +Subject: [PATCH] Initial podman support + +Tested with the following container: + podman container set: test_bundle [docker.io/sdelrio/docker-minimal-nginx] + test_bundle-podman-0 (ocf::heartbeat:podman): Started nodea + test_bundle-podman-1 (ocf::heartbeat:podman): Started nodeb + test_bundle-podman-2 (ocf::heartbeat:podman): Started nodec + +Tested a couple of stop/start cycles successfully. 
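For illustration, a hedged usage sketch of the new agent as a plain primitive rather than a bundle; the resource name is hypothetical and the pcs invocation is assumed, not taken from this commit:

    # Create a standalone podman-managed container resource.
    pcs resource create web-ctr ocf:heartbeat:podman \
        image=docker.io/sdelrio/docker-minimal-nginx \
        allow_pull=true force_kill=false \
        op monitor interval=30s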
Needs the +corresponding pacemaker support https://github.com/ClusterLabs/pacemaker/pull/1564 +--- + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/podman | 488 ++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 490 insertions(+) + create mode 100755 heartbeat/podman + +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index 145e5fd50..0bef88740 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -151,6 +151,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_pgagent.7 \ + ocf_heartbeat_pgsql.7 \ + ocf_heartbeat_pingd.7 \ ++ ocf_heartbeat_podman.7 \ + ocf_heartbeat_portblock.7 \ + ocf_heartbeat_postfix.7 \ + ocf_heartbeat_pound.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index e7a3a4fac..993bff042 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -146,6 +146,7 @@ ocf_SCRIPTS = AoEtarget \ + pgagent \ + pgsql \ + pingd \ ++ podman \ + portblock \ + postfix \ + pound \ +diff --git a/heartbeat/podman b/heartbeat/podman +new file mode 100755 +index 000000000..88475f1df +--- /dev/null ++++ b/heartbeat/podman +@@ -0,0 +1,488 @@ ++#!/bin/sh ++# ++# The podman HA resource agent creates and launches a podman container ++# based off a supplied podman image. Containers managed by this agent ++# are both created and removed upon the agent's start and stop actions. ++# ++# Copyright (c) 2014 David Vossel ++# Michele Baldessari ++# All Rights Reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++ ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++meta_data() ++{ ++ cat < ++ ++ ++1.0 ++ ++ ++The podman HA resource agent creates and launches a podman container ++based off a supplied podman image. Containers managed by this agent ++are both created and removed upon the agent's start and stop actions. ++ ++Podman container resource agent. ++ ++ ++ ++ ++The podman image to base this container off of. ++ ++podman image ++ ++ ++ ++ ++ ++The name to give the created container. By default this will ++be that resource's instance name. ++ ++podman container name ++ ++ ++ ++ ++ ++Allow the image to be pulled from the configured podman registry when ++the image does not exist locally. NOTE, this can drastically increase ++the time required to start the container if the image repository is ++pulled over the network. 
++ ++Allow pulling non-local images ++ ++ ++ ++ ++ ++Add options to be appended to the 'podman run' command which is used ++when creating the container during the start action. This option allows ++users to do things such as setting a custom entry point and injecting ++environment variables into the newly created container. Note the '-d' ++option is supplied regardless of this value to force containers to run ++in the background. ++ ++NOTE: Do not explicitly specify the --name argument in the run_opts. This ++agent will set --name using either the resource's instance or the name ++provided in the 'name' argument of this agent. ++ ++ ++run options ++ ++ ++ ++ ++ ++Specify a command to launch within the container once ++it has initialized. ++ ++run command ++ ++ ++ ++ ++ ++A comma separated list of directories that the container is expecting to use. ++The agent will ensure they exist by running 'mkdir -p' ++ ++Required mount points ++ ++ ++ ++ ++ ++Specify the full path of a command to launch within the container to check ++the health of the container. This command must return 0 to indicate that ++the container is healthy. A non-zero return code will indicate that the ++container has failed and should be recovered. ++ ++If 'podman exec' is supported, it is used to execute the command. If not, ++nsenter is used. ++ ++Note: Using this method for monitoring processes inside a container ++is not recommended, as containerd tries to track processes running ++inside the container and does not deal well with many short-lived ++processes being spawned. Ensure that your container monitors its ++own processes and terminates on fatal error rather than invoking ++a command from the outside. ++ ++monitor command ++ ++ ++ ++ ++ ++Kill a container immediately rather than waiting for it to gracefully ++shutdown ++ ++force kill ++ ++ ++ ++ ++ ++Allow the container to be reused after stopping the container. By default ++containers are removed after stop. With the reuse option containers ++will persist after the container stops. ++ ++reuse container ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++REQUIRE_IMAGE_PULL=0 ++ ++podman_usage() ++{ ++ cat </dev/null 2>&1; then ++ out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) ++ rc=$? ++ else ++ out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(podman inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1) ++ rc=$? ++ fi ++ ++ if [ $rc -eq 127 ]; then ++ ocf_log err "monitor cmd failed (rc=$rc), output: $out" ++ ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container." ++ # there is no recovering from this, exit immediately ++ exit $OCF_ERR_ARGS ++ elif [ $rc -ne 0 ]; then ++ ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out" ++ rc=$OCF_ERR_GENERIC ++ else ++ ocf_log debug "monitor cmd passed: exit code = $rc" ++ fi ++ ++ return $rc ++} ++ ++container_exists() ++{ ++ podman inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1 ++} ++ ++remove_container() ++{ ++ if ocf_is_true "$OCF_RESKEY_reuse"; then ++ # never remove the container if we have reuse enabled. ++ return 0 ++ fi ++ ++ container_exists ++ if [ $? -ne 0 ]; then ++ # don't attempt to remove a container that doesn't exist ++ return 0 ++ fi ++ ocf_log notice "Cleaning up inactive container, ${CONTAINER}." 
++ ocf_run podman rm $CONTAINER ++} ++ ++podman_simple_status() ++{ ++ local val ++ ++ container_exists ++ if [ $? -ne 0 ]; then ++ return $OCF_NOT_RUNNING ++ fi ++ ++ # retrieve the 'Running' attribute for the container ++ val=$(podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null) ++ if [ $? -ne 0 ]; then ++ #not running as a result of container not being found ++ return $OCF_NOT_RUNNING ++ fi ++ ++ if ocf_is_true "$val"; then ++ # container exists and is running ++ return $OCF_SUCCESS ++ fi ++ ++ return $OCF_NOT_RUNNING ++} ++ ++podman_monitor() ++{ ++ local rc=0 ++ ++ podman_simple_status ++ rc=$? ++ ++ if [ $rc -ne 0 ]; then ++ return $rc ++ fi ++ ++ monitor_cmd_exec ++} ++ ++podman_create_mounts() { ++ oldIFS="$IFS" ++ IFS="," ++ for directory in $OCF_RESKEY_mount_points; do ++ mkdir -p "$directory" ++ done ++ IFS="$oldIFS" ++} ++ ++podman_start() ++{ ++ podman_create_mounts ++ local run_opts="-d --name=${CONTAINER}" ++ # check to see if the container has already started ++ podman_simple_status ++ if [ $? -eq $OCF_SUCCESS ]; then ++ return $OCF_SUCCESS ++ fi ++ ++ if [ -n "$OCF_RESKEY_run_opts" ]; then ++ run_opts="$run_opts $OCF_RESKEY_run_opts" ++ fi ++ ++ if [ $REQUIRE_IMAGE_PULL -eq 1 ]; then ++ ocf_log notice "Beginning pull of image, ${OCF_RESKEY_image}" ++ podman pull "${OCF_RESKEY_image}" ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "failed to pull image ${OCF_RESKEY_image}" ++ return $OCF_ERR_GENERIC ++ fi ++ fi ++ ++ if ocf_is_true "$OCF_RESKEY_reuse" && container_exists; then ++ ocf_log info "starting existing container $CONTAINER." ++ ocf_run podman start $CONTAINER ++ else ++ # make sure any previous container matching our container name is cleaned up first. ++ # we already know at this point it wouldn't be running ++ remove_container ++ ocf_log info "running container $CONTAINER for the first time" ++ ocf_run podman run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd ++ fi ++ ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "podman failed to launch container" ++ return $OCF_ERR_GENERIC ++ fi ++ ++ ++ # wait for monitor to pass before declaring that the container is started ++ while true; do ++ podman_simple_status ++ if [ $? -ne $OCF_SUCCESS ]; then ++ ocf_exit_reason "Newly created podman container exited after start" ++ return $OCF_ERR_GENERIC ++ fi ++ ++ monitor_cmd_exec ++ if [ $? -eq $OCF_SUCCESS ]; then ++ ocf_log notice "Container $CONTAINER started successfully" ++ return $OCF_SUCCESS ++ fi ++ ++ ocf_exit_reason "waiting on monitor_cmd to pass after start" ++ sleep 1 ++ done ++} ++ ++podman_stop() ++{ ++ local timeout=60 ++ podman_simple_status ++ if [ $? -eq $OCF_NOT_RUNNING ]; then ++ remove_container ++ return $OCF_SUCCESS ++ fi ++ ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000) -10 )) ++ if [ $timeout -lt 10 ]; then ++ timeout=10 ++ fi ++ fi ++ ++ if ocf_is_true "$OCF_RESKEY_force_kill"; then ++ ocf_run podman kill $CONTAINER ++ else ++ ocf_log debug "waiting $timeout second[s] before killing container" ++ ocf_run podman stop -t=$timeout $CONTAINER ++ fi ++ ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to stop container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}." ++ return $OCF_ERR_GENERIC ++ fi ++ ++ remove_container ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to remove stopped container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}." 
++ return $OCF_ERR_GENERIC ++ fi ++ ++ return $OCF_SUCCESS ++} ++ ++image_exists() ++{ ++ # if no tag was specified, use default "latest" ++ local COLON_FOUND=0 ++ local SLASH_FOUND=0 ++ local SERVER_NAME="" ++ local IMAGE_NAME="${OCF_RESKEY_image}" ++ local IMAGE_TAG="latest" ++ ++ SLASH_FOUND="$(echo "${OCF_RESKEY_image}" | grep -o '/' | grep -c .)" ++ ++ if [ ${SLASH_FOUND} -ge 1 ]; then ++ SERVER_NAME="$(echo ${IMAGE_NAME} | cut -d / -f 1-${SLASH_FOUND})" ++ IMAGE_NAME="$(echo ${IMAGE_NAME} | awk -F'/' '{print $NF}')" ++ fi ++ ++ COLON_FOUND="$(echo "${IMAGE_NAME}" | grep -o ':' | grep -c .)" ++ if [ ${COLON_FOUND} -ge 1 ]; then ++ IMAGE_TAG="$(echo ${IMAGE_NAME} | awk -F':' '{print $NF}')" ++ IMAGE_NAME="$(echo ${IMAGE_NAME} | cut -d : -f 1-${COLON_FOUND})" ++ fi ++ ++ # IMAGE_NAME might be following formats: ++ # - image ++ # - repository:port/image ++ # - docker.io/image (some distro will display "docker.io/" as prefix) ++ podman images | awk '{print $1 ":" $2}' | egrep -q -s "^(docker.io\/|${SERVER_NAME}\/)?${IMAGE_NAME}:${IMAGE_TAG}\$" ++ if [ $? -eq 0 ]; then ++ # image found ++ return 0 ++ fi ++ ++ if ocf_is_true "$OCF_RESKEY_allow_pull"; then ++ REQUIRE_IMAGE_PULL=1 ++ ocf_log notice "Image (${OCF_RESKEY_image}) does not exist locally but will be pulled during start" ++ return 0 ++ fi ++ # image not found. ++ return 1 ++} ++ ++podman_validate() ++{ ++ check_binary podman ++ if [ -z "$OCF_RESKEY_image" ]; then ++ ocf_exit_reason "'image' option is required" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ if [ -n "$OCF_RESKEY_monitor_cmd" ]; then ++ podman exec --help >/dev/null 2>&1 ++ if [ ! $? ]; then ++ ocf_log info "checking for nsenter, which is required when 'monitor_cmd' is specified" ++ check_binary nsenter ++ fi ++ fi ++ ++ image_exists ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "base image, ${OCF_RESKEY_image}, could not be found." ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ return $OCF_SUCCESS ++} ++ ++# TODO : ++# When a user starts plural clones in a node in globally-unique, a user cannot appoint plural name parameters. ++# When a user appoints reuse, the resource agent cannot connect plural clones with a container. ++ ++if ocf_is_true "$OCF_RESKEY_CRM_meta_globally_unique"; then ++ if [ -n "$OCF_RESKEY_name" ]; then ++ if [ -n "$OCF_RESKEY_CRM_meta_clone_node_max" ] && [ "$OCF_RESKEY_CRM_meta_clone_node_max" -ne 1 ] ++ then ++ ocf_exit_reason "Cannot make plural clones from the same name parameter." ++ exit $OCF_ERR_CONFIGURED ++ fi ++ if [ -n "$OCF_RESKEY_CRM_meta_master_node_max" ] && [ "$OCF_RESKEY_CRM_meta_master_node_max" -ne 1 ] ++ then ++ ocf_exit_reason "Cannot make plural master from the same name parameter." ++ exit $OCF_ERR_CONFIGURED ++ fi ++ fi ++ : ${OCF_RESKEY_name=`echo ${OCF_RESOURCE_INSTANCE} | tr ':' '-'`} ++else ++ : ${OCF_RESKEY_name=${OCF_RESOURCE_INSTANCE}} ++fi ++ ++CONTAINER=$OCF_RESKEY_name ++ ++case $__OCF_ACTION in ++meta-data) meta_data ++ exit $OCF_SUCCESS;; ++start) ++ podman_validate ++ podman_start;; ++stop) podman_stop;; ++monitor) podman_monitor;; ++validate-all) podman_validate;; ++usage|help) podman_usage ++ exit $OCF_SUCCESS ++ ;; ++*) podman_usage ++ exit $OCF_ERR_UNIMPLEMENTED ++ ;; ++esac ++rc=$? 
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" ++exit $rc diff --git a/SOURCES/bz1631291-systemd-tmpfiles-configurable-path.patch b/SOURCES/bz1631291-systemd-tmpfiles-configurable-path.patch new file mode 100644 index 0000000..850c318 --- /dev/null +++ b/SOURCES/bz1631291-systemd-tmpfiles-configurable-path.patch @@ -0,0 +1,48 @@ +From c70924b69af760ec3762b01594afb6ff82c3820c Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 19 Sep 2018 16:13:43 +0200 +Subject: [PATCH] systemd-tmpfiles: configure path with --with-rsctmpdir + +--- + configure.ac | 3 ++- + systemd/resource-agents.conf | 1 - + systemd/resource-agents.conf.in | 1 + + 3 files changed, 3 insertions(+), 2 deletions(-) + delete mode 100644 systemd/resource-agents.conf + create mode 100644 systemd/resource-agents.conf.in + +diff --git a/configure.ac b/configure.ac +index b7ffb99f3..e34d125e9 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -431,7 +431,7 @@ AC_SUBST(HA_VARRUNDIR) + + # Expand $prefix + eval HA_RSCTMPDIR="`eval echo ${HA_RSCTMPDIR}`" +-AC_DEFINE_UNQUOTED(HA_RSCTMPDIR,"$HA_RSCTMPDIR", Where Resouce agents keep state files) ++AC_DEFINE_UNQUOTED(HA_RSCTMPDIR,"$HA_RSCTMPDIR", Where Resource agents keep state files) + AC_SUBST(HA_RSCTMPDIR) + + dnl Eventually move out of the heartbeat dir tree and create symlinks when needed +@@ -911,6 +911,7 @@ heartbeat/Makefile \ + heartbeat/ocf-shellfuncs \ + heartbeat/shellfuncs \ + systemd/Makefile \ ++ systemd/resource-agents.conf \ + tools/Makefile \ + tools/ocf-tester \ + tools/ocft/Makefile \ +diff --git a/systemd/resource-agents.conf b/systemd/resource-agents.conf +deleted file mode 100644 +index 1cb129c18..000000000 +--- a/systemd/resource-agents.conf ++++ /dev/null +@@ -1 +0,0 @@ +-d /var/run/resource-agents/ 1755 root root +diff --git a/systemd/resource-agents.conf.in b/systemd/resource-agents.conf.in +new file mode 100644 +index 000000000..7bd157ec0 +--- /dev/null ++++ b/systemd/resource-agents.conf.in +@@ -0,0 +1 @@ ++d @HA_RSCTMPDIR@ 1755 root root diff --git a/SOURCES/bz1633251-gcp-pd-move-1.patch b/SOURCES/bz1633251-gcp-pd-move-1.patch new file mode 100644 index 0000000..c7cbe8e --- /dev/null +++ b/SOURCES/bz1633251-gcp-pd-move-1.patch @@ -0,0 +1,425 @@ +From dedf420b8aa7e7e64fa56eeda2d7aeb5b2a5fcd9 Mon Sep 17 00:00:00 2001 +From: Gustavo Serra Scalet +Date: Mon, 17 Sep 2018 12:29:51 -0300 +Subject: [PATCH] Add gcp-pd-move python script + +--- + configure.ac | 1 + + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/gcp-pd-move.in | 370 +++++++++++++++++++++++++++++++++++++++ + 4 files changed, 373 insertions(+) + create mode 100755 heartbeat/gcp-pd-move.in + +diff --git a/configure.ac b/configure.ac +index 10f5314da..b7ffb99f3 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -958,6 +958,7 @@ AC_CONFIG_FILES([heartbeat/conntrackd], [chmod +x heartbeat/conntrackd]) + AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate]) + AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88]) + AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio]) ++AC_CONFIG_FILES([heartbeat/gcp-pd-move], [chmod +x heartbeat/gcp-pd-move]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-route], [chmod +x heartbeat/gcp-vpc-move-route]) +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index 0bef88740..0235c9af6 100644 +--- a/doc/man/Makefile.am ++++ 
b/doc/man/Makefile.am +@@ -115,6 +115,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_fio.7 \ + ocf_heartbeat_galera.7 \ + ocf_heartbeat_garbd.7 \ ++ ocf_heartbeat_gcp-pd-move.7 \ + ocf_heartbeat_gcp-vpc-move-ip.7 \ + ocf_heartbeat_gcp-vpc-move-vip.7 \ + ocf_heartbeat_gcp-vpc-move-route.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 993bff042..843186c98 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -111,6 +111,7 @@ ocf_SCRIPTS = AoEtarget \ + fio \ + galera \ + garbd \ ++ gcp-pd-move \ + gcp-vpc-move-ip \ + gcp-vpc-move-vip \ + gcp-vpc-move-route \ +diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +new file mode 100755 +index 000000000..f9f6c3163 +--- /dev/null ++++ b/heartbeat/gcp-pd-move.in +@@ -0,0 +1,370 @@ ++#!@PYTHON@ -tt ++# - *- coding: utf- 8 - *- ++# ++# --------------------------------------------------------------------- ++# Copyright 2018 Google Inc. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); ++# you may not use this file except in compliance with the License. ++# You may obtain a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, ++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++# See the License for the specific language governing permissions and ++# limitations under the License. ++# --------------------------------------------------------------------- ++# Description: Google Cloud Platform - Disk attach ++# --------------------------------------------------------------------- ++ ++import json ++import logging ++import os ++import re ++import sys ++import time ++ ++OCF_FUNCTIONS_DIR = "%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++sys.path.append(OCF_FUNCTIONS_DIR) ++ ++import ocf ++ ++try: ++ import googleapiclient.discovery ++except ImportError: ++ pass ++ ++if sys.version_info >= (3, 0): ++ # Python 3 imports. ++ import urllib.parse as urlparse ++ import urllib.request as urlrequest ++else: ++ # Python 2 imports. ++ import urllib as urlparse ++ import urllib2 as urlrequest ++ ++ ++CONN = None ++PROJECT = None ++ZONE = None ++REGION = None ++LIST_DISK_ATTACHED_INSTANCES = None ++INSTANCE_NAME = None ++ ++PARAMETERS = { ++ 'disk_name': None, ++ 'disk_scope': None, ++ 'disk_csek_file': None, ++ 'mode': None, ++ 'device_name': None, ++} ++ ++MANDATORY_PARAMETERS = ['disk_name', 'disk_scope'] ++ ++METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' ++METADATA_HEADERS = {'Metadata-Flavor': 'Google'} ++METADATA = ''' ++ ++ ++1.0 ++ ++Resource Agent that can attach or detach a regional/zonal disk on current GCP ++instance. ++Requirements : ++- Disk has to be properly created as regional/zonal in order to be used ++correctly. ++ ++Attach/Detach a persistent disk on current GCP instance ++ ++ ++The name of the GCP disk. ++Disk name ++ ++ ++ ++Disk scope ++Network name ++ ++ ++ ++Path to a Customer-Supplied Encryption Key (CSEK) key file ++Customer-Supplied Encryption Key file ++ ++ ++ ++Attachment mode (rw, ro) ++Attachment mode ++ ++ ++ ++An optional name that indicates the disk name the guest operating system will see. ++Optional device name ++ ++ ++ ++ ++ ++ ++ ++ ++ ++''' ++ ++ ++def get_metadata(metadata_key, params=None, timeout=None): ++ """Performs a GET request with the metadata headers. 
++ ++ Args: ++ metadata_key: string, the metadata to perform a GET request on. ++ params: dictionary, the query parameters in the GET request. ++ timeout: int, timeout in seconds for metadata requests. ++ ++ Returns: ++ HTTP response from the GET request. ++ ++ Raises: ++ urlerror.HTTPError: raises when the GET request fails. ++ """ ++ timeout = timeout or 60 ++ metadata_url = os.path.join(METADATA_SERVER, metadata_key) ++ params = urlparse.urlencode(params or {}) ++ url = '%s?%s' % (metadata_url, params) ++ request = urlrequest.Request(url, headers=METADATA_HEADERS) ++ request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) ++ return request_opener.open(request, timeout=timeout * 1.1).read() ++ ++ ++def populate_vars(): ++ global CONN ++ global INSTANCE_NAME ++ global PROJECT ++ global ZONE ++ global REGION ++ global LIST_DISK_ATTACHED_INSTANCES ++ ++ global PARAMETERS ++ ++ # Populate global vars ++ try: ++ CONN = googleapiclient.discovery.build('compute', 'v1') ++ except Exception as e: ++ logger.error('Couldn\'t connect with google api: ' + str(e)) ++ sys.exit(ocf.OCF_ERR_CONFIGURED) ++ ++ for param in PARAMETERS: ++ value = os.environ.get('OCF_RESKEY_%s' % param, None) ++ if not value and param in MANDATORY_PARAMETERS: ++ logger.error('Missing %s mandatory parameter' % param) ++ sys.exit(ocf.OCF_ERR_CONFIGURED) ++ PARAMETERS[param] = value ++ ++ try: ++ INSTANCE_NAME = get_metadata('instance/name') ++ except Exception as e: ++ logger.error( ++ 'Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) ++ sys.exit(ocf.OCF_ERR_CONFIGURED) ++ ++ PROJECT = get_metadata('project/project-id') ++ ZONE = get_metadata('instance/zone').split('/')[-1] ++ REGION = ZONE[:-2] ++ LIST_DISK_ATTACHED_INSTANCES = get_disk_attached_instances( ++ PARAMETERS['disk_name']) ++ ++ ++def configure_logs(): ++ # Prepare logging ++ global logger ++ logging.getLogger('googleapiclient').setLevel(logging.WARN) ++ logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging') ++ if logging_env: ++ logging_env = logging_env.lower() ++ if any(x in logging_env for x in ['yes', 'true', 'enabled']): ++ try: ++ import google.cloud.logging.handlers ++ client = google.cloud.logging.Client() ++ handler = google.cloud.logging.handlers.CloudLoggingHandler( ++ client, name=INSTANCE_NAME) ++ handler.setLevel(logging.INFO) ++ formatter = logging.Formatter('gcp:alias "%(message)s"') ++ handler.setFormatter(formatter) ++ ocf.log.addHandler(handler) ++ logger = logging.LoggerAdapter( ++ ocf.log, {'OCF_RESOURCE_INSTANCE': ocf.OCF_RESOURCE_INSTANCE}) ++ except ImportError: ++ logger.error('Couldn\'t import google.cloud.logging, ' ++ 'disabling Stackdriver-logging support') ++ ++ ++def wait_for_operation(operation): ++ while True: ++ result = CONN.zoneOperations().get( ++ project=PROJECT, ++ zone=ZONE, ++ operation=operation['name']).execute() ++ ++ if result['status'] == 'DONE': ++ if 'error' in result: ++ raise Exception(result['error']) ++ return ++ time.sleep(1) ++ ++ ++def get_disk_attached_instances(disk): ++ def get_users_list(): ++ fl = 'name="%s"' % disk ++ request = CONN.disks().aggregatedList(project=PROJECT, filter=fl) ++ while request is not None: ++ response = request.execute() ++ locations = response.get('items', {}) ++ for location in locations.values(): ++ for d in location.get('disks', []): ++ if d['name'] == disk: ++ return d.get('users', []) ++ request = CONN.instances().aggregatedList_next( ++ previous_request=request, previous_response=response) ++ raise Exception("Unable to find disk %s" 
% disk) ++ ++ def get_only_instance_name(user): ++ return re.sub('.*/instances/', '', user) ++ ++ return map(get_only_instance_name, get_users_list()) ++ ++ ++def is_disk_attached(instance): ++ return instance in LIST_DISK_ATTACHED_INSTANCES ++ ++ ++def detach_disk(instance, disk_name): ++ # Python API misses disk-scope argument. ++ ++ # Detaching a disk is only possible by using deviceName, which is retrieved ++ # as a disk parameter when listing the instance information ++ request = CONN.instances().get( ++ project=PROJECT, zone=ZONE, instance=instance) ++ response = request.execute() ++ ++ device_name = None ++ for disk in response['disks']: ++ if disk_name in disk['source']: ++ device_name = disk['deviceName'] ++ break ++ ++ if not device_name: ++ logger.error("Didn't find %(d)s deviceName attached to %(i)s" % { ++ 'd': disk_name, ++ 'i': instance, ++ }) ++ return ++ ++ request = CONN.instances().detachDisk( ++ project=PROJECT, zone=ZONE, instance=instance, deviceName=device_name) ++ wait_for_operation(request.execute()) ++ ++ ++def attach_disk(instance, disk_name): ++ location = 'zones/%s' % ZONE ++ if PARAMETERS['disk_scope'] == 'regional': ++ location = 'regions/%s' % REGION ++ prefix = 'https://www.googleapis.com/compute/v1' ++ body = { ++ 'source': '%(prefix)s/projects/%(project)s/%(location)s/disks/%(disk)s' % { ++ 'prefix': prefix, ++ 'project': PROJECT, ++ 'location': location, ++ 'disk': disk_name, ++ }, ++ } ++ ++ # Customer-Supplied Encryption Key (CSEK) ++ if PARAMETERS['disk_csek_file']: ++ with open(PARAMETERS['disk_csek_file']) as csek_file: ++ body['diskEncryptionKey'] = { ++ 'rawKey': csek_file.read(), ++ } ++ ++ if PARAMETERS['device_name']: ++ body['deviceName'] = PARAMETERS['device_name'] ++ ++ if PARAMETERS['mode']: ++ body['mode'] = PARAMETERS['mode'] ++ ++ force_attach = None ++ if PARAMETERS['disk_scope'] == 'regional': ++ # Python API misses disk-scope argument. ++ force_attach = True ++ else: ++ # If this disk is attached to some instance, detach it first. 
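++ # Note (assumption): attachDisk() is expected to fail while a
++ # READ_WRITE disk is still attached to another instance, so each user
++ # reported by get_disk_attached_instances() is detached before the
++ # attachDisk() call below.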
++ for other_instance in LIST_DISK_ATTACHED_INSTANCES: ++ logger.info("Detaching disk %(disk_name)s from other instance %(i)s" % { ++ 'disk_name': PARAMETERS['disk_name'], ++ 'i': other_instance, ++ }) ++ detach_disk(other_instance, PARAMETERS['disk_name']) ++ ++ request = CONN.instances().attachDisk( ++ project=PROJECT, zone=ZONE, instance=instance, body=body, ++ forceAttach=force_attach) ++ wait_for_operation(request.execute()) ++ ++ ++def fetch_data(): ++ configure_logs() ++ populate_vars() ++ ++ ++def gcp_pd_move_start(): ++ fetch_data() ++ if not is_disk_attached(INSTANCE_NAME): ++ logger.info("Attaching disk %(disk_name)s to %(instance)s" % { ++ 'disk_name': PARAMETERS['disk_name'], ++ 'instance': INSTANCE_NAME, ++ }) ++ attach_disk(INSTANCE_NAME, PARAMETERS['disk_name']) ++ ++ ++def gcp_pd_move_stop(): ++ fetch_data() ++ if is_disk_attached(INSTANCE_NAME): ++ logger.info("Detaching disk %(disk_name)s to %(instance)s" % { ++ 'disk_name': PARAMETERS['disk_name'], ++ 'instance': INSTANCE_NAME, ++ }) ++ detach_disk(INSTANCE_NAME, PARAMETERS['disk_name']) ++ ++ ++def gcp_pd_move_status(): ++ fetch_data() ++ if is_disk_attached(INSTANCE_NAME): ++ logger.info("Disk %(disk_name)s is correctly attached to %(instance)s" % { ++ 'disk_name': PARAMETERS['disk_name'], ++ 'instance': INSTANCE_NAME, ++ }) ++ else: ++ sys.exit(ocf.OCF_NOT_RUNNING) ++ ++ ++def main(): ++ if len(sys.argv) < 2: ++ logger.error('Missing argument') ++ return ++ ++ command = sys.argv[1] ++ if 'meta-data' in command: ++ print(METADATA) ++ return ++ ++ if command in 'start': ++ gcp_pd_move_start() ++ elif command in 'stop': ++ gcp_pd_move_stop() ++ elif command in ('monitor', 'status'): ++ gcp_pd_move_status() ++ else: ++ configure_logs() ++ logger.error('no such function %s' % str(command)) ++ ++ ++if __name__ == "__main__": ++ main() diff --git a/SOURCES/bz1633251-gcp-pd-move-2-use-OCF_FUNCTIONS_DIR.patch b/SOURCES/bz1633251-gcp-pd-move-2-use-OCF_FUNCTIONS_DIR.patch new file mode 100644 index 0000000..9a9681c --- /dev/null +++ b/SOURCES/bz1633251-gcp-pd-move-2-use-OCF_FUNCTIONS_DIR.patch @@ -0,0 +1,18 @@ +commit cbe0e6507992b50afbaebc46dfaf8955cc02e5ec +Author: Oyvind Albrigtsen + + Python agents: use OCF_FUNCTIONS_DIR env variable when available + +diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +index f9f6c316..c5007a43 100755 +--- a/heartbeat/gcp-pd-move.in ++++ b/heartbeat/gcp-pd-move.in +@@ -25,7 +25,7 @@ import re + import sys + import time + +-OCF_FUNCTIONS_DIR = "%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os.environ.get("OCF_ROOT")) + sys.path.append(OCF_FUNCTIONS_DIR) + + import ocf diff --git a/SOURCES/bz1633251-gcp-pd-move-3-add-stackdriver_logging-to-metadata.patch b/SOURCES/bz1633251-gcp-pd-move-3-add-stackdriver_logging-to-metadata.patch new file mode 100644 index 0000000..5819b94 --- /dev/null +++ b/SOURCES/bz1633251-gcp-pd-move-3-add-stackdriver_logging-to-metadata.patch @@ -0,0 +1,48 @@ +From 4fa41a1d7b4bee31526649c40cc4c58bc6333917 Mon Sep 17 00:00:00 2001 +From: masaki-tamura +Date: Wed, 2 Oct 2019 17:12:42 +0900 +Subject: [PATCH 1/2] add parameter stackdriver_logging + +--- + heartbeat/gcp-pd-move.in | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +index c5007a43c..fac5c9744 100755 +--- a/heartbeat/gcp-pd-move.in ++++ b/heartbeat/gcp-pd-move.in +@@ -102,6 +102,11 @@ correctly. 
+ Optional device name + + ++ ++Use stackdriver_logging output to global resource (yes, true, enabled) ++Use stackdriver_logging ++ ++ + + + + +From f762ce3da00e1775587a04751a8828ba004fb534 Mon Sep 17 00:00:00 2001 +From: masaki-tamura +Date: Wed, 2 Oct 2019 17:44:30 +0900 +Subject: [PATCH 2/2] defautl no + +--- + heartbeat/gcp-pd-move.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +index fac5c9744..7fabc80dc 100755 +--- a/heartbeat/gcp-pd-move.in ++++ b/heartbeat/gcp-pd-move.in +@@ -105,7 +105,7 @@ correctly. + + Use stackdriver_logging output to global resource (yes, true, enabled) + Use stackdriver_logging +- ++ + + + diff --git a/SOURCES/bz1633251-gcp-pd-move-4-fixes-and-improvements.patch b/SOURCES/bz1633251-gcp-pd-move-4-fixes-and-improvements.patch new file mode 100644 index 0000000..79e1bc0 --- /dev/null +++ b/SOURCES/bz1633251-gcp-pd-move-4-fixes-and-improvements.patch @@ -0,0 +1,176 @@ +From 9dedf4d4ad3a94e4ce75e0f29ffdd018e3709ae3 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 28 May 2020 11:39:20 +0200 +Subject: [PATCH] gcp-pd-move: fixes and improvements + +- Fixed Python 3 encoding issue +- Improved metadata +- Change monitor loglevel to debug +- Removed "regional" functionality that doesnt work with attachDisk() +- Updated rw/ro to READ_WRITE/READ_ONLY in metadata/default value +--- + heartbeat/gcp-pd-move.in | 63 ++++++++++++++++++++-------------------- + 1 file changed, 32 insertions(+), 31 deletions(-) + mode change 100755 => 100644 heartbeat/gcp-pd-move.in + +diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +old mode 100755 +new mode 100644 +index 7fabc80dc..f82bd25e5 +--- a/heartbeat/gcp-pd-move.in ++++ b/heartbeat/gcp-pd-move.in +@@ -29,6 +29,7 @@ OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os. + sys.path.append(OCF_FUNCTIONS_DIR) + + import ocf ++from ocf import logger + + try: + import googleapiclient.discovery +@@ -48,16 +49,16 @@ else: + CONN = None + PROJECT = None + ZONE = None +-REGION = None + LIST_DISK_ATTACHED_INSTANCES = None + INSTANCE_NAME = None + + PARAMETERS = { +- 'disk_name': None, +- 'disk_scope': None, +- 'disk_csek_file': None, +- 'mode': None, +- 'device_name': None, ++ 'disk_name': '', ++ 'disk_scope': 'detect', ++ 'disk_csek_file': '', ++ 'mode': "READ_WRITE", ++ 'device_name': '', ++ 'stackdriver_logging': 'no', + } + + MANDATORY_PARAMETERS = ['disk_name', 'disk_scope'] +@@ -80,32 +81,32 @@ correctly. + + The name of the GCP disk. + Disk name +- ++ + +- +-Disk scope ++ ++Disk scope + Network name +- ++ + +- ++ + Path to a Customer-Supplied Encryption Key (CSEK) key file + Customer-Supplied Encryption Key file +- ++ + +- +-Attachment mode (rw, ro) ++ ++Attachment mode (READ_WRITE, READ_ONLY) + Attachment mode +- ++ + +- ++ + An optional name that indicates the disk name the guest operating system will see. + Optional device name +- ++ + +- ++ + Use stackdriver_logging output to global resource (yes, true, enabled) + Use stackdriver_logging +- ++ + + + +@@ -114,7 +115,9 @@ correctly. 
+ + + +-''' ++'''.format(PARAMETERS['disk_name'], PARAMETERS['disk_scope'], ++ PARAMETERS['disk_csek_file'], PARAMETERS['mode'], PARAMETERS['device_name'], ++ PARAMETERS['stackdriver_logging']) + + + def get_metadata(metadata_key, params=None, timeout=None): +@@ -137,7 +140,7 @@ def get_metadata(metadata_key, params=None, timeout=None): + url = '%s?%s' % (metadata_url, params) + request = urlrequest.Request(url, headers=METADATA_HEADERS) + request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open(request, timeout=timeout * 1.1).read() ++ return request_opener.open(request, timeout=timeout * 1.1).read().decode("utf-8") + + + def populate_vars(): +@@ -145,11 +148,8 @@ def populate_vars(): + global INSTANCE_NAME + global PROJECT + global ZONE +- global REGION + global LIST_DISK_ATTACHED_INSTANCES + +- global PARAMETERS +- + # Populate global vars + try: + CONN = googleapiclient.discovery.build('compute', 'v1') +@@ -158,11 +158,12 @@ def populate_vars(): + sys.exit(ocf.OCF_ERR_CONFIGURED) + + for param in PARAMETERS: +- value = os.environ.get('OCF_RESKEY_%s' % param, None) ++ value = os.environ.get('OCF_RESKEY_%s' % param, PARAMETERS[param]) + if not value and param in MANDATORY_PARAMETERS: + logger.error('Missing %s mandatory parameter' % param) + sys.exit(ocf.OCF_ERR_CONFIGURED) +- PARAMETERS[param] = value ++ elif value: ++ PARAMETERS[param] = value + + try: + INSTANCE_NAME = get_metadata('instance/name') +@@ -172,8 +173,10 @@ def populate_vars(): + sys.exit(ocf.OCF_ERR_CONFIGURED) + + PROJECT = get_metadata('project/project-id') +- ZONE = get_metadata('instance/zone').split('/')[-1] +- REGION = ZONE[:-2] ++ if PARAMETERS['disk_scope'] in ['detect', 'regional']: ++ ZONE = get_metadata('instance/zone').split('/')[-1] ++ else: ++ ZONE = PARAMETERS['disk_scope'] + LIST_DISK_ATTACHED_INSTANCES = get_disk_attached_instances( + PARAMETERS['disk_name']) + +@@ -270,8 +273,6 @@ def detach_disk(instance, disk_name): + + def attach_disk(instance, disk_name): + location = 'zones/%s' % ZONE +- if PARAMETERS['disk_scope'] == 'regional': +- location = 'regions/%s' % REGION + prefix = 'https://www.googleapis.com/compute/v1' + body = { + 'source': '%(prefix)s/projects/%(project)s/%(location)s/disks/%(disk)s' % { +@@ -342,7 +343,7 @@ def gcp_pd_move_stop(): + def gcp_pd_move_status(): + fetch_data() + if is_disk_attached(INSTANCE_NAME): +- logger.info("Disk %(disk_name)s is correctly attached to %(instance)s" % { ++ logger.debug("Disk %(disk_name)s is correctly attached to %(instance)s" % { + 'disk_name': PARAMETERS['disk_name'], + 'instance': INSTANCE_NAME, + }) diff --git a/SOURCES/bz1633251-gcp-pd-move-5-bundle.patch b/SOURCES/bz1633251-gcp-pd-move-5-bundle.patch new file mode 100644 index 0000000..6d6b244 --- /dev/null +++ b/SOURCES/bz1633251-gcp-pd-move-5-bundle.patch @@ -0,0 +1,10 @@ +--- ClusterLabs-resource-agents-e711383f/heartbeat/gcp-pd-move.in 2020-05-28 14:46:28.396220588 +0200 ++++ /home/oalbrigt/src/resource-agents/gcp-pd-move.rhel8 2020-05-28 14:16:25.845308597 +0200 +@@ -32,6 +32,7 @@ + from ocf import logger + + try: ++ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp/google-cloud-sdk/lib/third_party') + import googleapiclient.discovery + except ImportError: + pass diff --git a/SOURCES/bz1635785-redis-pidof-basename.patch b/SOURCES/bz1635785-redis-pidof-basename.patch new file mode 100644 index 0000000..32c57eb --- /dev/null +++ b/SOURCES/bz1635785-redis-pidof-basename.patch @@ -0,0 +1,61 @@ +From 2462caf264c487810805c40a546a4dc3f953c340 Mon 
Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Wed, 3 Oct 2018 18:07:31 +0200 +Subject: [PATCH] Do not use the absolute path in redis' pidof calls + +The reason for this is that newer kernels (we saw this on a 4.18 kernel) +can limit access to /proc/<pid>/{cwd,exe,root} and so pidof will fail to +identify the process when using the full path names. +This access limitation happens even with the root user: +()[root@ra1 /]$ ls -l /proc/32/ |grep redis-server +ls: cannot read symbolic link '/proc/32/cwd': Permission denied +ls: cannot read symbolic link '/proc/32/root': Permission denied +ls: cannot read symbolic link '/proc/32/exe': Permission denied + +For this reason the 'pidof /usr/bin/redis-server' calls will fail +when running inside containers that have this kernel protection +mechanism. + +We tested this change and successfully obtained a running redis cluster: + podman container set: redis-bundle [192.168.222.1:5000/redis:latest] + Replica[0] + redis-bundle-podman-0 (ocf::heartbeat:podman): Started ra1 + redis-bundle-0 (ocf::pacemaker:remote): Started ra1 + redis (ocf::heartbeat:redis): Master redis-bundle-0 + Replica[1] + redis-bundle-podman-1 (ocf::heartbeat:podman): Started ra2 + redis-bundle-1 (ocf::pacemaker:remote): Started ra2 + redis (ocf::heartbeat:redis): Slave redis-bundle-1 + Replica[2] + redis-bundle-podman-2 (ocf::heartbeat:podman): Started ra3 + redis-bundle-2 (ocf::pacemaker:remote): Started ra3 + redis (ocf::heartbeat:redis): Slave redis-bundle-2 + +Signed-off-By: Damien Ciabrini +Signed-off-by: Michele Baldessari +--- + heartbeat/redis.in | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index ddc62d8a7..1dff067e9 100644 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -316,7 +316,7 @@ simple_status() { + fi + + pid="$(<"$REDIS_PIDFILE")" +- pidof "$REDIS_SERVER" | grep -q "\<$pid\>" || return $OCF_NOT_RUNNING ++ pidof $(basename "$REDIS_SERVER") | grep -q "\<$pid\>" || return $OCF_NOT_RUNNING + + ocf_log debug "monitor: redis-server running under pid $pid" + +@@ -465,7 +465,7 @@ redis_start() { + break + elif (( info[loading] == 1 )); then + sleep "${info[loading_eta_seconds]}" +- elif pidof "$REDIS_SERVER" >/dev/null; then ++ elif pidof $(basename "$REDIS_SERVER") >/dev/null; then + # unknown error, but the process still exists. + # This check is mainly because redis daemonizes before it starts listening, causing `redis-cli` to fail + # See https://github.com/antirez/redis/issues/2368 diff --git a/SOURCES/bz1642027-nfsserver-var-lib-nfs-fix.patch b/SOURCES/bz1642027-nfsserver-var-lib-nfs-fix.patch new file mode 100644 index 0000000..838a8aa --- /dev/null +++ b/SOURCES/bz1642027-nfsserver-var-lib-nfs-fix.patch @@ -0,0 +1,43 @@ +diff -uNr a/heartbeat/nfsserver b/heartbeat/nfsserver +--- a/heartbeat/nfsserver 2018-10-10 17:02:47.873199077 +0200 ++++ b/heartbeat/nfsserver 2018-10-11 15:24:41.782048475 +0200 +@@ -402,7 +402,6 @@ + return + fi + +- [ -d "$fp" ] || mkdir -p $fp + [ -d "$OCF_RESKEY_rpcpipefs_dir" ] || mkdir -p $OCF_RESKEY_rpcpipefs_dir + [ -d "$fp/v4recovery" ] || mkdir -p $fp/v4recovery + +@@ -437,10 +436,21 @@ + return + fi + ++ [ -d "$fp" ] || mkdir -p $fp ++ + if is_bound /var/lib/nfs; then + ocf_log debug "$fp is already bound to /var/lib/nfs" + return 0 + fi ++ ++ case $EXEC_MODE in ++ [23]) if nfs_exec status var-lib-nfs-rpc_pipefs.mount > /dev/null 2>&1; then ++ ocf_log debug "/var/lib/nfs/rpc_pipefs already mounted.
Unmounting in preparation to bind mount nfs dir" ++ systemctl stop var-lib-nfs-rpc_pipefs.mount ++ fi ++ ;; ++ esac ++ + mount --bind $fp /var/lib/nfs + [ $SELINUX_ENABLED -eq 0 ] && restorecon /var/lib/nfs + } +@@ -612,8 +622,8 @@ + fi + + is_redhat_based && set_env_args +- prepare_directory + bind_tree ++ prepare_directory + + if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then + mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir diff --git a/SOURCES/bz1643307-LVM-activate-dont-fail-initial-probe.patch b/SOURCES/bz1643307-LVM-activate-dont-fail-initial-probe.patch new file mode 100644 index 0000000..0b7d485 --- /dev/null +++ b/SOURCES/bz1643307-LVM-activate-dont-fail-initial-probe.patch @@ -0,0 +1,24 @@ +From 848d62c32b355a03c2ad8d246eb3e34b04af07ca Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 9 Jan 2019 16:49:41 +0100 +Subject: [PATCH] LVM-activate: dont fail initial probe + +--- + heartbeat/LVM-activate | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index f46932c1c..49ab717a3 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -323,6 +323,10 @@ lvmlockd_check() + + # Good: lvmlockd is running, and clvmd is not running + if ! pgrep lvmlockd >/dev/null 2>&1 ; then ++ if ocf_is_probe; then ++ exit $OCF_NOT_RUNNING ++ fi ++ + ocf_exit_reason "lvmlockd daemon is not running!" + exit $OCF_ERR_CONFIGURED + fi diff --git a/SOURCES/bz1658664-LVM-activate-dont-require-locking_type.patch b/SOURCES/bz1658664-LVM-activate-dont-require-locking_type.patch new file mode 100644 index 0000000..5b975a1 --- /dev/null +++ b/SOURCES/bz1658664-LVM-activate-dont-require-locking_type.patch @@ -0,0 +1,27 @@ +From 4f122cd0cf46c1fdc1badb22049607a6abf0c885 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 4 Feb 2019 17:04:59 +0100 +Subject: [PATCH] LVM-activate: only check locking_type when LVM < v2.03 + +--- + heartbeat/LVM-activate | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index c2239d881..3c462c75c 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -311,7 +311,12 @@ config_verify() + lvmlockd_check() + { + config_verify "global/use_lvmlockd" "1" +- config_verify "global/locking_type" "1" ++ ++ # locking_type was removed from config in v2.03 ++ ocf_version_cmp "$(lvmconfig --version | awk '/LVM ver/ {sub(/\(.*/, "", $3); print $3}')" "2.03" ++ if [ "$?" 
-eq 0 ]; then ++ config_verify "global/locking_type" "1" ++ fi + + # We recommend to activate one LV at a time so that this specific volume + # binds to a proper filesystem to protect the data diff --git a/SOURCES/bz1662466-vdo-vol-fix-monitor-action.patch b/SOURCES/bz1662466-vdo-vol-fix-monitor-action.patch new file mode 100644 index 0000000..58b13ce --- /dev/null +++ b/SOURCES/bz1662466-vdo-vol-fix-monitor-action.patch @@ -0,0 +1,12 @@ +diff -uNr a/heartbeat/vdo-vol b/heartbeat/vdo-vol +--- a/heartbeat/vdo-vol 2018-11-07 09:11:23.037835110 +0100 ++++ b/heartbeat/vdo-vol 2018-11-07 09:12:41.322373901 +0100 +@@ -145,7 +145,7 @@ + + vdo_monitor(){ + status=$(vdo status $OPTIONS 2>&1) +- MODE=$(vdostats vdo_vol --verbose | grep "operating mode" | awk '{print $NF}') ++ MODE=$(vdostats --verbose ${OCF_RESKEY_volume} | grep "operating mode" | awk '{print $NF}') + + case "$status" in + *"Device mapper status: not available"*) diff --git a/SOURCES/bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch b/SOURCES/bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch new file mode 100644 index 0000000..571196b --- /dev/null +++ b/SOURCES/bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch @@ -0,0 +1,59 @@ +From b42ef7555de86cc29d165ae17682c223bfb23b6e Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 5 Nov 2018 16:38:01 +0100 +Subject: [PATCH 1/2] tomcat: use systemd on RHEL when catalina.sh is + unavailable + +--- + heartbeat/tomcat | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/tomcat b/heartbeat/tomcat +index 4812a0133..833870038 100755 +--- a/heartbeat/tomcat ++++ b/heartbeat/tomcat +@@ -613,7 +613,6 @@ TOMCAT_NAME="${OCF_RESKEY_tomcat_name-tomcat}" + TOMCAT_CONSOLE="${OCF_RESKEY_script_log-/var/log/$TOMCAT_NAME.log}" + RESOURCE_TOMCAT_USER="${OCF_RESKEY_tomcat_user-root}" + RESOURCE_STATUSURL="${OCF_RESKEY_statusurl-http://127.0.0.1:8080}" +-OCF_RESKEY_force_systemd_default=0 + + JAVA_HOME="${OCF_RESKEY_java_home}" + JAVA_OPTS="${OCF_RESKEY_java_opts}" +@@ -630,6 +629,13 @@ if [ -z "$CATALINA_PID" ]; then + CATALINA_PID="${HA_RSCTMP}/${TOMCAT_NAME}_tomcatstate/catalina.pid" + fi + ++# Only default to true for RedHat systems without catalina.sh ++if [ -e "$CATALINA_HOME/bin/catalina.sh" ] || ! 
is_redhat_based; then ++ OCF_RESKEY_force_systemd_default=0 ++else ++ OCF_RESKEY_force_systemd_default=1 ++fi ++ + MAX_STOP_TIME="${OCF_RESKEY_max_stop_time}" + + : ${OCF_RESKEY_force_systemd=${OCF_RESKEY_force_systemd_default}} + +From 9cb2b142a9ecb3a2d5a51cdd51b4005f08b9a97b Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 5 Nov 2018 17:09:43 +0100 +Subject: [PATCH 2/2] ocf-distro: add regex for RedHat version + +--- + heartbeat/ocf-distro | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/ocf-distro b/heartbeat/ocf-distro +index 530ee57ed..f69910c98 100644 +--- a/heartbeat/ocf-distro ++++ b/heartbeat/ocf-distro +@@ -39,7 +39,7 @@ get_os_ver() { + VER=$(cat $_DEBIAN_VERSION_FILE) + elif [ -f $_REDHAT_RELEASE_FILE ]; then + OS=RedHat # redhat or similar +- VER= # here some complex sed script ++ VER=$(sed "s/.* release \([^ ]\+\).*/\1/" $_REDHAT_RELEASE_FILE) + else + OS=$(uname -s) + VER=$(uname -r) diff --git a/SOURCES/bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch b/SOURCES/bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch new file mode 100644 index 0000000..af1974c --- /dev/null +++ b/SOURCES/bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch @@ -0,0 +1,23 @@ +From 13511f843b2b0fa1b8b306beac041e0855be05a6 Mon Sep 17 00:00:00 2001 +From: Valentin Vidic +Date: Tue, 15 Jan 2019 15:45:03 +0100 +Subject: [PATCH] LVM-activate: make vgname not uniqe + +If activating one lvname at a time, vgname will not be unique. +--- + heartbeat/LVM-activate | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index f46932c1c..bc448c9c1 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -102,7 +102,7 @@ because some DLM lockspaces might be in use and cannot be closed automatically. + This agent activates/deactivates logical volumes. + + +- ++ + + The volume group name. + diff --git a/SOURCES/bz1667414-2-LVM-activate-only-count-volumes.patch b/SOURCES/bz1667414-2-LVM-activate-only-count-volumes.patch new file mode 100644 index 0000000..5911e0e --- /dev/null +++ b/SOURCES/bz1667414-2-LVM-activate-only-count-volumes.patch @@ -0,0 +1,29 @@ +From ee9a47f97dd8b0cb51033db7879a79588aab409c Mon Sep 17 00:00:00 2001 +From: Valentin Vidic +Date: Tue, 15 Jan 2019 15:40:01 +0100 +Subject: [PATCH] LVM-activate: fix dmsetup check + +When there are no devices in the system dmsetup outputs one line: + + # dmsetup info -c + No devices found +--- + heartbeat/LVM-activate | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index f46932c1c..c3225e1cb 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -715,9 +715,9 @@ lvm_status() { + if [ -n "${LV}" ]; then + # dmsetup ls? It cannot accept device name. It's + # too heavy to list all DM devices. 
+- dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" | wc -l ) ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" | grep -c -v '^No devices found') + else +- dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" 2>/dev/null | wc -l ) ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" | grep -c -v '^No devices found') + fi + + if [ $dm_count -eq 0 ]; then diff --git a/SOURCES/bz1669140-Route-make-family-parameter-optional.patch b/SOURCES/bz1669140-Route-make-family-parameter-optional.patch new file mode 100644 index 0000000..81ab09d --- /dev/null +++ b/SOURCES/bz1669140-Route-make-family-parameter-optional.patch @@ -0,0 +1,31 @@ +From d95765aba205ea59dcb99378bed4c6d0593ebdb4 Mon Sep 17 00:00:00 2001 +From: fpicot +Date: Fri, 11 Jan 2019 11:38:18 -0500 +Subject: [PATCH] Route: make family parameter optional + +--- + heartbeat/Route | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/Route b/heartbeat/Route +index 67bdf6bfc..2da58bce1 100755 +--- a/heartbeat/Route ++++ b/heartbeat/Route +@@ -124,7 +124,7 @@ The routing table to be configured for the route. + + + +- ++ + + The address family to be used for the route + ip4 IP version 4 +@@ -132,7 +132,7 @@ ip6 IP version 6 + detect Detect from 'destination' address. + + Address Family +- ++ + + + diff --git a/SOURCES/bz1683548-redis-mute-password-warning.patch b/SOURCES/bz1683548-redis-mute-password-warning.patch new file mode 100644 index 0000000..b3b89e0 --- /dev/null +++ b/SOURCES/bz1683548-redis-mute-password-warning.patch @@ -0,0 +1,62 @@ +From 6303448af77d2ed64c7436a84b30cf7fa4941e19 Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Wed, 30 Jan 2019 21:36:17 +0100 +Subject: [PATCH] redis: Filter warning from stderr when calling 'redis-cli -a' + +In some versions of redis (starting with 4.0.10) we have commits [1] and +[2] which add a warning on stderr which will be printed out every single +time a monitor operation takes place: + + foo pacemaker-remoted[57563]: notice: redis_monitor_20000:1930:stderr + [ Warning: Using a password with '-a' option on the command line interface may not be safe. ] + +Later on commit [3] (merged with 5.0rc4) was merged which added the option +'--no-auth-warning' to disable said warning since it broke a bunch of +scripts [4]. I tried to forcibly either try the command twice (first +with --no-auth-warning and then without in case of errors) but it is +impossible to distinguish between error due to missing param and other +errors. + +So instead of inspecting the version of the redis-cli tool and do the following: +- >= 5.0.0 use --no-auth-warning all the time +- >= 4.0.10 & < 5.0.0 filter the problematic line from stderr only +- else do it like before + +We simply filter out from stderr the 'Using a password' message +unconditionally while making sure we keep stdout just the same. + +Tested on a redis 4.0.10 cluster and confirmed that it is working as +intended. + +All this horror and pain is due to the fact that redis does not support +any other means to pass a password (we could in theory first connect to +the server and then issue an AUTH command, but that seems even more +complex and error prone). 
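As a minimal standalone sketch of the stderr-filtering pattern used in the hunk below (illustrative only; some_cmd is a placeholder): fd 3 carries the real stdout around the pipeline, so grep only ever sees stderr:

    # stdout detours via fd 3; stderr is filtered and put back on stderr
    ( some_cmd 2>&1 >&3 3>&- | grep -v "Using a password" >&2 3>&- ) 3>&1
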
See [5] for more info (or [6] for extra fun) + +[1] https://github.com/antirez/redis/commit/c082221aefbb2a472c7193dbdbb90900256ce1a2 +[2] https://github.com/antirez/redis/commit/ef931ef93e909b4f504e8c6fbed350ed70c1c67c +[3] https://github.com/antirez/redis/commit/a4ef94d2f71a32f73ce4ebf154580307a144b48f +[4] https://github.com/antirez/redis/issues/5073 +[5] https://github.com/antirez/redis/issues/3483 +[6] https://github.com/antirez/redis/pull/2413 + +Signed-off-by: Michele Baldessari +--- + heartbeat/redis.in | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index 1dff067e9..e257bcc5e 100644 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -302,7 +302,9 @@ set_score() + redis_client() { + ocf_log debug "redis_client: '$REDIS_CLIENT' -s '$REDIS_SOCKET' $*" + if [ -n "$clientpasswd" ]; then +- "$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" | sed 's/\r//' ++ # Starting with 4.0.10 there is a warning on stderr when using a pass ++ # Once we stop supporting versions < 5.0.0 we can add --no-auth-warning here ++ ("$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" 2>&1 >&3 3>&- | grep -v "Using a password" >&2 3>&-) 3>&1 | sed 's/\r//' + else + "$REDIS_CLIENT" -s "$REDIS_SOCKET" "$@" | sed 's/\r//' + fi diff --git a/SOURCES/bz1689184-Squid-1-fix-pidfile-issue.patch b/SOURCES/bz1689184-Squid-1-fix-pidfile-issue.patch new file mode 100644 index 0000000..1ebb942 --- /dev/null +++ b/SOURCES/bz1689184-Squid-1-fix-pidfile-issue.patch @@ -0,0 +1,70 @@ +From d228d41c61f57f2576dd87aa7be86f9ca26e3059 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 18 Mar 2019 16:03:14 +0100 +Subject: [PATCH] Squid: fix pid file issue due to new Squid version saving the + PID of the parent process instead of the listener child process + +--- + heartbeat/Squid.in | 21 +++++---------------- + 1 file changed, 5 insertions(+), 16 deletions(-) + +diff --git a/heartbeat/Squid.in b/heartbeat/Squid.in +index a99892d75..0b3c8ea86 100644 +--- a/heartbeat/Squid.in ++++ b/heartbeat/Squid.in +@@ -96,12 +96,9 @@ for a squid instance managed by this RA. + + + +- +- +-This is a required parameter. This parameter specifies a process id file +-for a squid instance managed by this RA. +- +-Pidfile ++ ++Deprecated - do not use anymore ++deprecated - do not use anymore + + + +@@ -175,8 +172,8 @@ get_pids() + # Seek by pattern + SQUID_PIDS[0]=$(pgrep -f "$PROCESS_PATTERN") + +- # Seek by pidfile +- SQUID_PIDS[1]=$(awk '1{print $1}' $SQUID_PIDFILE 2>/dev/null) ++ # Seek by child process ++ SQUID_PIDS[1]=$(pgrep -P ${SQUID_PIDS[0]}) + + if [[ -n "${SQUID_PIDS[1]}" ]]; then + typeset exe +@@ -306,7 +303,6 @@ stop_squid() + while true; do + get_pids + if is_squid_dead; then +- rm -f $SQUID_PIDFILE + return $OCF_SUCCESS + fi + (( lapse_sec = lapse_sec + 1 )) +@@ -326,7 +322,6 @@ stop_squid() + kill -KILL ${SQUID_PIDS[0]} ${SQUID_PIDS[2]} + sleep 1 + if is_squid_dead; then +- rm -f $SQUID_PIDFILE + return $OCF_SUCCESS + fi + done +@@ -389,12 +384,6 @@ if [[ ! 
-x "$SQUID_EXE" ]]; then + exit $OCF_ERR_CONFIGURED + fi + +-SQUID_PIDFILE="${OCF_RESKEY_squid_pidfile}" +-if [[ -z "$SQUID_PIDFILE" ]]; then +- ocf_exit_reason "SQUID_PIDFILE is not defined" +- exit $OCF_ERR_CONFIGURED +-fi +- + SQUID_PORT="${OCF_RESKEY_squid_port}" + if [[ -z "$SQUID_PORT" ]]; then + ocf_exit_reason "SQUID_PORT is not defined" diff --git a/SOURCES/bz1689184-Squid-2-dont-run-pgrep-without-PID.patch b/SOURCES/bz1689184-Squid-2-dont-run-pgrep-without-PID.patch new file mode 100644 index 0000000..bb6a894 --- /dev/null +++ b/SOURCES/bz1689184-Squid-2-dont-run-pgrep-without-PID.patch @@ -0,0 +1,24 @@ +From e370845f41d39d93f76fa34502d62e2513d5eb73 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 29 May 2019 14:07:46 +0200 +Subject: [PATCH] Squid: dont run pgrep -P without PID + +--- + heartbeat/Squid.in | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/Squid.in b/heartbeat/Squid.in +index 0b3c8ea86..e62e7ee66 100644 +--- a/heartbeat/Squid.in ++++ b/heartbeat/Squid.in +@@ -173,7 +173,9 @@ get_pids() + SQUID_PIDS[0]=$(pgrep -f "$PROCESS_PATTERN") + + # Seek by child process +- SQUID_PIDS[1]=$(pgrep -P ${SQUID_PIDS[0]}) ++ if [[ -n "${SQUID_PIDS[0]}" ]]; then ++ SQUID_PIDS[1]=$(pgrep -P ${SQUID_PIDS[0]}) ++ fi + + if [[ -n "${SQUID_PIDS[1]}" ]]; then + typeset exe diff --git a/SOURCES/bz1691456-gcloud-dont-detect-python2.patch b/SOURCES/bz1691456-gcloud-dont-detect-python2.patch new file mode 100644 index 0000000..9abbd09 --- /dev/null +++ b/SOURCES/bz1691456-gcloud-dont-detect-python2.patch @@ -0,0 +1,29 @@ +diff -uNr a/bundled/gcp/google-cloud-sdk/bin/gcloud b/bundled/gcp/google-cloud-sdk/bin/gcloud +--- a/bundled/gcp/google-cloud-sdk/bin/gcloud 2019-04-04 12:01:28.838027640 +0200 ++++ b/bundled/gcp/google-cloud-sdk/bin/gcloud 2019-04-04 12:03:21.577089065 +0200 +@@ -74,24 +74,7 @@ + + # if CLOUDSDK_PYTHON is empty + if [ -z "$CLOUDSDK_PYTHON" ]; then +- # if python2 exists then plain python may point to a version != 2 +- if _cloudsdk_which python2 >/dev/null; then +- CLOUDSDK_PYTHON=python2 +- elif _cloudsdk_which python2.7 >/dev/null; then +- # this is what some OS X versions call their built-in Python +- CLOUDSDK_PYTHON=python2.7 +- elif _cloudsdk_which python >/dev/null; then +- # Use unversioned python if it exists. +- CLOUDSDK_PYTHON=python +- elif _cloudsdk_which python3 >/dev/null; then +- # We support python3, but only want to default to it if nothing else is +- # found. +- CLOUDSDK_PYTHON=python3 +- else +- # This won't work because it wasn't found above, but at this point this +- # is our best guess for the error message. +- CLOUDSDK_PYTHON=python +- fi ++ CLOUDSDK_PYTHON="/usr/libexec/platform-python" + fi + + # $PYTHONHOME can interfere with gcloud. 
Users should use diff --git a/SOURCES/bz1692413-1-iSCSITarget-create-iqn-when-it-doesnt-exist.patch b/SOURCES/bz1692413-1-iSCSITarget-create-iqn-when-it-doesnt-exist.patch new file mode 100644 index 0000000..d50b231 --- /dev/null +++ b/SOURCES/bz1692413-1-iSCSITarget-create-iqn-when-it-doesnt-exist.patch @@ -0,0 +1,31 @@ +From 9273b83edf6ee72a59511f307e168813ca3d31fd Mon Sep 17 00:00:00 2001 +From: colttt +Date: Fri, 12 Oct 2018 15:29:48 +0200 +Subject: [PATCH] possible fix for #1026 + +add an if-condition and remove an useless 'targetcli create' +--- + heartbeat/iSCSITarget.in | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/iSCSITarget.in b/heartbeat/iSCSITarget.in +index e49a79016..9128fdc55 100644 +--- a/heartbeat/iSCSITarget.in ++++ b/heartbeat/iSCSITarget.in +@@ -340,13 +340,13 @@ iSCSITarget_start() { + ocf_take_lock $TARGETLOCKFILE + ocf_release_lock_on_exit $TARGETLOCKFILE + ocf_run targetcli /iscsi set global auto_add_default_portal=false || exit $OCF_ERR_GENERIC +- ocf_run targetcli /iscsi create ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC ++ if ! [ -d /sys/kernel/config/target/iscsi/${OCF_RESKEY_iqn} ] ; then ++ ocf_run targetcli /iscsi create ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC ++ fi + for portal in ${OCF_RESKEY_portals}; do + if [ $portal != ${OCF_RESKEY_portals_default} ] ; then + IFS=':' read -a sep_portal <<< "$portal" + ocf_run targetcli /iscsi/${OCF_RESKEY_iqn}/tpg1/portals create "${sep_portal[0]}" "${sep_portal[1]}" || exit $OCF_ERR_GENERIC +- else +- ocf_run targetcli /iscsi create ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC + fi + done + # in lio, we can set target parameters by manipulating diff --git a/SOURCES/bz1692413-2-iSCSILogicalUnit-create-acls-fix.patch b/SOURCES/bz1692413-2-iSCSILogicalUnit-create-acls-fix.patch new file mode 100644 index 0000000..a349e46 --- /dev/null +++ b/SOURCES/bz1692413-2-iSCSILogicalUnit-create-acls-fix.patch @@ -0,0 +1,24 @@ +From 0d53e80957a00016418080967892337b1b13f99d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 30 Jul 2019 11:23:07 +0200 +Subject: [PATCH] iSCSILogicalUnit: only create acls if it doesnt exist + +--- + heartbeat/iSCSILogicalUnit.in | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/iSCSILogicalUnit.in b/heartbeat/iSCSILogicalUnit.in +index 0fe85b593..02045d754 100644 +--- a/heartbeat/iSCSILogicalUnit.in ++++ b/heartbeat/iSCSILogicalUnit.in +@@ -420,8 +420,8 @@ iSCSILogicalUnit_start() { + + if [ -n "${OCF_RESKEY_allowed_initiators}" ]; then + for initiator in ${OCF_RESKEY_allowed_initiators}; do +- ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls create ${initiator} add_mapped_luns=False || exit $OCF_ERR_GENERIC +- ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls/${initiator} create ${OCF_RESKEY_lun} ${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC ++ [ -d "/sys/kernel/config/target/iscsi/${OCF_RESKEY_target_iqn}/tpgt_1/acls" ] || ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls create ${initiator} add_mapped_luns=False || exit $OCF_ERR_GENERIC ++ [ -d "/sys/kernel/config/target/iscsi/${OCF_RESKEY_target_iqn}/tpgt_1/acls/${initiator}" ] || ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls/${initiator} create ${OCF_RESKEY_lun} ${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC + done + fi + diff --git a/SOURCES/bz1692960-mysql-galera-runuser-su-to-avoid-dac_override.patch b/SOURCES/bz1692960-mysql-galera-runuser-su-to-avoid-dac_override.patch new file mode 100644 index 0000000..16f6caa --- 
/dev/null +++ b/SOURCES/bz1692960-mysql-galera-runuser-su-to-avoid-dac_override.patch @@ -0,0 +1,93 @@ +From db6d12f4b7b10e214526512abe35307270f81c03 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 8 Aug 2019 14:48:13 +0200 +Subject: [PATCH] mysql/mariadb/galera: use runuser/su to avoid using SELinux + DAC_OVERRIDE + +--- + heartbeat/galera | 11 ++++++----- + heartbeat/mysql-common.sh | 16 ++++++++++++---- + 2 files changed, 18 insertions(+), 9 deletions(-) + +diff --git a/heartbeat/galera b/heartbeat/galera +index 9b9fe5569..056281fb8 100755 +--- a/heartbeat/galera ++++ b/heartbeat/galera +@@ -624,8 +624,7 @@ detect_last_commit() + local recover_args="--defaults-file=$OCF_RESKEY_config \ + --pid-file=$OCF_RESKEY_pid \ + --socket=$OCF_RESKEY_socket \ +- --datadir=$OCF_RESKEY_datadir \ +- --user=$OCF_RESKEY_user" ++ --datadir=$OCF_RESKEY_datadir" + local recovery_file_regex='s/.*WSREP\:.*position\s*recovery.*--log_error='\''\([^'\'']*\)'\''.*/\1/p' + local recovered_position_regex='s/.*WSREP\:\s*[R|r]ecovered\s*position.*\:\(.*\)\s*$/\1/p' + +@@ -654,7 +653,8 @@ detect_last_commit() + + ocf_log info "now attempting to detect last commit version using 'mysqld_safe --wsrep-recover'" + +- ${OCF_RESKEY_binary} $recover_args --wsrep-recover --log-error=$tmp 2>/dev/null ++ $SU - $OCF_RESKEY_user -s /bin/sh -c \ ++ "${OCF_RESKEY_binary} $recover_args --wsrep-recover --log-error=$tmp 2>/dev/null" + + last_commit="$(cat $tmp | sed -n $recovered_position_regex | tail -1)" + if [ -z "$last_commit" ]; then +@@ -670,8 +670,9 @@ detect_last_commit() + # we can only rollback the transaction, but that's OK + # since the DB will get resynchronized anyway + ocf_log warn "local node <${NODENAME}> was not shutdown properly. Rollback stuck transaction with --tc-heuristic-recover" +- ${OCF_RESKEY_binary} $recover_args --wsrep-recover \ +- --tc-heuristic-recover=rollback --log-error=$tmp 2>/dev/null ++ $SU - $OCF_RESKEY_user -s /bin/sh -c \ ++ "${OCF_RESKEY_binary} $recover_args --wsrep-recover \ ++ --tc-heuristic-recover=rollback --log-error=$tmp 2>/dev/null" + + last_commit="$(cat $tmp | sed -n $recovered_position_regex | tail -1)" + if [ ! -z "$last_commit" ]; then +diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh +index d5ac972cd..65db9bf85 100755 +--- a/heartbeat/mysql-common.sh ++++ b/heartbeat/mysql-common.sh +@@ -2,6 +2,13 @@ + + ####################################################################### + ++# Use runuser if available for SELinux. ++if [ -x /sbin/runuser ]; then ++ SU=runuser ++else ++ SU=su ++fi ++ + # Attempt to detect a default binary + OCF_RESKEY_binary_default=$(which mysqld_safe 2> /dev/null) + if [ "$OCF_RESKEY_binary_default" = "" ]; then +@@ -207,7 +214,7 @@ mysql_common_prepare_dirs() + # already existed, check whether it is writable by the configured + # user + for dir in $pid_dir $socket_dir; do +- if ! su -s /bin/sh - $OCF_RESKEY_user -c "test -w $dir"; then ++ if ! 
$SU -s /bin/sh - $OCF_RESKEY_user -c "test -w $dir"; then + ocf_exit_reason "Directory $dir is not writable by $OCF_RESKEY_user" + exit $OCF_ERR_PERM; + fi +@@ -219,14 +226,15 @@ mysql_common_start() + local mysql_extra_params="$1" + local pid + +- ${OCF_RESKEY_binary} --defaults-file=$OCF_RESKEY_config \ ++ $SU - $OCF_RESKEY_user -s /bin/sh -c \ ++ "${OCF_RESKEY_binary} --defaults-file=$OCF_RESKEY_config \ + --pid-file=$OCF_RESKEY_pid \ + --socket=$OCF_RESKEY_socket \ + --datadir=$OCF_RESKEY_datadir \ + --log-error=$OCF_RESKEY_log \ +- --user=$OCF_RESKEY_user $OCF_RESKEY_additional_parameters \ ++ $OCF_RESKEY_additional_parameters \ + $mysql_extra_params >/dev/null 2>&1 & +- pid=$! ++ pid=$!" + + # Spin waiting for the server to come up. + # Let the CRM/LRM time us out if required. diff --git a/SOURCES/bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch b/SOURCES/bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch new file mode 100644 index 0000000..8899055 --- /dev/null +++ b/SOURCES/bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch @@ -0,0 +1,104 @@ +From 57f695d336cab33c61e754e463654ad6400f7b58 Mon Sep 17 00:00:00 2001 +From: gguifelixamz +Date: Tue, 27 Nov 2018 17:06:05 +0000 +Subject: [PATCH 1/4] Enable --query flag in DescribeRouteTable API call to + avoid race condition with grep + +--- + heartbeat/aws-vpc-move-ip | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 9b2043aca..d2aed7490 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,9 +167,10 @@ ec2ip_validate() { + ec2ip_monitor() { + if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then + ocf_log info "monitor: check routing table (API call)" +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table" ++ cmd=''$OCF_RESKEY_awscli' --profile '$OCF_RESKEY_profile' --output text ec2 describe-route-tables --route-table-ids '$OCF_RESKEY_routing_table' --query 'RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId'' + ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_ip | awk '{ print $3 }')" ++ ROUTE_TO_INSTANCE=$($cmd) ++ ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" + if [ -z "$ROUTE_TO_INSTANCE" ]; then + ROUTE_TO_INSTANCE="" + fi + +From 4d6371aca5dca35b902a480e07a08c1dc3373ca5 Mon Sep 17 00:00:00 2001 +From: gguifelixamz +Date: Thu, 29 Nov 2018 11:39:26 +0000 +Subject: [PATCH 2/4] aws-vpc-move-ip: Fixed outer quotes and removed inner + quotes + +--- + heartbeat/aws-vpc-move-ip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index d2aed7490..ced69bd13 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,7 +167,7 @@ ec2ip_validate() { + ec2ip_monitor() { + if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then + ocf_log info "monitor: check routing table (API call)" +- cmd=''$OCF_RESKEY_awscli' --profile '$OCF_RESKEY_profile' --output text ec2 describe-route-tables --route-table-ids '$OCF_RESKEY_routing_table' --query 'RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId'' ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids 
$OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId" + ocf_log debug "executing command: $cmd" + ROUTE_TO_INSTANCE=$($cmd) + ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" + +From 09f4b061690a0e681aaf7314f1fc3e6f4e597cc8 Mon Sep 17 00:00:00 2001 +From: gguifelixamz +Date: Thu, 29 Nov 2018 11:55:05 +0000 +Subject: [PATCH 3/4] aws-vpc-move-ip: Replaced indentation spaces with tabs + for consistency with the rest of the code + +--- + heartbeat/aws-vpc-move-ip | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index ced69bd13..3e827283e 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,10 +167,10 @@ ec2ip_validate() { + ec2ip_monitor() { + if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then + ocf_log info "monitor: check routing table (API call)" +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId" + ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE=$($cmd) +- ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" ++ ROUTE_TO_INSTANCE=$($cmd) ++ ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" + if [ -z "$ROUTE_TO_INSTANCE" ]; then + ROUTE_TO_INSTANCE="" + fi + +From fcf85551ce70cb4fb7ce24e21c361fdbe6fcce6b Mon Sep 17 00:00:00 2001 +From: gguifelixamz +Date: Thu, 29 Nov 2018 13:07:32 +0000 +Subject: [PATCH 4/4] aws-vpc-move-ip: In cmd variable on ec2ip_monitor(): + replaced _address with _ip and modified to use single quotes + +--- + heartbeat/aws-vpc-move-ip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 3e827283e..331ee184f 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,7 +167,7 @@ ec2ip_validate() { + ec2ip_monitor() { + if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then + ocf_log info "monitor: check routing table (API call)" +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" + ocf_log debug "executing command: $cmd" + ROUTE_TO_INSTANCE=$($cmd) + ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" diff --git a/SOURCES/bz1695039-LVM-activate-return-NOT_RUNNING-rejoin-cluster.patch b/SOURCES/bz1695039-LVM-activate-return-NOT_RUNNING-rejoin-cluster.patch new file mode 100644 index 0000000..05e00fb --- /dev/null +++ b/SOURCES/bz1695039-LVM-activate-return-NOT_RUNNING-rejoin-cluster.patch @@ -0,0 +1,82 @@ +From 4ee9a7026d7ed15b0b5cd26f06a21d04fc05d14e Mon Sep 17 00:00:00 2001 +From: Roger Zhou +Date: Mon, 1 Apr 2019 22:57:26 
+0800 +Subject: [PATCH 1/2] LVM-activate: return OCF_NOT_RUNNING on initial probe + +In the use case of lvm on top of cluster md/raid. When the fenced node +rejoins to the cluster, Pacemaker will run the monitor action for the +probe operation. At that time, LVM PV and VG won't exist before cluster +md/raid get assembled, and the probe should return $OCF_NOT_RUNNING +instead of $OCF_ERR_CONFIGURED. + +Signed-off-by: Roger Zhou +--- + heartbeat/LVM-activate | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 3c462c75c..91ac05c34 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -329,6 +329,7 @@ lvmlockd_check() + # Good: lvmlockd is running, and clvmd is not running + if ! pgrep lvmlockd >/dev/null 2>&1 ; then + if ocf_is_probe; then ++ ocf_log info "initial probe: lvmlockd is not running yet." + exit $OCF_NOT_RUNNING + fi + +@@ -481,6 +482,11 @@ lvm_validate() { + exit $OCF_SUCCESS + fi + ++ if ocf_is_probe; then ++ ocf_log info "initial probe: VG [${VG}] is not found on any block device yet." ++ exit $OCF_NOT_RUNNING ++ fi ++ + ocf_exit_reason "Volume group[${VG}] doesn't exist, or not visible on this node!" + exit $OCF_ERR_CONFIGURED + fi + +From df2f58c400b1f6f239f9e1c1fdf6ce0875639b43 Mon Sep 17 00:00:00 2001 +From: Roger Zhou +Date: Mon, 1 Apr 2019 23:02:54 +0800 +Subject: [PATCH 2/2] LVM-activate: align dmsetup report command to standard + +Namely to change 'vgname/lvname' to 'vg_name/lv_name'. The dmsetup +report command follows lvm2 selection criteria field name standard. +- dmsetup v1.02.86 (lvm2 v2_02_107) - 23rd June 2014 + "Add dmsetup -S/--select to define selection criteria" +- dmsetup info -c -S help + +Signed-off-by: Roger Zhou +--- + heartbeat/LVM-activate | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 91ac05c34..730d9a09d 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -707,7 +707,7 @@ tagging_deactivate() { + # method: + # + # lv_count=$(vgs --foreign -o lv_count --noheadings ${VG} 2>/dev/null | tr -d '[:blank:]') +-# dm_count=$(dmsetup --noheadings info -c -S "vgname=${VG}" 2>/dev/null | grep -c "${VG}-") ++# dm_count=$(dmsetup --noheadings info -c -S "vg_name=${VG}" 2>/dev/null | grep -c "${VG}-") + # test $lv_count -eq $dm_count + # + # It works, but we cannot afford to use LVM command in lvm_status. LVM command is expensive +@@ -730,9 +730,9 @@ lvm_status() { + if [ -n "${LV}" ]; then + # dmsetup ls? It cannot accept device name. It's + # too heavy to list all DM devices. 
+- dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" | grep -c -v '^No devices found') ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vg_name=${VG} && lv_name=${LV}" | grep -c -v '^No devices found') + else +- dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" | grep -c -v '^No devices found') ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vg_name=${VG}" | grep -c -v '^No devices found') + fi + + if [ $dm_count -eq 0 ]; then diff --git a/SOURCES/bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch b/SOURCES/bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch new file mode 100644 index 0000000..9ad4c1d --- /dev/null +++ b/SOURCES/bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch @@ -0,0 +1,46 @@ +From 17fe1dfeef1534b270e4765277cb8d7b42c4a9c4 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 5 Apr 2019 09:15:40 +0200 +Subject: [PATCH] gcp-vpc-move-route/gcp-vpc-move-vip: fix Python 3 encoding + issue + +--- + heartbeat/gcp-vpc-move-route.in | 2 +- + heartbeat/gcp-vpc-move-vip.in | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 591b97b1c..7dd47150d 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -193,7 +193,7 @@ def get_metadata(metadata_key, params=None, timeout=None): + url = '%s?%s' % (metadata_url, params) + request = urlrequest.Request(url, headers=METADATA_HEADERS) + request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open(request, timeout=timeout * 1.1).read() ++ return request_opener.open(request, timeout=timeout * 1.1).read().decode("utf-8") + + + def validate(ctx): +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index bd6cf86cd..953d61ed7 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -106,7 +106,7 @@ def get_metadata(metadata_key, params=None, timeout=None): + url = '%s?%s' % (metadata_url, params) + request = urlrequest.Request(url, headers=METADATA_HEADERS) + request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open(request, timeout=timeout * 1.1).read() ++ return request_opener.open(request, timeout=timeout * 1.1).read().decode("utf-8") + + + def get_instance(project, zone, instance): +@@ -162,7 +162,7 @@ def get_alias(project, zone, instance): + + def get_localhost_alias(): + net_iface = get_metadata('instance/network-interfaces', {'recursive': True}) +- net_iface = json.loads(net_iface.decode('utf-8')) ++ net_iface = json.loads(net_iface) + try: + return net_iface[0]['ipAliases'][0] + except (KeyError, IndexError): diff --git a/SOURCES/bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch b/SOURCES/bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch new file mode 100644 index 0000000..b724aa3 --- /dev/null +++ b/SOURCES/bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch @@ -0,0 +1,122 @@ +--- a/heartbeat/aws-vpc-move-ip 2019-05-20 10:54:01.527329668 +0200 ++++ b/heartbeat/aws-vpc-move-ip 2019-05-20 11:33:35.386089091 +0200 +@@ -93,11 +93,19 @@ + + + ++ ++ ++Deprecated IP address param. Use the ip param instead. ++ ++Deprecated VPC private IP Address ++ ++ ++ + + +-Name of the routing table, where the route for the IP address should be changed, i.e. rtb-... ++Name of the routing table(s), where the route for the IP address should be changed. 
If declaring multiple routing tables they should be separated by comma. Example: rtb-XXXXXXXX,rtb-YYYYYYYYY + +-routing table name ++routing table name(s) + + + +@@ -129,6 +137,13 @@ + END + } + ++ec2ip_set_address_param_compat(){ ++ # Include backward compatibility for the deprecated address parameter ++ if [ -z "$OCF_RESKEY_ip" ] && [ -n "$OCF_RESKEY_address" ]; then ++ OCF_RESKEY_ip="$OCF_RESKEY_address" ++ fi ++} ++ + ec2ip_validate() { + for cmd in aws ip curl; do + check_binary "$cmd" +@@ -150,20 +165,29 @@ + } + + ec2ip_monitor() { +- if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ]; then +- ocf_log info "monitor: check routing table (API call)" +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" +- ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE=$($cmd) +- ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" +- if [ -z "$ROUTE_TO_INSTANCE" ]; then +- ROUTE_TO_INSTANCE="" +- fi ++ MON_RES="" ++ if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then ++ for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do ++ ocf_log info "monitor: check routing table (API call) - $rtb" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" ++ ocf_log debug "executing command: $cmd" ++ ROUTE_TO_INSTANCE="$($cmd)" ++ ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" ++ if [ -z "$ROUTE_TO_INSTANCE" ]; then ++ ROUTE_TO_INSTANCE="" ++ fi ++ ++ if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then ++ ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE on $rtb" ++ MON_RES="$MON_RES $rtb" ++ fi ++ sleep 1 ++ done + +- if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ];then +- ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" ++ if [ ! -z "$MON_RES" ]; then + return $OCF_NOT_RUNNING + fi ++ + else + ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call" + fi +@@ -195,19 +219,23 @@ + } + + ec2ip_get_and_configure() { +- # Adjusting the routing table +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile ec2 replace-route --route-table-id $OCF_RESKEY_routing_table --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID" +- ocf_log debug "executing command: $cmd" +- $cmd +- rc=$? +- if [ "$rc" != 0 ]; then +- ocf_log warn "command failed, rc: $rc" +- return $OCF_ERR_GENERIC +- fi ++ for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID" ++ ocf_log debug "executing command: $cmd" ++ $cmd ++ rc=$? ++ if [ "$rc" != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done + + # Reconfigure the local ip address + ec2ip_drop +- ip addr add "${OCF_RESKEY_ip}/32" dev $OCF_RESKEY_interface ++ cmd="ip addr add ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface" ++ ocf_log debug "executing command: $cmd" ++ $cmd + rc=$? 
+ if [ $rc != 0 ]; then + ocf_log warn "command failed, rc: $rc" +@@ -289,6 +317,8 @@ + exit $OCF_ERR_PERM + fi + ++ec2ip_set_address_param_compat ++ + ec2ip_validate + + case $__OCF_ACTION in diff --git a/SOURCES/bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch b/SOURCES/bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch new file mode 100644 index 0000000..c283801 --- /dev/null +++ b/SOURCES/bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch @@ -0,0 +1,221 @@ +From 9f2b9cc09f7e2df163ff95585374f860f3dc58eb Mon Sep 17 00:00:00 2001 +From: Tomas Krojzl +Date: Tue, 16 Apr 2019 18:40:29 +0200 +Subject: [PATCH 1/6] Fix for VM having multiple network interfaces + +--- + heartbeat/aws-vpc-move-ip | 22 +++++++++++++++++++++- + 1 file changed, 21 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 090956434..a91c2dd11 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -219,8 +219,28 @@ ec2ip_drop() { + } + + ec2ip_get_and_configure() { ++ cmd="ip -br link show dev $OCF_RESKEY_interface | tr -s ' ' | cut -d' ' -f3" ++ ocf_log debug "executing command: $cmd" ++ MAC_ADDR="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" ++ ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-instances --instance-ids $EC2_INSTANCE_ID --query 'Reservations[*].Instances[*].NetworkInterfaces[*].[NetworkInterfaceId,MacAddress]' | grep ${MAC_ADDR} | cut -f1" ++ ocf_log debug "executing command: $cmd" ++ EC2_NETWORK_INTERFACE_ID="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}" ++ + for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" + ocf_log debug "executing command: $cmd" + $cmd + rc=$? 
+ +From a871a463134ebb2456b5f37a343bf9034f5f4074 Mon Sep 17 00:00:00 2001 +From: krojzl +Date: Tue, 16 Apr 2019 18:49:32 +0200 +Subject: [PATCH 2/6] Fixing indentation + +--- + heartbeat/aws-vpc-move-ip | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index a91c2dd11..a46d10d30 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -227,7 +227,7 @@ ec2ip_get_and_configure() { + ocf_log warn "command failed, rc: $rc" + return $OCF_ERR_GENERIC + fi +- ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + + cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-instances --instance-ids $EC2_INSTANCE_ID --query 'Reservations[*].Instances[*].NetworkInterfaces[*].[NetworkInterfaceId,MacAddress]' | grep ${MAC_ADDR} | cut -f1" + ocf_log debug "executing command: $cmd" +@@ -237,7 +237,7 @@ ec2ip_get_and_configure() { + ocf_log warn "command failed, rc: $rc" + return $OCF_ERR_GENERIC + fi +- ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}" ++ ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}" + + for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do + cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" + +From 068680427dff620a948ae25f090bc154b02f17b9 Mon Sep 17 00:00:00 2001 +From: krojzl +Date: Wed, 17 Apr 2019 14:22:31 +0200 +Subject: [PATCH 3/6] Requested fix to avoid using AWS API + +--- + heartbeat/aws-vpc-move-ip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index a46d10d30..2910552f2 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -229,7 +229,7 @@ ec2ip_get_and_configure() { + fi + ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-instances --instance-ids $EC2_INSTANCE_ID --query 'Reservations[*].Instances[*].NetworkInterfaces[*].[NetworkInterfaceId,MacAddress]' | grep ${MAC_ADDR} | cut -f1" ++ cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id" + ocf_log debug "executing command: $cmd" + EC2_NETWORK_INTERFACE_ID="$(eval $cmd)" + rc=$? + +From 207a2ba66ba7196180d27674aa204980fcd25de2 Mon Sep 17 00:00:00 2001 +From: krojzl +Date: Fri, 19 Apr 2019 11:14:21 +0200 +Subject: [PATCH 4/6] More robust approach of getting MAC address + +--- + heartbeat/aws-vpc-move-ip | 29 +++++++++++++++++++++-------- + 1 file changed, 21 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 2910552f2..3a848b7e3 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -219,15 +219,28 @@ ec2ip_drop() { + } + + ec2ip_get_and_configure() { +- cmd="ip -br link show dev $OCF_RESKEY_interface | tr -s ' ' | cut -d' ' -f3" +- ocf_log debug "executing command: $cmd" +- MAC_ADDR="$(eval $cmd)" +- rc=$? 
+- if [ $rc != 0 ]; then +- ocf_log warn "command failed, rc: $rc" +- return $OCF_ERR_GENERIC ++ MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address" ++ if [ -f $MAC_FILE ]; then ++ cmd="cat ${MAC_FILE}" ++ ocf_log debug "executing command: $cmd" ++ MAC_ADDR="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" ++ else ++ cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3" ++ ocf_log debug "executing command: $cmd" ++ MAC_ADDR="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + fi +- ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + + cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id" + ocf_log debug "executing command: $cmd" + +From cdcc12a9c1431125b0d5298176e5242bfc9fbe29 Mon Sep 17 00:00:00 2001 +From: krojzl +Date: Fri, 19 Apr 2019 11:20:09 +0200 +Subject: [PATCH 5/6] Moving shared part outside if + +--- + heartbeat/aws-vpc-move-ip | 25 +++++++++---------------- + 1 file changed, 9 insertions(+), 16 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 3a848b7e3..bfe23e5bf 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -222,26 +222,19 @@ ec2ip_get_and_configure() { + MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address" + if [ -f $MAC_FILE ]; then + cmd="cat ${MAC_FILE}" +- ocf_log debug "executing command: $cmd" +- MAC_ADDR="$(eval $cmd)" +- rc=$? +- if [ $rc != 0 ]; then +- ocf_log warn "command failed, rc: $rc" +- return $OCF_ERR_GENERIC +- fi +- ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + else + cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3" +- ocf_log debug "executing command: $cmd" +- MAC_ADDR="$(eval $cmd)" +- rc=$? +- if [ $rc != 0 ]; then +- ocf_log warn "command failed, rc: $rc" +- return $OCF_ERR_GENERIC +- fi +- ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + fi + ++ ocf_log debug "executing command: $cmd" ++ MAC_ADDR="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" ++ + cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id" + ocf_log debug "executing command: $cmd" + EC2_NETWORK_INTERFACE_ID="$(eval $cmd)" + +From c3fc114fc64f6feb015c5342923fd2afc367ae28 Mon Sep 17 00:00:00 2001 +From: krojzl +Date: Fri, 19 Apr 2019 11:22:55 +0200 +Subject: [PATCH 6/6] Linting adjustment + +--- + heartbeat/aws-vpc-move-ip | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index bfe23e5bf..2757c27d0 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -225,7 +225,6 @@ ec2ip_get_and_configure() { + else + cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3" + fi +- + ocf_log debug "executing command: $cmd" + MAC_ADDR="$(eval $cmd)" + rc=$? 
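The net effect of the series above is that ec2ip_get_and_configure() resolves the instance's network interface locally and points each route table entry at that ENI. A condensed sketch of the resulting lookup chain (illustrative; the interface name, route table ID and IP are placeholders):

    MAC_ADDR=$(cat /sys/class/net/eth0/address)
    EC2_NETWORK_INTERFACE_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id)
    aws ec2 replace-route --route-table-id rtb-XXXXXXXX \
        --destination-cidr-block 10.0.0.10/32 \
        --network-interface-id $EC2_NETWORK_INTERFACE_ID
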
diff --git a/SOURCES/bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch b/SOURCES/bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch new file mode 100644 index 0000000..4de33f1 --- /dev/null +++ b/SOURCES/bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch @@ -0,0 +1,32 @@ +From aae26ca70ef910e83485778c1fb450941fe79e8a Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Mon, 3 Dec 2018 16:48:14 +0100 +Subject: [PATCH] Do not log at debug log level when HA_debug is unset + +There might be situations (e.g. bundles) where the HA_debug variable +is unset. It makes little sense to enable debug logging when the HA_debug env +variable is unset. +So let's skip debug logs when HA_debug is set to 0 or is unset. + +Tested inside a bundle and observed that previously seen 'ocf_log debug' +calls are now correctly suppressed (w/ HA_debug being unset inside the +container) + +Signed-off-by: Michele Baldessari +--- + heartbeat/ocf-shellfuncs.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 043ab9bf2..b17297e1a 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -257,7 +257,7 @@ ha_log() + + ha_debug() { + +- if [ "x${HA_debug}" = "x0" ] ; then ++ if [ "x${HA_debug}" = "x0" ] || [ -z "${HA_debug}" ] ; then + return 0 + fi + if tty >/dev/null; then diff --git a/SOURCES/bz1707969-2-ocf_is_true-add-True-to-regexp.patch b/SOURCES/bz1707969-2-ocf_is_true-add-True-to-regexp.patch new file mode 100644 index 0000000..00e34b8 --- /dev/null +++ b/SOURCES/bz1707969-2-ocf_is_true-add-True-to-regexp.patch @@ -0,0 +1,22 @@ +From 73b35b74b743403aeebab43205475be6f2938cd5 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 12 Jun 2019 10:11:07 +0200 +Subject: [PATCH] ocf_is_true: add True to regexp + +--- + heartbeat/ocf-shellfuncs.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index b17297e1a..7a97558a5 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -101,7 +101,7 @@ ocf_is_decimal() { + + ocf_is_true() { + case "$1" in +- yes|true|1|YES|TRUE|ja|on|ON) true ;; ++ yes|true|1|YES|TRUE|True|ja|on|ON) true ;; + *) false ;; + esac + } diff --git a/SOURCES/bz1717759-Filesystem-remove-notify-action-from-metadata.patch b/SOURCES/bz1717759-Filesystem-remove-notify-action-from-metadata.patch new file mode 100644 index 0000000..9cf643e --- /dev/null +++ b/SOURCES/bz1717759-Filesystem-remove-notify-action-from-metadata.patch @@ -0,0 +1,21 @@ +From d1fc6920718284431a2c2cc28562498d6c8ea792 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 19 Jun 2019 11:12:33 +0200 +Subject: [PATCH] Filesystem: remove removed notify-action from metadata + +--- + heartbeat/Filesystem | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 780ba63a4..c46ec3cca 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -221,7 +221,6 @@ block if unresponsive nfs mounts are in use on the system. 
+ + + +- + + + diff --git a/SOURCES/bz1718219-podman-1-avoid-double-inspect-call.patch b/SOURCES/bz1718219-podman-1-avoid-double-inspect-call.patch new file mode 100644 index 0000000..5aeada6 --- /dev/null +++ b/SOURCES/bz1718219-podman-1-avoid-double-inspect-call.patch @@ -0,0 +1,46 @@ +From d8400a30604229d349f36855c30a6a438204023b Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Wed, 12 Jun 2019 11:29:17 +0200 +Subject: [PATCH] Avoid double call to podman inspect in podman_simple_status() + +Right now podman_simple_status() does the following: +- It calls container_exists() which then calls "podman inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1" +- Then it calls "podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null" + +This duplication is unnecessary and we can rely on the second podman inspect +call. We need to do this because podman inspect calls are very expensive as +soon as moderate I/O kicks in. + +Tested as follows: +1) Injected the change on an existing bundle-based cluster +2) Observed that monitoring operations kept working okay +3) Verified by adding set -x that only a single podman inspect per monitor + operation was called (as opposed to two before) +4) Restarted a bundle with an OCF resource inside correctly +5) Did a podman stop of a bundle and correctly observed that: +5.a) It was detected as non running: +* haproxy-bundle-podman-1_monitor_60000 on controller-0 'not running' (7): call=192, status=complete, exitreason='', + last-rc-change='Wed Jun 12 09:22:18 2019', queued=0ms, exec=0ms +5.b) It was correctly started afterwards + +Signed-off-by: Michele Baldessari +--- + heartbeat/podman | 5 ----- + 1 file changed, 5 deletions(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index 34e11da6b..b2b3081f9 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -238,11 +238,6 @@ podman_simple_status() + { + local val + +- container_exists +- if [ $? -ne 0 ]; then +- return $OCF_NOT_RUNNING +- fi +- + # retrieve the 'Running' attribute for the container + val=$(podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null) + if [ $? -ne 0 ]; then diff --git a/SOURCES/bz1718219-podman-2-improve-monitor-action.patch b/SOURCES/bz1718219-podman-2-improve-monitor-action.patch new file mode 100644 index 0000000..1537139 --- /dev/null +++ b/SOURCES/bz1718219-podman-2-improve-monitor-action.patch @@ -0,0 +1,63 @@ +From 9685e8e6bf2896377a9cf0e07a85de5dd5fcf2df Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Wed, 12 Jun 2019 12:00:31 +0200 +Subject: [PATCH] Simplify podman_monitor() + +Before this change podman_monitor() does two things: +\-> podman_simple_status() + \-> podman inspect {{.State.Running}} +\-> if podman_simple_status == 0 then monitor_cmd_exec() + \-> if [ -z "$OCF_RESKEY_monitor_cmd" ]; then # so if OCF_RESKEY_monitor_cmd is empty we just return SUCCESS + return $rc + fi + # if OCF_RESKEY_monitor_cmd is set to something we execute it + podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd + +Let's actually only rely on podman exec as invoked inside monitor_cmd_exec +when $OCF_RESKEY_monitor_cmd is non empty (which is the default as it is set to "/bin/true"). +When there is no monitor_cmd command defined then it makes sense to rely on podman inspect +calls container in podman_simple_status(). 
+ +Tested as follows: +1) Injected the change on an existing bundle-based cluster +2) Observed that monitoring operations kept working okay +3) Restarted rabbitmq-bundle and galera-bundle successfully +4) Killed a container and we correctly detected the monitor failure +Jun 12 09:52:12 controller-0 pacemaker-controld[25747]: notice: controller-0-haproxy-bundle-podman-1_monitor_60000:230 [ ocf-exit-reason:monitor cmd failed (rc=125), output: cannot exec into container that is not running\n ] +5) Container correctly got restarted after the monitor failure: + haproxy-bundle-podman-1 (ocf::heartbeat:podman): Started controller-0 +6) Stopped and removed a container and pcmk detected it correctly: +Jun 12 09:55:15 controller-0 podman(haproxy-bundle-podman-1)[841411]: ERROR: monitor cmd failed (rc=125), output: unable to exec into haproxy-bundle-podman-1: no container with name or ID haproxy-bundle-podman-1 found: no such container +Jun 12 09:55:15 controller-0 pacemaker-execd[25744]: notice: haproxy-bundle-podman-1_monitor_60000:841411:stderr [ ocf-exit-reason:monitor cmd failed (rc=125), output: unable to exec into haproxy-bundle-podman-1: no container with name or ID haproxy-bundle-podman-1 found: no such container ] +7) pcmk was able to start the container that was stopped and removed: +Jun 12 09:55:16 controller-0 pacemaker-controld[25747]: notice: Result of start operation for haproxy-bundle-podman-1 on controller-0: 0 (ok) +8) Added 'set -x' to the RA and correctly observed that no 'podman inspect' has been invoked during monitoring operations + +Signed-off-by: Michele Baldessari +--- + heartbeat/podman | 11 +++-------- + 1 file changed, 3 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index b2b3081f9..a9bd57dea 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -255,15 +255,10 @@ podman_simple_status() + + podman_monitor() + { +- local rc=0 +- +- podman_simple_status +- rc=$? +- +- if [ $rc -ne 0 ]; then +- return $rc ++ if [ -z "$OCF_RESKEY_monitor_cmd" ]; then ++ podman_simple_status ++ return $? + fi +- + monitor_cmd_exec + } + diff --git a/SOURCES/bz1718219-podman-3-remove-docker-remnant.patch b/SOURCES/bz1718219-podman-3-remove-docker-remnant.patch new file mode 100644 index 0000000..56f7302 --- /dev/null +++ b/SOURCES/bz1718219-podman-3-remove-docker-remnant.patch @@ -0,0 +1,34 @@ +From 69c5d35a7a5421d4728db824558007bbb91a9d4a Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Wed, 12 Jun 2019 12:02:06 +0200 +Subject: [PATCH] Remove unneeded podman exec --help call + +There are no podman releases that do not have the exec argument, so +let's just drop this remnant that came from the docker RA. + +Signed-off-by: Michele Baldessari +--- + heartbeat/podman | 10 ++-------- + 1 file changed, 2 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index a9bd57dea..858023555 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -190,14 +190,8 @@ monitor_cmd_exec() + return $rc + fi + +- if podman exec --help >/dev/null 2>&1; then +- out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) +- rc=$? +- else +- out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(podman inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1) +- rc=$? +- fi +- ++ out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) ++ rc=$? 
+ if [ $rc -eq 127 ]; then + ocf_log err "monitor cmd failed (rc=$rc), output: $out" + ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container." diff --git a/SOURCES/bz1718219-podman-4-use-exec-to-avoid-performance-issues.patch b/SOURCES/bz1718219-podman-4-use-exec-to-avoid-performance-issues.patch new file mode 100644 index 0000000..351207f --- /dev/null +++ b/SOURCES/bz1718219-podman-4-use-exec-to-avoid-performance-issues.patch @@ -0,0 +1,161 @@ +From 6016283dfdcb45bf750f96715fc653a4c0904bca Mon Sep 17 00:00:00 2001 +From: Damien Ciabrini +Date: Fri, 28 Jun 2019 13:34:40 +0200 +Subject: [PATCH] podman: only use exec to manage container's lifecycle + +Under heavy IO load, podman may be impacted and take a long time +to execute some actions. If that takes more than the default +20s container monitoring timeout, containers will restart unexpectedly. + +Replace all IO-sensitive podman calls (inspect, exists...) by +equivalent "podman exec" calls, because the latter command seems +less prone to performance degradation under IO load. + +With this commit, the resource agent now requires podman 1.0.2+, +because it relies on of two different patches [1,2] that improve +IO performance and enable to distinguish "container stopped" +"container doesn't exist" error codes. + +Tested on an OpenStack environment with podman 1.0.2, with the +following scenario: + . regular start/stop/monitor operations + . probe operations (pcs resource cleanup/refresh) + . unmanage/manage operations + . reboot + +[1] https://github.com/containers/libpod/commit/90b835db69d589de559462d988cb3fae5cf1ef49 +[2] https://github.com/containers/libpod/commit/a19975f96d2ee7efe186d9aa0be42285cfafa3f4 +--- + heartbeat/podman | 75 ++++++++++++++++++++++++------------------------ + 1 file changed, 37 insertions(+), 38 deletions(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index 51f6ba883..8fc2c4695 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -129,9 +129,6 @@ the health of the container. This command must return 0 to indicate that + the container is healthy. A non-zero return code will indicate that the + container has failed and should be recovered. + +-If 'podman exec' is supported, it is used to execute the command. If not, +-nsenter is used. +- + Note: Using this method for monitoring processes inside a container + is not recommended, as containerd tries to track processes running + inside the container and does not deal well with many short-lived +@@ -192,17 +189,13 @@ monitor_cmd_exec() + local rc=$OCF_SUCCESS + local out + +- if [ -z "$OCF_RESKEY_monitor_cmd" ]; then +- return $rc +- fi +- + out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) + rc=$? +- if [ $rc -eq 127 ]; then +- ocf_log err "monitor cmd failed (rc=$rc), output: $out" +- ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container." 
+- # there is no recovering from this, exit immediately +- exit $OCF_ERR_ARGS ++ # 125: no container with name or ID ${CONTAINER} found ++ # 126: container state improper (not running) ++ # 127: any other error ++ if [ $rc -eq 125 ] || [ $rc -eq 126 ]; then ++ rc=$OCF_NOT_RUNNING + elif [ $rc -ne 0 ]; then + ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out" + rc=$OCF_ERR_GENERIC +@@ -215,7 +208,16 @@ monitor_cmd_exec() + + container_exists() + { +- podman inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1 ++ local rc ++ local out ++ ++ out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) ++ rc=$? ++ # 125: no container with name or ID ${CONTAINER} found ++ if [ $rc -ne 125 ]; then ++ return 0 ++ fi ++ return 1 + } + + remove_container() +@@ -236,30 +238,30 @@ remove_container() + + podman_simple_status() + { +- local val +- +- # retrieve the 'Running' attribute for the container +- val=$(podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null) +- if [ $? -ne 0 ]; then +- #not running as a result of container not being found +- return $OCF_NOT_RUNNING +- fi ++ local rc + +- if ocf_is_true "$val"; then +- # container exists and is running +- return $OCF_SUCCESS ++ # simple status is implemented via podman exec ++ # everything besides success is considered "not running" ++ monitor_cmd_exec ++ rc=$? ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ rc=$OCF_NOT_RUNNING; + fi +- +- return $OCF_NOT_RUNNING ++ return $rc + } + + podman_monitor() + { +- if [ -z "$OCF_RESKEY_monitor_cmd" ]; then +- podman_simple_status +- return $? +- fi ++ # We rely on running podman exec to monitor the container ++ # state because that command seems to be less prone to ++ # performance issue under IO load. ++ # ++ # For probes to work, we expect cmd_exec to be able to report ++ # when a container is not running. Here, we're not interested ++ # in distinguishing whether it's stopped or non existing ++ # (there's function container_exists for that) + monitor_cmd_exec ++ return $? + } + + podman_create_mounts() { +@@ -416,14 +418,6 @@ podman_validate() + exit $OCF_ERR_CONFIGURED + fi + +- if [ -n "$OCF_RESKEY_monitor_cmd" ]; then +- podman exec --help >/dev/null 2>&1 +- if [ ! $? ]; then +- ocf_log info "checking for nsenter, which is required when 'monitor_cmd' is specified" +- check_binary nsenter +- fi +- fi +- + image_exists + if [ $? -ne 0 ]; then + ocf_exit_reason "base image, ${OCF_RESKEY_image}, could not be found." 
+@@ -457,6 +451,11 @@ fi + + CONTAINER=$OCF_RESKEY_name + ++# Note: we currently monitor podman containers by with the "podman exec" ++# command, so make sure that invocation is always valid by enforcing the ++# exec command to be non-empty ++: ${OCF_RESKEY_monitor_cmd:=/bin/true} ++ + case $__OCF_ACTION in + meta-data) meta_data + exit $OCF_SUCCESS;; diff --git a/SOURCES/bz1719684-dhcpd-keep-SELinux-context-chroot.patch b/SOURCES/bz1719684-dhcpd-keep-SELinux-context-chroot.patch new file mode 100644 index 0000000..82a46c1 --- /dev/null +++ b/SOURCES/bz1719684-dhcpd-keep-SELinux-context-chroot.patch @@ -0,0 +1,28 @@ +From c8c073ed81884128b0b3955fb0b0bd23661044a2 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 12 Jun 2019 12:45:08 +0200 +Subject: [PATCH] dhcpd: keep SELinux context + +--- + heartbeat/dhcpd | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/dhcpd b/heartbeat/dhcpd +index 8b2d8b695..46027b39b 100755 +--- a/heartbeat/dhcpd ++++ b/heartbeat/dhcpd +@@ -337,12 +337,12 @@ dhcpd_initialize_chroot() { + done | sort -u` + for i in $cplibs ; do + if [ -s "$i" ]; then +- cp -pL "$i" "${OCF_RESKEY_chrooted_path}/$libdir/" || ++ cp -aL "$i" "${OCF_RESKEY_chrooted_path}/$libdir/" || + { ocf_exit_reason "could not copy $i to chroot jail"; return $OCF_ERR_GENERIC; } + fi + done + +- return $OCF_SUCCESS ++ return $OCF_SUCCESS + } + + # Initialize a non-chroot environment diff --git a/SOURCES/bz1730455-LVM-activate-fix-monitor-hang.patch b/SOURCES/bz1730455-LVM-activate-fix-monitor-hang.patch new file mode 100644 index 0000000..fe1bcd8 --- /dev/null +++ b/SOURCES/bz1730455-LVM-activate-fix-monitor-hang.patch @@ -0,0 +1,22 @@ +From ef37f8a2461b5763f4510d51e08d27d8b1f76937 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 23 Jul 2019 15:47:17 +0200 +Subject: [PATCH] LVM-activate: fix monitor might hang due to lvm_validate + which was added by accident + +--- + heartbeat/LVM-activate | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 9c7c721bf..3df40c894 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -852,7 +852,6 @@ stop) + lvm_stop + ;; + monitor) +- lvm_validate + lvm_status + ;; + validate-all) diff --git a/SOURCES/bz1732867-CTDB-1-explicitly-use-bash-shell.patch b/SOURCES/bz1732867-CTDB-1-explicitly-use-bash-shell.patch new file mode 100644 index 0000000..cb13c0a --- /dev/null +++ b/SOURCES/bz1732867-CTDB-1-explicitly-use-bash-shell.patch @@ -0,0 +1,39 @@ +From 1ff4ce7cbe58b5309f00ac1bbe124c562b6dcaf6 Mon Sep 17 00:00:00 2001 +From: David Disseldorp +Date: Fri, 27 Jul 2018 16:02:26 +0200 +Subject: [PATCH] CTDB: explicitly use bash shell + +Upcoming recovery lock substring processing is bash specific. 
+ +Signed-off-by: David Disseldorp +--- + configure.ac | 1 + + heartbeat/{CTDB => CTDB.in} | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + rename heartbeat/{CTDB => CTDB.in} (99%) + +diff --git a/configure.ac b/configure.ac +index 039b4942c..10f5314da 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -978,6 +978,7 @@ AC_CONFIG_FILES([heartbeat/slapd], [chmod +x heartbeat/slapd]) + AC_CONFIG_FILES([heartbeat/sybaseASE], [chmod +x heartbeat/sybaseASE]) + AC_CONFIG_FILES([heartbeat/syslog-ng], [chmod +x heartbeat/syslog-ng]) + AC_CONFIG_FILES([heartbeat/vsftpd], [chmod +x heartbeat/vsftpd]) ++AC_CONFIG_FILES([heartbeat/CTDB], [chmod +x heartbeat/CTDB]) + AC_CONFIG_FILES([rgmanager/src/resources/ASEHAagent.sh], [chmod +x rgmanager/src/resources/ASEHAagent.sh]) + AC_CONFIG_FILES([rgmanager/src/resources/apache.sh], [chmod +x rgmanager/src/resources/apache.sh]) + AC_CONFIG_FILES([rgmanager/src/resources/bind-mount.sh], [chmod +x rgmanager/src/resources/bind-mount.sh]) +diff --git a/heartbeat/CTDB b/heartbeat/CTDB.in +similarity index 99% +rename from heartbeat/CTDB +rename to heartbeat/CTDB.in +index 28e58cea0..7d87a4ef7 100755 +--- a/heartbeat/CTDB ++++ b/heartbeat/CTDB.in +@@ -1,4 +1,4 @@ +-#!/bin/sh ++#!@BASH_SHELL@ + # + # OCF Resource Agent for managing CTDB + # diff --git a/SOURCES/bz1732867-CTDB-2-add-ctdb_max_open_files-parameter.patch b/SOURCES/bz1732867-CTDB-2-add-ctdb_max_open_files-parameter.patch new file mode 100644 index 0000000..c30bfee --- /dev/null +++ b/SOURCES/bz1732867-CTDB-2-add-ctdb_max_open_files-parameter.patch @@ -0,0 +1,40 @@ +From 61f7cb5954d1727f58fab6d642a124ef342c8641 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 20 Feb 2019 11:24:28 +0100 +Subject: [PATCH] CTDB: add ctdb_max_open_files parameter + +--- + heartbeat/CTDB.in | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 0d58c850a..bbf8ef627 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -288,6 +288,14 @@ What debug level to run at (0-10). Higher means more verbose. + + + ++ ++ ++Maximum number of open files (for ulimit -n) ++ ++Max open files ++ ++ ++ + + + Path to default samba config file. Only necessary if CTDB +@@ -611,6 +619,11 @@ ctdb_start() { + start_as_disabled="--start-as-disabled" + ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" || start_as_disabled="" + ++ # set nofile ulimit for ctdbd process ++ if [ -n "$OCF_RESKEY_ctdb_max_open_files" ]; then ++ ulimit -n "$OCF_RESKEY_ctdb_max_open_files" ++ fi ++ + # Start her up + "$OCF_RESKEY_ctdbd_binary" \ + --reclock="$OCF_RESKEY_ctdb_recovery_lock" \ diff --git a/SOURCES/bz1732867-CTDB-3-fixes.patch b/SOURCES/bz1732867-CTDB-3-fixes.patch new file mode 100644 index 0000000..813bf81 --- /dev/null +++ b/SOURCES/bz1732867-CTDB-3-fixes.patch @@ -0,0 +1,131 @@ +From 8c61f2019d11781b737251b5cf839437b25fc53f Mon Sep 17 00:00:00 2001 +From: David Disseldorp +Date: Wed, 25 Jul 2018 23:15:10 +0200 +Subject: [PATCH 1/3] CTDB: fix incorrect db corruption reports (bsc#1101668) + +If a database was disconnected during an active transaction, then +tdbdump may fail with e.g.: +> /usr/bin/tdbdump /var/lib/ctdb/persistent/secrets.tdb.1 +Failed to open /var/lib/ctdb/persistent/secrets.tdb.1 +tdb(/var/lib/ctdb/persistent/secrets.tdb.1): FATAL: +tdb_transaction_recover: attempt to recover read only database + +This does *not* indicate corruption, only that tdbdump, which opens the +database readonly, isn't able to perform recovery. 
+ +Using tdbtool check, instead of tdbdump, passes: +> tdbtool /var/lib/ctdb/persistent/secrets.tdb.1 check +tdb_transaction_recover: recovered 2146304 byte database +Database integrity is OK and has 2 records. + +Drop the tdbdump checks, and instead rely on the core ctdb event script, +which performs the same checks with tdbtool. + +Signed-off-by: David Disseldorp +--- + heartbeat/CTDB.in | 18 ++++-------------- + 1 file changed, 4 insertions(+), 14 deletions(-) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 1456ea32b..28e58cea0 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -392,6 +392,8 @@ enable_event_scripts() { + local event_dir + event_dir=$OCF_RESKEY_ctdb_config_dir/events.d + ++ chmod u+x "$event_dir/00.ctdb" # core database health check ++ + if [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ]; then + chmod u+x "$event_dir/10.interface" + else +@@ -563,17 +565,6 @@ ctdb_start() { + rv=$? + [ $rv -ne 0 ] && return $rv + +- # Die if databases are corrupted +- persistent_db_dir="${OCF_RESKEY_ctdb_dbdir}/persistent" +- mkdir -p $persistent_db_dir 2>/dev/null +- for pdbase in $persistent_db_dir/*.tdb.[0-9]; do +- [ -f "$pdbase" ] || break +- /usr/bin/tdbdump "$pdbase" >/dev/null 2>/dev/null || { +- ocf_exit_reason "Persistent database $pdbase is corrupted! CTDB will not start." +- return $OCF_ERR_GENERIC +- } +- done +- + # Add necessary configuration to smb.conf + init_smb_conf + if [ $? -ne 0 ]; then +@@ -737,9 +728,8 @@ ctdb_monitor() { + + + ctdb_validate() { +- # Required binaries (full path to tdbdump is intentional, as that's +- # what's used in ctdb_start, which was lifted from the init script) +- for binary in pkill /usr/bin/tdbdump; do ++ # Required binaries ++ for binary in pkill; do + check_binary $binary + done + + +From 1ff4ce7cbe58b5309f00ac1bbe124c562b6dcaf6 Mon Sep 17 00:00:00 2001 +From: David Disseldorp +Date: Fri, 27 Jul 2018 16:02:26 +0200 +Subject: [PATCH 2/3] CTDB: explicitly use bash shell + +Upcoming recovery lock substring processing is bash specific. + +Signed-off-by: David Disseldorp +--- + configure.ac | 1 + + heartbeat/CTDB.in | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 7d87a4ef7..f9b5c564f 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -134,8 +134,8 @@ For more information see http://linux-ha.org/wiki/CTDB_(resource_agent) + + + +-The location of a shared lock file, common across all nodes. +-This must be on shared storage, e.g.: /shared-fs/samba/ctdb.lock ++The location of a shared lock file or helper binary, common across all nodes. ++See CTDB documentation for details. + + CTDB shared lock file + +@@ -757,13 +757,24 @@ ctdb_validate() { + return $OCF_ERR_CONFIGURED + fi + +- lock_dir=$(dirname "$OCF_RESKEY_ctdb_recovery_lock") +- touch "$lock_dir/$$" 2>/dev/null +- if [ $? != 0 ]; then +- ocf_exit_reason "Directory for lock file '$OCF_RESKEY_ctdb_recovery_lock' does not exist, or is not writable." +- return $OCF_ERR_ARGS ++ if [ "${OCF_RESKEY_ctdb_recovery_lock:0:1}" == '!' ]; then ++ # '!' prefix means recovery lock is handled via a helper binary ++ binary="${OCF_RESKEY_ctdb_recovery_lock:1}" ++ binary="${binary%% *}" # trim any parameters ++ if [ -z "$binary" ]; then ++ ocf_exit_reason "ctdb_recovery_lock invalid helper" ++ return $OCF_ERR_CONFIGURED ++ fi ++ check_binary "${binary}" ++ else ++ lock_dir=$(dirname "$OCF_RESKEY_ctdb_recovery_lock") ++ touch "$lock_dir/$$" 2>/dev/null ++ if [ $? 
!= 0 ]; then ++ ocf_exit_reason "Directory for lock file '$OCF_RESKEY_ctdb_recovery_lock' does not exist, or is not writable." ++ return $OCF_ERR_ARGS ++ fi ++ rm "$lock_dir/$$" + fi +- rm "$lock_dir/$$" + + return $OCF_SUCCESS + } diff --git a/SOURCES/bz1732867-CTDB-4-add-v4.9-support.patch b/SOURCES/bz1732867-CTDB-4-add-v4.9-support.patch new file mode 100644 index 0000000..a3332ef --- /dev/null +++ b/SOURCES/bz1732867-CTDB-4-add-v4.9-support.patch @@ -0,0 +1,452 @@ +From 30b9f55325d2acfba27aa6859c7360e10b7201d7 Mon Sep 17 00:00:00 2001 +From: David Disseldorp +Date: Wed, 5 Jun 2019 00:41:13 +0200 +Subject: [PATCH 1/3] CTDB: support Samba 4.9+ + +With Samba 4.9+, all ctdbd parameters have moved to config files. +Generate a new /etc/ctdb/ctdb.conf file during ctdb startup, based on RA +configuration. + +Event scripts in Samba 4.9+ are also no longer enabled/disabled based on +file mode. Use the "ctdb event script enable/disable" helpers, which now +work without a running ctdbd. + +Fixes: https://github.com/ClusterLabs/resource-agents/issues/1196 +Signed-off-by: David Disseldorp +Signed-off-by: Noel Power +Signed-off-by: Amitay Isaacs +--- + heartbeat/CTDB.in | 214 ++++++++++++++++++++++++++++++++++++---------- + 1 file changed, 167 insertions(+), 47 deletions(-) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 4dd646896..79a2f97e7 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -143,6 +143,10 @@ OCF_RESKEY_smb_fileid_algorithm_default="" + + ####################################################################### + ++ctdb_version() { ++ $OCF_RESKEY_ctdb_binary version | awk '{print $NF}' | sed "s/[-\.]\?[[:alpha:]].*//" ++} ++ + meta_data() { + cat < +@@ -256,7 +260,7 @@ host any public ip addresses. + + The directory containing various CTDB configuration files. + The "nodes" and "notify.sh" scripts are expected to be +-in this directory, as is the "events.d" subdirectory. ++in this directory. + + CTDB config file directory + +@@ -282,8 +286,10 @@ Full path to the CTDB cluster daemon binary. + + Full path to the domain socket that ctdbd will create, used for + local clients to attach and communicate with the ctdb daemon. ++With CTDB 4.9.0 and later the socket path is hardcoded at build ++time, so this parameter is ignored. + +-CTDB socket location ++CTDB socket location (ignored with CTDB 4.9+) + + + +@@ -421,16 +427,28 @@ invoke_ctdb() { + timeout=$((OCF_RESKEY_CRM_meta_timeout/1000)) + timelimit=$((OCF_RESKEY_CRM_meta_timeout/1000)) + fi +- $OCF_RESKEY_ctdb_binary --socket="$OCF_RESKEY_ctdb_socket" \ +- -t $timeout -T $timelimit \ +- "$@" ++ ++ local vers=$(ctdb_version) ++ ocf_version_cmp "$vers" "4.9.0" ++ ++ # if version < 4.9.0 specify '--socket' otherwise it's ++ # a compiled option ++ if [ "$?" -eq "0" ]; then ++ $OCF_RESKEY_ctdb_binary --socket="$OCF_RESKEY_ctdb_socket" \ ++ -t $timeout -T $timelimit \ ++ "$@" ++ else ++ $OCF_RESKEY_ctdb_binary \ ++ -t $timeout -T $timelimit \ ++ "$@" ++ fi + } + + # Enable any event scripts that are explicitly required. + # Any others will ultimately be invoked or not based on how they ship + # with CTDB, but will generally have no effect, beacuase the relevant + # CTDB_MANAGES_* options won't be set in /etc/sysconfig/ctdb. +-enable_event_scripts() { ++enable_event_scripts_chmod() { + local event_dir + event_dir=$OCF_RESKEY_ctdb_config_dir/events.d + +@@ -454,6 +472,36 @@ enable_event_scripts() { + fi + } + ++enable_event_scripts_symlink() { ++ # event scripts are symlinked once enabled, with the link source in... 
++ mkdir -p "$OCF_RESKEY_ctdb_config_dir/events/legacy" 2>/dev/null ++ ++ invoke_ctdb event script enable legacy 00.ctdb ++ ++ if [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ]; then ++ invoke_ctdb event script enable legacy 10.interface ++ else ++ invoke_ctdb event script disable legacy 10.interface ++ fi ++ if [ -f "${OCF_RESKEY_ctdb_config_dir}/static-routes" ]; then ++ invoke_ctdb event script enable legacy 11.routing ++ else ++ invoke_ctdb event script disable legacy 11.routing ++ fi ++ ++ if ocf_is_true "$OCF_RESKEY_ctdb_manages_winbind"; then ++ invoke_ctdb event script enable legacy 49.winbind ++ else ++ invoke_ctdb event script disable legacy 49.winbind ++ fi ++ ++ if ocf_is_true "$OCF_RESKEY_ctdb_manages_samba"; then ++ invoke_ctdb event script enable legacy 50.samba ++ else ++ invoke_ctdb event script disable legacy 50.samba ++ fi ++} ++ + # This function has no effect (currently no way to set CTDB_SET_*) + # but remains here in case we need it in future. + set_ctdb_variables() { +@@ -556,6 +604,46 @@ append_ctdb_sysconfig() { + [ -n "$2" ] && echo "$1=$2" >> "$CTDB_SYSCONFIG" + } + ++generate_ctdb_config() { ++ local ctdb_config="$OCF_RESKEY_ctdb_config_dir/ctdb.conf" ++ ++ # Backup existing config if we're not already using an auto-generated one ++ grep -qa '# CTDB-RA: Auto-generated' $ctdb_config || cp -p $ctdb_config ${ctdb_config}.ctdb-ra-orig ++ if [ $? -ne 0 ]; then ++ ocf_log warn "Unable to backup $ctdb_config to ${ctdb_config}.ctdb-ra-orig" ++ fi ++ ++ local log_option="file:$OCF_RESKEY_ctdb_logfile" ++ if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then ++ log_option="syslog" ++ fi ++ ++ local start_as_disabled="false" ++ ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" && start_as_disabled="true" ++ ++ local dbdir_volatile="$OCF_RESKEY_ctdb_dbdir/volatile" ++ [ -d "$dbdir_volatile" ] || mkdir -p "$dbdir_volatile" 2>/dev/null ++ local dbdir_persistent="$OCF_RESKEY_ctdb_dbdir/persistent" ++ [ -d "$dbdir_persistent" ] || mkdir -p "$dbdir_persistent" 2>/dev/null ++ local dbdir_state="$OCF_RESKEY_ctdb_dbdir/state" ++ [ -d "$dbdir_state" ] || mkdir -p "$dbdir_state" 2>/dev/null ++ ++cat >$ctdb_config </dev/null + +- # public addresses file (should not be present, but need to set for correctness if it is) +- local pub_addr_option +- pub_addr_option="" +- [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ] && \ +- pub_addr_option="--public-addresses=${OCF_RESKEY_ctdb_config_dir}/public_addresses" +- # start as disabled +- local start_as_disabled +- start_as_disabled="--start-as-disabled" +- ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" || start_as_disabled="" +- + # set nofile ulimit for ctdbd process + if [ -n "$OCF_RESKEY_ctdb_max_open_files" ]; then + ulimit -n "$OCF_RESKEY_ctdb_max_open_files" + fi + + # Start her up +- "$OCF_RESKEY_ctdbd_binary" \ +- --reclock="$OCF_RESKEY_ctdb_recovery_lock" \ +- --nlist="$OCF_RESKEY_ctdb_config_dir/nodes" \ +- --socket="$OCF_RESKEY_ctdb_socket" \ +- --dbdir="$OCF_RESKEY_ctdb_dbdir" \ +- --dbdir-persistent="$OCF_RESKEY_ctdb_dbdir/persistent" \ +- --event-script-dir="$OCF_RESKEY_ctdb_config_dir/events.d" \ +- --notification-script="$OCF_RESKEY_ctdb_config_dir/notify.sh" \ +- --transport=tcp \ +- $start_as_disabled $log_option $pub_addr_option \ +- -d "$OCF_RESKEY_ctdb_debuglevel" ++ invoke_ctdbd "$version" ++ + if [ $? -ne 0 ]; then + # cleanup smb.conf + cleanup_smb_conf +@@ -688,7 +808,7 @@ ctdb_start() { + if [ $? 
-ne 0 ]; then + # CTDB will be running, kill it before returning + ctdb_stop +- ocf_exit_reason "Can't invoke $OCF_RESKEY_ctdb_binary --socket=$OCF_RESKEY_ctdb_socket status" ++ ocf_exit_reason "Can't invoke $OCF_RESKEY_ctdb_binary status" + return $OCF_ERR_GENERIC + fi + if ! echo "$status" | grep -qs 'UNHEALTHY (THIS'; then +@@ -725,7 +845,7 @@ ctdb_stop() { + [ $count -gt 10 ] && { + ocf_log info "killing ctdbd " + pkill -9 -f "$OCF_RESKEY_ctdbd_binary" +- pkill -9 -f "${OCF_RESKEY_ctdb_config_dir}/events.d/" ++ pkill -9 -f "${OCF_RESKEY_ctdb_config_dir}/events" + } + done + + +From b4753b7cb46045bb9e7ed5e3a0a20f6104264b12 Mon Sep 17 00:00:00 2001 +From: David Disseldorp +Date: Wed, 10 Jul 2019 17:11:50 +0200 +Subject: [PATCH 2/3] CTDB: generate script.options file for 4.9+ + +Event scripts in CTDB 4.9+ ignore sysconfig configuration and instead +parse parameters in ctdb_config_dir/script.options . + +Signed-off-by: David Disseldorp +--- + heartbeat/CTDB.in | 35 ++++++++++++++++++++++++++++++----- + 1 file changed, 30 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 79a2f97e7..0906f3da9 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -242,6 +242,7 @@ If the amount of free memory drops below this value the node will + become unhealthy and ctdb and all managed services will be shutdown. + Once this occurs, the administrator needs to find the reason for the + OOM situation, rectify it and restart ctdb with "service ctdb start". ++With CTDB 4.4.0 and later this parameter is ignored. + + Minimum amount of free memory (MB) + +@@ -600,8 +601,10 @@ cleanup_smb_conf() { + mv "$OCF_RESKEY_smb_conf.$$" "$OCF_RESKEY_smb_conf" + } + +-append_ctdb_sysconfig() { +- [ -n "$2" ] && echo "$1=$2" >> "$CTDB_SYSCONFIG" ++append_conf() { ++ local file_path="$1" ++ shift ++ [ -n "$2" ] && echo "$1=$2" >> "$file_path" + } + + generate_ctdb_config() { +@@ -644,6 +647,25 @@ cat >$ctdb_config <$script_options < +Date: Wed, 10 Jul 2019 17:54:01 +0200 +Subject: [PATCH 3/3] CTDB: drop sysconfig presence check during validate + +There are two reasons to avoid this check: +- for ctdb versions prior to 4.9.0, the sysconfig file is generated by + the resource agent start hook *after* ctdb_validate() is called. +- post 4.9.0 versions don't use the sysconfig file. + +Signed-off-by: David Disseldorp +--- + heartbeat/CTDB.in | 5 ----- + 1 file changed, 5 deletions(-) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 0906f3da9..15d78902e 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -925,11 +925,6 @@ ctdb_validate() { + check_binary $binary + done + +- if [ -z "$CTDB_SYSCONFIG" ]; then +- ocf_exit_reason "Can't find CTDB config file (expecting /etc/sysconfig/ctdb, /etc/default/ctdb or similar)" +- return $OCF_ERR_INSTALLED +- fi +- + if ocf_is_true "$OCF_RESKEY_ctdb_manages_samba" && [ ! -f "$OCF_RESKEY_smb_conf" ]; then + ocf_exit_reason "Samba config file '$OCF_RESKEY_smb_conf' does not exist." 
+ return $OCF_ERR_INSTALLED diff --git a/SOURCES/bz1736746-podman-drop-in-support.patch b/SOURCES/bz1736746-podman-drop-in-support.patch new file mode 100644 index 0000000..8c4be1a --- /dev/null +++ b/SOURCES/bz1736746-podman-drop-in-support.patch @@ -0,0 +1,193 @@ +From 462ada6164cb77c81f5291d88287d68506d38056 Mon Sep 17 00:00:00 2001 +From: Damien Ciabrini +Date: Tue, 9 Jul 2019 23:14:21 +0200 +Subject: [PATCH] Generate addition drop-in dependencies for podman containers + +When podman creates a container, it creates two additional systemd +scope files dynamically: + + - libpod-conmon-.scope - runs a conmon process that + tracks a container's pid1 into a dedicated pidfile. + - libpod-.scope - created dynamically by runc, + for cgroups accounting + +On shutdown, it can happen that systemd stops those scope early, +which in turn sends a SIGTERM to pacemaker-managed containers +before pacemaker has scheduled any stop operation. That +confuses the cluster and may break shutdown. + +Add a new option in the resource-agent to inject additional +dependencies into the dynamically created scope files, so that +systemd is not allowed to stop scopes before the pacemaker +service itself is stopped. + +When that option is enabled, the scopes look like: + + # podman ps | grep galera + c329819a1227 192.168.122.8:8787/rhosp15/openstack-mariadb:latest dumb-init -- /bin... About an hour ago Up About an hour ago galera-bundle-podman-0 + + # systemctl cat libpod*c329819a1227* + # /run/systemd/transient/libpod-conmon-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope + # This is a transient unit file, created programmatically via the systemd API. Do not edit. + [Scope] + Slice=machine.slice + Delegate=yes + + [Unit] + DefaultDependencies=no + + # /run/systemd/transient/libpod-conmon-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope.d/dep.conf + [Unit] + Before=pacemaker.service + + # /run/systemd/transient/libpod-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope + # This is a transient unit file, created programmatically via the systemd API. Do not edit. + [Unit] + Description=libcontainer container c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b + + [Scope] + Slice=machine.slice + Delegate=yes + MemoryAccounting=yes + CPUAccounting=yes + BlockIOAccounting=yes + + [Unit] + DefaultDependencies=no + + # /run/systemd/transient/libpod-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope.d/dep.conf + [Unit] + Before=pacemaker.service + +Effectively, this prevents systemd from managing the shutdown of any +pacemaker-managed podman container. + +Related: rhbz#1726442 +--- + heartbeat/podman | 82 +++++++++++++++++++++++++++++++++++++++++++++++- + 1 file changed, 81 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index 8fc2c4695..8a916eb8c 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -158,6 +158,16 @@ to have the particular one persist when this happens. + reuse container + + ++ ++ ++ ++Use transient drop-in files to add extra dependencies to the systemd ++scopes associated to the container. During reboot, this prevents systemd ++to stop the container before pacemaker. ++ ++drop-in dependency ++ ++ + + + +@@ -273,8 +283,57 @@ podman_create_mounts() { + IFS="$oldIFS" + } + ++podman_container_id() ++{ ++ # Retrieve the container ID by doing a "podman ps" rather than ++ # a "podman inspect", because the latter has performance issues ++ # under IO load. 
++ # We could have run "podman start $CONTAINER" to get the ID back ++ # but if the container is stopped, the command will return a ++ # name instead of a container ID. This would break us. ++ podman ps --no-trunc --format '{{.ID}} {{.Names}}' | grep -F -w -m1 "$CONTAINER" | cut -d' ' -f1 ++} ++ ++ ++create_transient_drop_in_dependency() ++{ ++ local cid=$1 ++ local rc=$OCF_SUCCESS ++ ++ if [ -z "$cid" ]; then ++ ocf_log error "Container ID not found for \"$CONTAINER\". Not creating drop-in dependency" ++ return $OCF_ERR_GENERIC ++ fi ++ ++ ocf_log info "Creating drop-in dependency for \"$CONTAINER\" ($cid)" ++ for scope in "libpod-$cid.scope.d" "libpod-conmon-$cid.scope.d"; do ++ if [ $rc -eq $OCF_SUCCESS ] && [ ! -d /run/systemd/transient/"$scope" ]; then ++ mkdir -p /run/systemd/transient/"$scope" && \ ++ echo -e "[Unit]\nBefore=pacemaker.service" > /run/systemd/transient/"$scope"/dep.conf && \ ++ chmod ago+r /run/systemd/transient/"$scope" /run/systemd/transient/"$scope"/dep.conf ++ rc=$? ++ fi ++ done ++ ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ ocf_log error "Could not create drop-in dependency for \"$CONTAINER\" ($cid)" ++ else ++ systemctl daemon-reload ++ rc=$? ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ ocf_log error "Could not refresh service definition after creating drop-in for \"$CONTAINER\"" ++ fi ++ fi ++ ++ return $rc ++} ++ ++ + podman_start() + { ++ local cid ++ local rc ++ + podman_create_mounts + local run_opts="-d --name=${CONTAINER}" + # check to see if the container has already started +@@ -306,8 +365,17 @@ podman_start() + ocf_log info "running container $CONTAINER for the first time" + ocf_run podman run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd + fi ++ rc=$? + +- if [ $? -ne 0 ]; then ++ # if the container was stopped or didn't exist before, systemd ++ # removed the libpod* scopes. So always try to recreate the drop-ins ++ if [ $rc -eq 0 ] && ocf_is_true "$OCF_RESKEY_drop_in_dependency"; then ++ cid=$(podman_container_id) ++ create_transient_drop_in_dependency "$cid" ++ rc=$? ++ fi ++ ++ if [ $rc -ne 0 ]; then + ocf_exit_reason "podman failed to launch container" + return $OCF_ERR_GENERIC + fi +@@ -353,6 +421,8 @@ podman_stop() + else + ocf_log debug "waiting $timeout second[s] before killing container" + ocf_run podman stop -t=$timeout $CONTAINER ++ # on stop, systemd will automatically delete any transient ++ # drop-in conf that has been created earlier + fi + + if [ $? -ne 0 ]; then +@@ -456,6 +526,16 @@ CONTAINER=$OCF_RESKEY_name + # exec command to be non-empty + : ${OCF_RESKEY_monitor_cmd:=/bin/true} + ++# When OCF_RESKEY_drop_in_dependency is not populated, we ++# look at another file-based way of enabling the option. ++# Otherwise, consider it disabled. 
++if [ -z "$OCF_RESKEY_drop_in_dependency" ]; then ++ if [ -f "/etc/sysconfig/podman_drop_in" ] || \ ++ [ -f "/etc/default/podman_drop_in" ]; then ++ OCF_RESKEY_drop_in_dependency=yes ++ fi ++fi ++ + case $__OCF_ACTION in + meta-data) meta_data + exit $OCF_SUCCESS;; diff --git a/SOURCES/bz1738428-LVM-activate-detect-volume-without-reboot.patch b/SOURCES/bz1738428-LVM-activate-detect-volume-without-reboot.patch new file mode 100644 index 0000000..4725d8e --- /dev/null +++ b/SOURCES/bz1738428-LVM-activate-detect-volume-without-reboot.patch @@ -0,0 +1,48 @@ +From 6c24147ebe0e979c48db93a5f8ec6094b8707591 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 26 Sep 2019 12:52:39 +0200 +Subject: [PATCH] LVM-activate: move pvscan --cache to validate + +It needs to be called before validate attempts to look at the VG. +--- + configure.ac | 2 +- + heartbeat/LVM-activate | 6 +++++- + 2 files changed, 6 insertions(+), 2 deletions(-) + +diff --git a/configure.ac b/configure.ac +index 97dac7cf8..1eb65cf34 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -21,7 +21,7 @@ dnl checks for system services + + AC_INIT([resource-agents], + m4_esyscmd([make/git-version-gen .tarball-version]), +- [to_be_defined@foobar.org]) ++ [developers@clusterlabs.org]) + + AC_USE_SYSTEM_EXTENSIONS + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 3df40c894..9b7c0aa7f 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -489,6 +489,11 @@ lvm_validate() { + check_binary lvm + check_binary dmsetup + ++ # This is necessary when using system ID to update lvm hints, ++ # or in older versions of lvm, this is necessary to update the ++ # lvmetad cache. ++ pvscan --cache ++ + if ! vgs --foreign ${VG} >/dev/null 2>&1 ; then + # stop action exits successfully if the VG cannot be accessed... 
+ if [ $__OCF_ACTION = "stop" ]; then +@@ -627,7 +632,6 @@ clvmd_activate() { + systemid_activate() { + local cur_systemid + +- pvscan --cache + cur_systemid=$(vgs --foreign --noheadings -o systemid ${VG} | tr -d '[:blank:]') + + # Put our system ID on the VG diff --git a/SOURCES/bz1741042-IPaddr2-add-noprefixroute-parameter.patch b/SOURCES/bz1741042-IPaddr2-add-noprefixroute-parameter.patch new file mode 100644 index 0000000..f713613 --- /dev/null +++ b/SOURCES/bz1741042-IPaddr2-add-noprefixroute-parameter.patch @@ -0,0 +1,66 @@ +From 34b46b172857babbb2bca5e012c7827ed6a26b01 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 6 Nov 2019 10:00:31 +0100 +Subject: [PATCH] IPaddr2: add noprefixroute parameter + +--- + heartbeat/IPaddr2 | 17 ++++++++++++++++- + 1 file changed, 16 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index 1d39ae514..6f8e8c734 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -88,6 +88,7 @@ OCF_RESKEY_arp_sender_default="" + OCF_RESKEY_send_arp_opts_default="" + OCF_RESKEY_flush_routes_default="false" + OCF_RESKEY_run_arping_default=false ++OCF_RESKEY_noprefixroute_default="false" + OCF_RESKEY_preferred_lft_default="forever" + OCF_RESKEY_network_namespace_default="" + +@@ -109,6 +110,7 @@ OCF_RESKEY_network_namespace_default="" + : ${OCF_RESKEY_send_arp_opts=${OCF_RESKEY_send_arp_opts_default}} + : ${OCF_RESKEY_flush_routes=${OCF_RESKEY_flush_routes_default}} + : ${OCF_RESKEY_run_arping=${OCF_RESKEY_run_arping_default}} ++: ${OCF_RESKEY_noprefixroute=${OCF_RESKEY_noprefixroute_default}} + : ${OCF_RESKEY_preferred_lft=${OCF_RESKEY_preferred_lft_default}} + : ${OCF_RESKEY_network_namespace=${OCF_RESKEY_network_namespace_default}} + +@@ -377,6 +379,14 @@ Whether or not to run arping for IPv4 collision detection check. + + + ++ ++ ++Use noprefixroute flag (see 'man ip-address'). ++ ++Use noprefixroute flag ++ ++ ++ + + + For IPv6, set the preferred lifetime of the IP address. +@@ -397,8 +407,8 @@ the namespace. + Network namespace to use + + +- + ++ + + + +@@ -640,6 +650,11 @@ add_interface () { + msg="Adding $FAMILY address $ipaddr/$netmask with broadcast address $broadcast to device $iface" + fi + ++ if ocf_is_true "${OCF_RESKEY_noprefixroute}"; then ++ cmd="$cmd noprefixroute" ++ msg="${msg} (with noprefixroute)" ++ fi ++ + if [ ! -z "$label" ]; then + cmd="$cmd label $label" + msg="${msg} (with label $label)" diff --git a/SOURCES/bz1741843-LVM-activate-partial-activation.patch b/SOURCES/bz1741843-LVM-activate-partial-activation.patch new file mode 100644 index 0000000..1eec112 --- /dev/null +++ b/SOURCES/bz1741843-LVM-activate-partial-activation.patch @@ -0,0 +1,69 @@ +diff -uNr a/heartbeat/LVM-activate b/heartbeat/LVM-activate +--- a/heartbeat/LVM-activate 2019-10-08 12:10:11.755991580 +0200 ++++ b/heartbeat/LVM-activate 2019-10-08 12:14:38.388288176 +0200 +@@ -42,6 +42,11 @@ + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ++# Parameter defaults ++OCF_RESKEY_partial_activation_default="false" ++ ++: ${OCF_RESKEY_partial_activation=${OCF_RESKEY_partial_activation_default}} ++ + # If LV is given, only activate this named LV; otherwise, activate all + # LVs in the named VG. + VG=${OCF_RESKEY_vgname} +@@ -150,6 +155,16 @@ + + + ++ ++ ++If set, the volume group will be activated partially even with some ++physical volumes missing. It helps to set to true when using mirrored ++logical volumes. 
++ ++Activate VG partially when missing PVs ++ ++ ++ + + + +@@ -486,6 +501,25 @@ + exit $OCF_ERR_CONFIGURED + fi + ++ # Inconsistency might be due to missing physical volumes, which doesn't ++ # automatically mean we should fail. If partial_activation=true then ++ # we should let start try to handle it, or if no PVs are listed as ++ # "unknown device" then another node may have marked a device missing ++ # where we have access to all of them and can start without issue. ++ case $(vgs -o attr --noheadings $VG | tr -d ' ') in ++ ???p??*) ++ if ! ocf_is_true "$OCF_RESKEY_partial_activation" ; then ++ # We are missing devices and cannot activate partially ++ ocf_exit_reason "Volume group [$VG] has devices missing. Consider partial_activation=true to attempt to activate partially" ++ exit $OCF_ERR_GENERIC ++ else ++ # We are missing devices but are allowed to activate partially. ++ # Assume that caused the vgck failure and carry on ++ ocf_log warn "Volume group inconsistency detected with missing device(s) and partial_activation enabled. Proceeding with requested action." ++ fi ++ ;; ++ esac ++ + # Get the access mode from VG metadata and check if it matches the input + # value. Skip to check "tagging" mode because there's no reliable way to + # automatically check if "tagging" mode is being used. +@@ -545,6 +579,10 @@ + do_activate() { + local activate_opt=$1 + ++ if ocf_is_true "$OCF_RESKEY_partial_activation" ; then ++ activate_opt="${activate_opt} --partial" ++ fi ++ + # Only activate the specific LV if it's given + if [ -n "$LV" ]; then + ocf_run lvchange $activate_opt ${VG}/${LV} diff --git a/SOURCES/bz1744103-Filesystem-1-monitor-symlink-support.patch b/SOURCES/bz1744103-Filesystem-1-monitor-symlink-support.patch new file mode 100644 index 0000000..a57e5b0 --- /dev/null +++ b/SOURCES/bz1744103-Filesystem-1-monitor-symlink-support.patch @@ -0,0 +1,39 @@ +From 2aa8015bc4ff0bd61eca13eceb59aaa672335b76 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Thu, 30 Aug 2018 18:36:11 -0700 +Subject: [PATCH] Filesystem: Support symlink as mountpoint directory + +Filesystem monitor operation fails when the `directory` attribute is a +symlink. + +The monitor operation calls the `list_mounts` function, which cats +`/proc/mounts` if it exists, else cats `/etc/mtab` if it exists, else +runs the `mount` command. It then greps for `" $MOUNTPOINT "` in the +output, where `$MOUNTPOINT` is the value of the `directory` attribute. + +`/proc/mounts`, `/etc/mtab`, and the `mount` command resolve symlinks +to their canonical targets. So while the monitor operation greps for +the symlink path (surrounded by spaces) as defined in the directory +attribute, the symlink will not be present in the `list_mounts` output. +Only the symlink's target will be present. + +This patch uses `readlink -f $MOUNTPOINT` to resolve the symlink to its +canonical name before using it as a grep pattern in the +`Filesystem_status` function. 
+--- + heartbeat/Filesystem | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 7c73b0b97..fc4b8fcd5 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -580,7 +580,7 @@ Filesystem_stop() + # + Filesystem_status() + { +- if list_mounts | grep -q " $MOUNTPOINT " >/dev/null 2>&1; then ++ if list_mounts | grep -q " $(readlink -f $MOUNTPOINT) " >/dev/null 2>&1; then + rc=$OCF_SUCCESS + msg="$MOUNTPOINT is mounted (running)" + else diff --git a/SOURCES/bz1744103-Filesystem-2-add-symlink-support.patch b/SOURCES/bz1744103-Filesystem-2-add-symlink-support.patch new file mode 100644 index 0000000..d5cf49f --- /dev/null +++ b/SOURCES/bz1744103-Filesystem-2-add-symlink-support.patch @@ -0,0 +1,43 @@ +From e2c3ec91cdd123b8afc6010f45ecd22ee6d8ecf7 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Mon, 3 Sep 2018 00:30:01 -0700 +Subject: [PATCH] Filesystem: Canonicalize mountpoint symlinks + +Commit 2aa8015 added support to `Filesystem_status()` for mountpoints +that are symlinks. However, it missed two other places where `readlink` +calls should have been added to canonicalize symlinks. +--- + heartbeat/Filesystem | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index fc4b8fcd5..2a43d1daa 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -278,7 +278,7 @@ determine_blockdevice() { + nfs4|nfs|smbfs|cifs|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|none) + : ;; + *) +- DEVICE=`list_mounts | grep " $MOUNTPOINT " | cut -d' ' -f1` ++ DEVICE=`list_mounts | grep " $(readlink -f "$MOUNTPOINT" ) " | cut -d' ' -f1` + if [ -b "$DEVICE" ]; then + blockdevice=yes + fi +@@ -396,7 +396,7 @@ fstype_supported() + Filesystem_start() + { + # Check if there are any mounts mounted under the mountpoint +- if list_mounts | grep -q -E " $MOUNTPOINT/\w+" >/dev/null 2>&1; then ++ if list_mounts | grep -q -E " $(readlink -f "$MOUNTPOINT" )/\w+" >/dev/null 2>&1; then + ocf_log err "There is one or more mounts mounted under $MOUNTPOINT." 
+ return $OCF_ERR_CONFIGURED + fi +@@ -580,7 +580,7 @@ Filesystem_stop() + # + Filesystem_status() + { +- if list_mounts | grep -q " $(readlink -f $MOUNTPOINT) " >/dev/null 2>&1; then ++ if list_mounts | grep -q " $(readlink -f "$MOUNTPOINT" ) " >/dev/null 2>&1; then + rc=$OCF_SUCCESS + msg="$MOUNTPOINT is mounted (running)" + else diff --git a/SOURCES/bz1744103-Filesystem-3-fix-umount-disk-failure.patch b/SOURCES/bz1744103-Filesystem-3-fix-umount-disk-failure.patch new file mode 100644 index 0000000..1a53120 --- /dev/null +++ b/SOURCES/bz1744103-Filesystem-3-fix-umount-disk-failure.patch @@ -0,0 +1,53 @@ +From 69d607dc7568168e874f99d5a8b6bdb66b579d8b Mon Sep 17 00:00:00 2001 +From: "yusk.iida" +Date: Tue, 7 May 2019 19:37:26 +0900 +Subject: [PATCH] Low: Filesystem: Fix a problem umount is not executed in the + event of a disk failure + +--- + heartbeat/Filesystem | 11 ++++++++--- + 1 file changed, 8 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 2a43d1daa..bd974f8f3 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -278,7 +278,7 @@ determine_blockdevice() { + nfs4|nfs|smbfs|cifs|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|none) + : ;; + *) +- DEVICE=`list_mounts | grep " $(readlink -f "$MOUNTPOINT" ) " | cut -d' ' -f1` ++ DEVICE=`list_mounts | grep " $CANONICALIZED_MOUNTPOINT " | cut -d' ' -f1` + if [ -b "$DEVICE" ]; then + blockdevice=yes + fi +@@ -396,7 +396,7 @@ fstype_supported() + Filesystem_start() + { + # Check if there are any mounts mounted under the mountpoint +- if list_mounts | grep -q -E " $(readlink -f "$MOUNTPOINT" )/\w+" >/dev/null 2>&1; then ++ if list_mounts | grep -q -E " $CANONICALIZED_MOUNTPOINT/\w+" >/dev/null 2>&1; then + ocf_log err "There is one or more mounts mounted under $MOUNTPOINT." + return $OCF_ERR_CONFIGURED + fi +@@ -580,7 +580,7 @@ Filesystem_stop() + # + Filesystem_status() + { +- if list_mounts | grep -q " $(readlink -f "$MOUNTPOINT" ) " >/dev/null 2>&1; then ++ if list_mounts | grep -q " $CANONICALIZED_MOUNTPOINT " >/dev/null 2>&1; then + rc=$OCF_SUCCESS + msg="$MOUNTPOINT is mounted (running)" + else +@@ -804,6 +804,11 @@ if [ -z "$OCF_RESKEY_directory" ]; then + else + MOUNTPOINT=$(echo $OCF_RESKEY_directory | sed 's/\/*$//') + : ${MOUNTPOINT:=/} ++ CANONICALIZED_MOUNTPOINT=$(readlink -f "$MOUNTPOINT") ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Could not canonicalize $MOUNTPOINT because readlink failed" ++ exit $OCF_ERR_GENERIC ++ fi + # At this stage, $MOUNTPOINT does not contain trailing "/" unless it is "/" + # TODO: / mounted via Filesystem sounds dangerous. On stop, we'll + # kill the whole system. Is that a good idea? 
diff --git a/SOURCES/bz1744103-Filesystem-4-fix-readlink-issue.patch b/SOURCES/bz1744103-Filesystem-4-fix-readlink-issue.patch new file mode 100644 index 0000000..68b9313 --- /dev/null +++ b/SOURCES/bz1744103-Filesystem-4-fix-readlink-issue.patch @@ -0,0 +1,32 @@ +From 48a7ebcea5ce0522021cf3079b62107a06b530b9 Mon Sep 17 00:00:00 2001 +From: James Oakley +Date: Thu, 8 Aug 2019 05:56:14 -0700 +Subject: [PATCH] Don't call readlink on path if it does not exist + +--- + heartbeat/Filesystem | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 4bbbc06d3..738e3c08e 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -858,10 +858,14 @@ if [ -z "$OCF_RESKEY_directory" ]; then + else + MOUNTPOINT=$(echo $OCF_RESKEY_directory | sed 's/\/*$//') + : ${MOUNTPOINT:=/} +- CANONICALIZED_MOUNTPOINT=$(readlink -f "$MOUNTPOINT") +- if [ $? -ne 0 ]; then +- ocf_exit_reason "Could not canonicalize $MOUNTPOINT because readlink failed" +- exit $OCF_ERR_GENERIC ++ if [ -e "$MOUNTPOINT" ] ; then ++ CANONICALIZED_MOUNTPOINT=$(readlink -f "$MOUNTPOINT") ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Could not canonicalize $MOUNTPOINT because readlink failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ else ++ CANONICALIZED_MOUNTPOINT="$MOUNTPOINT" + fi + # At this stage, $MOUNTPOINT does not contain trailing "/" unless it is "/" + # TODO: / mounted via Filesystem sounds dangerous. On stop, we'll diff --git a/SOURCES/bz1744140-Filesystem-1-avoid-corrupt-mount-list.patch b/SOURCES/bz1744140-Filesystem-1-avoid-corrupt-mount-list.patch new file mode 100644 index 0000000..1184817 --- /dev/null +++ b/SOURCES/bz1744140-Filesystem-1-avoid-corrupt-mount-list.patch @@ -0,0 +1,46 @@ +From b67278bc92cfb0b9947ff5fff65f46f420a42c2c Mon Sep 17 00:00:00 2001 +From: Kazutomo Nakahira +Date: Fri, 10 May 2019 14:30:51 +0900 +Subject: [PATCH] Low: Filesystem: Fix missing mount point due to corrupted + mount list + +--- + heartbeat/Filesystem | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 2a43d1daa..c38ae12d4 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -255,16 +255,26 @@ is_bind_mount() { + } + list_mounts() { + local inpf="" ++ local mount_list="" ++ local check_list="x" ++ + if [ -e "/proc/mounts" ] && ! is_bind_mount; then + inpf=/proc/mounts + elif [ -f "/etc/mtab" -a -r "/etc/mtab" ]; then + inpf=/etc/mtab + fi +- if [ "$inpf" ]; then +- cut -d' ' -f1,2,3 < $inpf +- else +- $MOUNT | cut -d' ' -f1,3,5 +- fi ++ ++ # Make sure that the mount list has not been changed while reading. ++ while [ "$mount_list" != "$check_list" ]; do ++ check_list=$mount_list ++ if [ "$inpf" ]; then ++ mount_list=$(cut -d' ' -f1,2,3 < $inpf) ++ else ++ mount_list=$($MOUNT | cut -d' ' -f1,3,5) ++ fi ++ done ++ ++ echo "$mount_list" + } + + determine_blockdevice() { diff --git a/SOURCES/bz1744140-Filesystem-2-prevent-killing-bind-mount.patch b/SOURCES/bz1744140-Filesystem-2-prevent-killing-bind-mount.patch new file mode 100644 index 0000000..f642548 --- /dev/null +++ b/SOURCES/bz1744140-Filesystem-2-prevent-killing-bind-mount.patch @@ -0,0 +1,52 @@ +From bfbc99003ebd96d79bbf8ad50be0b5e714a92fd7 Mon Sep 17 00:00:00 2001 +From: ytakeshita +Date: Fri, 7 Jun 2019 15:20:52 +0900 +Subject: [PATCH] Medium: Filesystem: Prevents to all root user processes are + killed when bind mounting a directory on rootfs. 
+ +if a directory is bound mounting on rootfs and "force_umount" is not set "safe", change "force_umount" to "safe". +--- + heartbeat/Filesystem | 23 +++++++++++++++++++++++ + 1 file changed, 23 insertions(+) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index c46ec3cca..1b29a08b3 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -314,6 +314,24 @@ bind_kernel_check() { + [ $? -ne 0 ] && + ocf_log warn "kernel `uname -r` cannot handle read only bind mounts" + } ++ ++bind_rootfs_check() { ++ local SOURCE ++ local TARGET ++ local ROOTFS ++ ++ SOURCE=$1 ++ TARGET=$(df --output=target $SOURCE | tail -n 1) ++ ++ ROOTFS=$(list_mounts | grep -w rootfs | cut -d' ' -f 2) ++ ++ if [ "${TARGET}" = "${ROOTFS}" ]; then ++ return 1 ++ else ++ return 0 ++ fi ++} ++ + bind_mount() { + if is_bind_mount && [ "$options" != "-o bind" ] + then +@@ -476,6 +494,11 @@ get_pids() + local procs + local mmap_procs + ++ if is_bind_mount && ocf_is_true "$FORCE_UNMOUNT" && ! bind_rootfs_check "$DEVICE"; then ++ ocf_log debug "Change force_umount from '$FORCE_UNMOUNT' to 'safe'" ++ FORCE_UNMOUNT=safe ++ fi ++ + if ocf_is_true "$FORCE_UNMOUNT"; then + if [ "X${HOSTOS}" = "XOpenBSD" ];then + fstat | grep $dir | awk '{print $3}' diff --git a/SOURCES/bz1744140-Filesystem-3-improved-bind-mount-check.patch b/SOURCES/bz1744140-Filesystem-3-improved-bind-mount-check.patch new file mode 100644 index 0000000..9bb2f11 --- /dev/null +++ b/SOURCES/bz1744140-Filesystem-3-improved-bind-mount-check.patch @@ -0,0 +1,42 @@ +From f8e5d2afc5b9bbf676ac20894f0f26e6ec998557 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 10 Sep 2019 15:40:12 +0200 +Subject: [PATCH] Filesystem: improve "/" check for bind mounts + +--- + heartbeat/Filesystem | 15 +++------------ + 1 file changed, 3 insertions(+), 12 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 738e3c08e..e66ddc77f 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -337,17 +337,8 @@ bind_kernel_check() { + ocf_log warn "kernel `uname -r` cannot handle read only bind mounts" + } + +-bind_rootfs_check() { +- local SOURCE +- local TARGET +- local ROOTFS +- +- SOURCE=$1 +- TARGET=$(df --output=target $SOURCE | tail -n 1) +- +- ROOTFS=$(list_mounts | grep -w rootfs | cut -d' ' -f 2) +- +- if [ "${TARGET}" = "${ROOTFS}" ]; then ++bind_root_mount_check() { ++ if [ "$(df -P "$1" | awk 'END{print $6}')" = "/" ]; then + return 1 + else + return 0 +@@ -516,7 +507,7 @@ get_pids() + local procs + local mmap_procs + +- if is_bind_mount && ocf_is_true "$FORCE_UNMOUNT" && ! bind_rootfs_check "$DEVICE"; then ++ if is_bind_mount && ocf_is_true "$FORCE_UNMOUNT" && ! bind_root_mount_check "$DEVICE"; then + ocf_log debug "Change force_umount from '$FORCE_UNMOUNT' to 'safe'" + FORCE_UNMOUNT=safe + fi diff --git a/SOURCES/bz1744190-pgsql-1-set-primary-standby-initial-score.patch b/SOURCES/bz1744190-pgsql-1-set-primary-standby-initial-score.patch new file mode 100644 index 0000000..d11f12d --- /dev/null +++ b/SOURCES/bz1744190-pgsql-1-set-primary-standby-initial-score.patch @@ -0,0 +1,34 @@ +From f8e1b1407b613657ebd90381d53e6a567b92b241 Mon Sep 17 00:00:00 2001 +From: Kazutomo Nakahira +Date: Mon, 17 Dec 2018 14:15:24 +0900 +Subject: [PATCH] Medium: pgsql: Set initial score for primary and hot standby + in the probe. 
+ +--- + heartbeat/pgsql | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/heartbeat/pgsql b/heartbeat/pgsql +index 842dc0ac4..8ef84dd3e 100755 +--- a/heartbeat/pgsql ++++ b/heartbeat/pgsql +@@ -974,11 +974,19 @@ pgsql_real_monitor() { + case "$output" in + f) ocf_log debug "PostgreSQL is running as a primary." + if [ "$OCF_RESKEY_monitor_sql" = "$OCF_RESKEY_monitor_sql_default" ]; then ++ if ocf_is_probe; then ++ # Set initial score for primary. ++ exec_with_retry 0 $CRM_MASTER -v $PROMOTE_ME ++ fi + return $OCF_RUNNING_MASTER + fi + ;; + + t) ocf_log debug "PostgreSQL is running as a hot standby." ++ if ocf_is_probe; then ++ # Set initial score for hot standby. ++ exec_with_retry 0 $CRM_MASTER -v $CAN_NOT_PROMOTE ++ fi + return $OCF_SUCCESS;; + + *) ocf_exit_reason "$CHECK_MS_SQL output is $output" diff --git a/SOURCES/bz1744190-pgsql-2-improve-start-checks.patch b/SOURCES/bz1744190-pgsql-2-improve-start-checks.patch new file mode 100644 index 0000000..daca241 --- /dev/null +++ b/SOURCES/bz1744190-pgsql-2-improve-start-checks.patch @@ -0,0 +1,34 @@ +From ac430f79c333d73e6cd59ae59178c7040e7dbfda Mon Sep 17 00:00:00 2001 +From: Kazunori INOUE +Date: Wed, 8 May 2019 18:23:59 +0900 +Subject: [PATCH] pgsql: enhance checks in pgsql_real_start to prevent + incorrect status gets + +--- + heartbeat/pgsql | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/pgsql b/heartbeat/pgsql +index 842dc0ac4..5d04618e6 100755 +--- a/heartbeat/pgsql ++++ b/heartbeat/pgsql +@@ -483,7 +483,7 @@ runasowner() { + "-q") + quietrun="-q" + shift 1;; +- "warn"|"err") ++ "info"|"warn"|"err") + loglevel="-$1" + shift 1;; + *) +@@ -544,7 +544,9 @@ pgsql_real_start() { + local postgres_options + local rc + +- if pgsql_status; then ++ pgsql_real_monitor info ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS -o $rc -eq $OCF_RUNNING_MASTER ]; then + ocf_log info "PostgreSQL is already running. PID=`cat $PIDFILE`" + if is_replication; then + return $OCF_ERR_GENERIC diff --git a/SOURCES/bz1744224-IPsrcaddr-1-add-destination-and-table-parameters.patch b/SOURCES/bz1744224-IPsrcaddr-1-add-destination-and-table-parameters.patch new file mode 100644 index 0000000..cb4fde4 --- /dev/null +++ b/SOURCES/bz1744224-IPsrcaddr-1-add-destination-and-table-parameters.patch @@ -0,0 +1,202 @@ +--- ClusterLabs-resource-agents-e711383f/heartbeat/IPsrcaddr 2019-08-15 16:02:10.055827624 +0200 ++++ /home/oalbrigt/src/resource-agents/heartbeat/IPsrcaddr 2019-08-15 15:45:50.690757838 +0200 +@@ -1,6 +1,6 @@ + #!/bin/sh + # +-# Description: IPsrcaddr - Preferred source address modification ++# Description: IPsrcaddr - Preferred source(/dest) address modification + # + # Author: John Sutton + # Support: users@clusterlabs.org +@@ -11,7 +11,7 @@ + # + # This script manages the preferred source address associated with + # packets which originate on the localhost and are routed through the +-# default route. By default, i.e. without the use of this script or ++# matching route. By default, i.e. without the use of this script or + # similar, these packets will carry the IP of the primary i.e. the + # non-aliased interface. This can be a nuisance if you need to ensure + # that such packets carry the same IP irrespective of which host in +@@ -27,7 +27,7 @@ + # + # NOTES: + # +-# 1) There must be one and not more than 1 default route! Mainly because ++# 1) There must be one and not more than 1 matching route! Mainly because + # I can't see why you should have more than one. 
And if there is more + # than one, we would have to box clever to find out which one is to be + # modified, or we would have to pass its identity as an argument. +@@ -54,16 +54,25 @@ + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + + # Defaults ++OCF_RESKEY_ipaddress_default="" ++OCF_RESKEY_cidr_netmask_default="" ++OCF_RESKEY_destination_default="0.0.0.0/0" + OCF_RESKEY_proto_default="" ++OCF_RESKEY_table_default="" + ++: ${OCF_RESKEY_ipaddress=${OCF_RESKEY_ipaddress_default}} ++: ${OCF_RESKEY_cidr_netmask=${OCF_RESKEY_cidr_netmask_default}} ++: ${OCF_RESKEY_destination=${OCF_RESKEY_destination_default}} + : ${OCF_RESKEY_proto=${OCF_RESKEY_proto_default}} ++: ${OCF_RESKEY_table=${OCF_RESKEY_table_default}} + ####################################################################### + + [ -z "$OCF_RESKEY_proto" ] && PROTO="" || PROTO="proto $OCF_RESKEY_proto" ++[ -z "$OCF_RESKEY_table" ] && TABLE="" || TABLE="table $OCF_RESKEY_table" + + USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}"; + +- CMDSHOW="$IP2UTIL route show to exact 0.0.0.0/0" ++ CMDSHOW="$IP2UTIL route show $TABLE to exact $OCF_RESKEY_destination" + CMDCHANGE="$IP2UTIL route change to " + + SYSTYPE="`uname -s`" +@@ -91,7 +100,7 @@ + The IP address. + + IP address +- ++ + + + +@@ -100,7 +109,15 @@ + dotted quad notation 255.255.255.0). + + Netmask +- ++ ++ ++ ++ ++ ++The destination IP/subnet for the route (default: $OCF_RESKEY_destination_default) ++ ++Destination IP/subnet ++ + + + +@@ -108,7 +125,17 @@ + Proto to match when finding network. E.g. "kernel". + + Proto +- ++ ++ ++ ++ ++ ++Table to modify. E.g. "local". ++ ++The table has to have a route matching the "destination" parameter. ++ ++Table ++ + + + +@@ -151,21 +178,22 @@ + export OCF_RESKEY_ip=$OCF_RESKEY_ipaddress + + srca_read() { +- # Capture the default route - doublequotes prevent word splitting... +- DEFROUTE="`$CMDSHOW`" || errorexit "command '$CMDSHOW' failed" +- +- # ... so we can make sure there is only 1 default route +- [ 1 -eq `echo "$DEFROUTE" | wc -l` ] || \ +- errorexit "more than 1 default route exists" ++ # Capture matching route - doublequotes prevent word splitting... ++ ROUTE="`$CMDSHOW`" || errorexit "command '$CMDSHOW' failed" + +- # But there might still be no default route +- [ -z "$DEFROUTE" ] && errorexit "no default route exists" ++ # ... so we can make sure there is only 1 matching route ++ [ 1 -eq `echo "$ROUTE" | wc -l` ] || \ ++ errorexit "more than 1 matching route exists" ++ ++ # But there might still be no matching route ++ [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] && [ -z "$ROUTE" ] && \ ++ ! 
ocf_is_probe && errorexit "no matching route exists" + + # Sed out the source ip address if it exists +- SRCIP=`echo $DEFROUTE | sed -n "s/$MATCHROUTE/\3/p"` ++ SRCIP=`echo $ROUTE | sed -n "s/$MATCHROUTE/\3/p"` + + # and what remains after stripping out the source ip address clause +- ROUTE_WO_SRC=`echo $DEFROUTE | sed "s/$MATCHROUTE/\1\5/"` ++ ROUTE_WO_SRC=`echo $ROUTE | sed "s/$MATCHROUTE/\1\5/"` + + [ -z "$SRCIP" ] && return 1 + [ $SRCIP = $1 ] && return 0 +@@ -185,11 +213,13 @@ + rc=$OCF_SUCCESS + ocf_log info "The ip route has been already set.($NETWORK, $INTERFACE, $ROUTE_WO_SRC)" + else +- $IP2UTIL route replace $NETWORK dev $INTERFACE src $1 || \ +- errorexit "command 'ip route replace $NETWORK dev $INTERFACE src $1' failed" ++ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE src $1 || \ ++ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE src $1' failed" + +- $CMDCHANGE $ROUTE_WO_SRC src $1 || \ +- errorexit "command '$CMDCHANGE $ROUTE_WO_SRC src $1' failed" ++ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then ++ $CMDCHANGE $ROUTE_WO_SRC src $1 || \ ++ errorexit "command '$CMDCHANGE $ROUTE_WO_SRC src $1' failed" ++ fi + rc=$? + fi + +@@ -201,7 +231,7 @@ + # If one exists but it's not the same as the one specified, that's + # an error. Maybe that's the wrong behaviour because if this fails + # then when IPaddr releases the associated interface (if there is one) +-# your default route will also get dropped ;-( ++# your matching route will also get dropped ;-( + # The exit code should conform to LSB exit codes. + # + +@@ -217,11 +247,13 @@ + + [ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address" + +- $IP2UTIL route replace $NETWORK dev $INTERFACE || \ +- errorexit "command 'ip route replace $NETWORK dev $INTERFACE' failed" ++ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE || \ ++ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE' failed" + +- $CMDCHANGE $ROUTE_WO_SRC || \ +- errorexit "command '$CMDCHANGE $ROUTE_WO_SRC' failed" ++ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then ++ $CMDCHANGE $ROUTE_WO_SRC || \ ++ errorexit "command '$CMDCHANGE $ROUTE_WO_SRC' failed" ++ fi + + return $? + } +@@ -406,6 +438,10 @@ + return $OCF_ERR_CONFIGURED + fi + ++ if ! echo "$OCF_RESKEY_destination" | grep -q "/"; then ++ return $OCF_ERR_CONFIGURED ++ fi ++ + + if ! [ "x$SYSTYPE" = "xLinux" ]; then + # checks after this point are only relevant for linux. 
+@@ -486,7 +522,11 @@ + } + + INTERFACE=`echo $findif_out | awk '{print $1}'` +-NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` ++if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then ++ NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` ++else ++ NETWORK="$OCF_RESKEY_destination" ++fi + + case $1 in + start) srca_start $ipaddress diff --git a/SOURCES/bz1744224-IPsrcaddr-2-local-rule-destination-fixes.patch b/SOURCES/bz1744224-IPsrcaddr-2-local-rule-destination-fixes.patch new file mode 100644 index 0000000..cca64cb --- /dev/null +++ b/SOURCES/bz1744224-IPsrcaddr-2-local-rule-destination-fixes.patch @@ -0,0 +1,42 @@ +From 0e73d3f474d08779b64ed99fb3f80c1e806ff1b7 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 28 Nov 2019 16:11:51 +0100 +Subject: [PATCH] IPsrcaddr: fixes to replace local rule if using local table, + and set src back to primary for device on stop + +--- + heartbeat/IPsrcaddr | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index d80b72165..f9085f082 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -75,6 +75,10 @@ USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}"; + CMDSHOW="$IP2UTIL route show $TABLE to exact $OCF_RESKEY_destination" + CMDCHANGE="$IP2UTIL route change to " + ++if [ "$OCF_RESKEY_table" = "local" ]; then ++ TABLE="$TABLE local" ++fi ++ + SYSTYPE="`uname -s`" + + usage() { +@@ -247,8 +251,14 @@ srca_stop() { + + [ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address" + +- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE || \ +- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE' failed" ++ OPTS="" ++ if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] ;then ++ PRIMARY_IP="$($IP2UTIL -4 -o addr show dev eth0 primary | awk '{split($4,a,"/");print a[1]}')" ++ OPTS="proto kernel scope host src $PRIMARY_IP" ++ fi ++ ++ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS || \ ++ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS' failed" + + if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then + $CMDCHANGE $ROUTE_WO_SRC || \ diff --git a/SOURCES/bz1744224-IPsrcaddr-3-fix-probe-issues.patch b/SOURCES/bz1744224-IPsrcaddr-3-fix-probe-issues.patch new file mode 100644 index 0000000..b9f8e7e --- /dev/null +++ b/SOURCES/bz1744224-IPsrcaddr-3-fix-probe-issues.patch @@ -0,0 +1,45 @@ +From 7afc581f6cd8fc37c3e14ece12fb16d31f1886f9 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 10 Jan 2020 14:35:56 +0100 +Subject: [PATCH] IPsrcaddr: fixes to avoid failing during probe + +--- + heartbeat/IPsrcaddr | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index f9085f082..0ef8b391f 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -75,6 +75,10 @@ USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}"; + CMDSHOW="$IP2UTIL route show $TABLE to exact $OCF_RESKEY_destination" + CMDCHANGE="$IP2UTIL route change to " + ++if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ]; then ++ CMDSHOW="$CMDSHOW src $OCF_RESKEY_ipaddress" ++fi ++ + if [ "$OCF_RESKEY_table" = "local" ]; then + TABLE="$TABLE local" + fi +@@ -183,7 +187,7 @@ export OCF_RESKEY_ip=$OCF_RESKEY_ipaddress + + srca_read() { + # Capture matching route - doublequotes prevent word splitting... 
+- ROUTE="`$CMDSHOW`" || errorexit "command '$CMDSHOW' failed" ++ ROUTE="`$CMDSHOW 2> /dev/null`" || errorexit "command '$CMDSHOW' failed" + + # ... so we can make sure there is only 1 matching route + [ 1 -eq `echo "$ROUTE" | wc -l` ] || \ +@@ -199,6 +203,11 @@ srca_read() { + # and what remains after stripping out the source ip address clause + ROUTE_WO_SRC=`echo $ROUTE | sed "s/$MATCHROUTE/\1\5/"` + ++ # using "src " only returns output if there's a match ++ if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ]; then ++ [ -z "$ROUTE" ] && return 1 || return 0 ++ fi ++ + [ -z "$SRCIP" ] && return 1 + [ $SRCIP = $1 ] && return 0 + return 2 diff --git a/SOURCES/bz1744224-IPsrcaddr-4-fix-hardcoded-device.patch b/SOURCES/bz1744224-IPsrcaddr-4-fix-hardcoded-device.patch new file mode 100644 index 0000000..e0e1d04 --- /dev/null +++ b/SOURCES/bz1744224-IPsrcaddr-4-fix-hardcoded-device.patch @@ -0,0 +1,23 @@ +From 5f0d15ad70098510a3782d6fd18d6eacfb51b0cf Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 16 Jan 2020 14:59:26 +0100 +Subject: [PATCH] IPsrcaddr: remove hardcoded device when using destination + parameter + +--- + heartbeat/IPsrcaddr | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index 0ef8b391f..7cdc3a9fe 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -262,7 +262,7 @@ srca_stop() { + + OPTS="" + if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] ;then +- PRIMARY_IP="$($IP2UTIL -4 -o addr show dev eth0 primary | awk '{split($4,a,"/");print a[1]}')" ++ PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')" + OPTS="proto kernel scope host src $PRIMARY_IP" + fi + diff --git a/SOURCES/bz1745713-rabbitmq-cluster-1-monitor-mnesia-status.patch b/SOURCES/bz1745713-rabbitmq-cluster-1-monitor-mnesia-status.patch new file mode 100644 index 0000000..fab8bfd --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-1-monitor-mnesia-status.patch @@ -0,0 +1,57 @@ +From fcaa52bb98a8686d993550c6f4ab7867625c8059 Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Wed, 29 Aug 2018 16:18:55 -0400 +Subject: [PATCH] rabbitmq-cluster: get cluster status from mnesia during + monitor + +If mnesia is not running (for example if `rabbitmqctl stop_app` has +been called, or the service has paused during partition due to the +pause_minority strategy) then the cluster_status command to +rabbitmqctl will read the cached cluster status from disk and the +command returns 0 even though the service isn't really running at all. + +Instead, force the cluster status to be read from mnesia. If mnesia +is not running due to the above or similar circumstances, the command +will catch that and properly fail the monitor action. + +Resolves: RHBZ#1595753 +--- + heartbeat/rabbitmq-cluster | 20 +++++--------------- + 1 file changed, 5 insertions(+), 15 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index a7d2db614..204917475 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -181,26 +181,16 @@ remove_pid () { + rmq_monitor() { + local rc + +- $RMQ_CTL cluster_status > /dev/null 2>&1 +- rc=$? +- case "$rc" in +- 0) ++ if $RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' 
| grep -q '^{ok'; then + ocf_log debug "RabbitMQ server is running normally" + rmq_write_nodename +- ++ + return $OCF_SUCCESS +- ;; +- 2|68|69|70|75|78) +- ocf_log info "RabbitMQ server is not running" ++ else ++ ocf_log info "RabbitMQ server could not get cluster status from mnesia" + rmq_delete_nodename + return $OCF_NOT_RUNNING +- ;; +- *) +- ocf_log err "Unexpected return code from '$RMQ_CTL cluster_status' exit code: $rc" +- rmq_delete_nodename +- return $OCF_ERR_GENERIC +- ;; +- esac ++ fi + } + + rmq_init_and_wait() diff --git a/SOURCES/bz1745713-rabbitmq-cluster-2-fail-when-in-minority-partition.patch b/SOURCES/bz1745713-rabbitmq-cluster-2-fail-when-in-minority-partition.patch new file mode 100644 index 0000000..72f5ff6 --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-2-fail-when-in-minority-partition.patch @@ -0,0 +1,96 @@ +From cc23c5523a0185fa557a5ab9056d50a60300d12a Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Tue, 16 Oct 2018 16:21:25 -0400 +Subject: [PATCH] rabbitmq-cluster: fail monitor when node is in minority + partition + +It's possible for mnesia to still be running, but for mnesia to be +partitioned. And it's also possible to get into this state without +pacemaker seeing the node go down so no corrective action is taken. + +When monitoring, check the number of nodes that pacemaker thinks is +running, and compare to the number of nodes that mnesia thinks is +running. If mnesia only sees a minority of the total nodes, fail it +so corrective action can be taken to rejoin the cluster. + +This also adds a new function, rmq_app_running, which simply checks +whether the app is running or not and does not care about the +partition status. This is now used instead of the full monitor in a +few places where we don't care about partition state. + +Resolves: RHBZ#1639826 +--- + heartbeat/rabbitmq-cluster | 28 +++++++++++++++++++++++++--- + 1 file changed, 25 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 204917475..78b2bbadf 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -178,10 +178,31 @@ remove_pid () { + rm -f ${RMQ_PID_FILE} > /dev/null 2>&1 + } + ++rmq_app_running() { ++ if $RMQ_CTL eval 'application:which_applications().' | grep -q '{rabbit,'; then ++ ocf_log debug "RabbitMQ application is running" ++ return $OCF_SUCCESS ++ else ++ ocf_log debug "RabbitMQ application is stopped" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ + rmq_monitor() { + local rc + + if $RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' | grep -q '^{ok'; then ++ pcs_running=$(rmq_join_list | wc -w) ++ ocf_log debug "Pacemaker thinks ${pcs_running} RabbitMQ nodes are running" ++ rmq_running=$($RMQ_CTL eval 'length(mnesia:system_info(running_db_nodes)).') ++ ocf_log debug "RabbitMQ thinks ${rmq_running} RabbitMQ nodes are running" ++ ++ if [ $(( $rmq_running * 2 )) -lt $pcs_running ]; then ++ ocf_log info "RabbitMQ is a minority partition, failing monitor" ++ rmq_delete_nodename ++ return $OCF_ERR_GENERIC ++ fi ++ + ocf_log debug "RabbitMQ server is running normally" + rmq_write_nodename + +@@ -215,7 +236,7 @@ rmq_init_and_wait() + return $OCF_ERR_GENERIC + fi + +- rmq_monitor ++ rmq_app_running + return $? + } + +@@ -236,6 +257,7 @@ rmq_start_first() + if [ $rc -eq 0 ]; then + rc=$OCF_SUCCESS + ocf_log info "cluster bootstrapped" ++ rmq_write_nodename + + if [ -n "$OCF_RESKEY_set_policy" ]; then + # do not quote set_policy, we are passing in arguments +@@ -492,7 +514,7 @@ rmq_stop() { + end. 
+ " + +- rmq_monitor ++ rmq_app_running + if [ $? -eq $OCF_NOT_RUNNING ]; then + return $OCF_SUCCESS + fi +@@ -508,7 +530,7 @@ rmq_stop() { + #TODO add kill logic + stop_wait=1 + while [ $stop_wait = 1 ]; do +- rmq_monitor ++ rmq_app_running + rc=$? + if [ "$rc" -eq $OCF_NOT_RUNNING ]; then + stop_wait=0 diff --git a/SOURCES/bz1745713-rabbitmq-cluster-3-fix-stop-regression.patch b/SOURCES/bz1745713-rabbitmq-cluster-3-fix-stop-regression.patch new file mode 100644 index 0000000..8b422eb --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-3-fix-stop-regression.patch @@ -0,0 +1,63 @@ +From 19ee29342f8bb573722991b8cbe4503309ad0bf9 Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Fri, 2 Nov 2018 13:12:53 -0400 +Subject: [PATCH] rabbitmq-cluster: fix regression in rmq_stop + +This regression was introduced in PR#1249 (cc23c55). The stop action +was modified to use rmq_app_running in order to check the service +status, which allows for the following sequence of events: + +- service is started, unclustered +- stop_app is called +- cluster_join is attempted and fails +- stop is called + +Because stop_app was called, rmq_app_running returns $OCF_NOT_RUNNING +and the stop action is a no-op. This means the erlang VM continues +running. + +When the start action is attempted again, a new erlang VM is launched, +but this VM fails to boot because the old one is still running and is +registered with the same name (rabbit@nodename). + +This adds a new function, rmq_node_alive, which does a simple eval to +test whether the erlang VM is up, independent of the rabbit app. The +stop action now uses rmq_node_alive to check the service status, so +even if stop_app was previously called, the erlang VM will be stopped +properly. + +Resolves: RHBZ#1639826 +--- + heartbeat/rabbitmq-cluster | 12 +++++++++++- + 1 file changed, 11 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 78b2bbadf..a2de9dc20 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -188,6 +188,16 @@ rmq_app_running() { + fi + } + ++rmq_node_alive() { ++ if $RMQ_CTL eval 'ok.'; then ++ ocf_log debug "RabbitMQ node is alive" ++ return $OCF_SUCCESS ++ else ++ ocf_log debug "RabbitMQ node is down" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ + rmq_monitor() { + local rc + +@@ -514,7 +524,7 @@ rmq_stop() { + end. + " + +- rmq_app_running ++ rmq_node_alive + if [ $? -eq $OCF_NOT_RUNNING ]; then + return $OCF_SUCCESS + fi diff --git a/SOURCES/bz1745713-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch b/SOURCES/bz1745713-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch new file mode 100644 index 0000000..80fe18b --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch @@ -0,0 +1,83 @@ +From 63c9449bfa9a7fecbc0f00394699a475a384671d Mon Sep 17 00:00:00 2001 +From: Damien Ciabrini +Date: Thu, 9 Aug 2018 16:33:26 +0200 +Subject: [PATCH] rabbitmq-cluster: retry start when cluster join fails + +When a node tries to join an existing cluster, it fetches a node +list to try to connect from any of those running nodes. + +If the nodes from this list become unavailable while we're joining +the cluster, the rabbitmq server will fail to get clustered and +make the start operation fail. + +Give the resource a chance to start anyway by retrying the entire +start actions until it succeeds or until the start timeout is +reached and pacemaker stops the start operation. 
+ +Co-Authored-by: +Suggested-by: +--- + heartbeat/rabbitmq-cluster | 29 ++++++++++++++++++++++++++--- + 1 file changed, 26 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 9ff49e075..84f383460 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -31,6 +31,12 @@ + + ####################################################################### + ++# This arbitrary value here is used by the rmq_start action to ++# signify that the resource agent must retry the start process ++# It might potentially conflict with OCF assigned error code ++# in the future. ++RMQ_TRY_RESTART_ERROR_CODE=126 ++ + RMQ_SERVER=/usr/sbin/rabbitmq-server + RMQ_CTL=/usr/sbin/rabbitmqctl + RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia" +@@ -354,7 +360,7 @@ rmq_notify() { + return $OCF_SUCCESS + } + +-rmq_start() { ++rmq_try_start() { + local join_list="" + local rc + +@@ -384,8 +390,16 @@ rmq_start() { + rc=$? + + if [ $rc -ne 0 ]; then +- ocf_log info "node failed to join even after reseting local data. Check SELINUX policy" +- return $OCF_ERR_GENERIC ++ # we could not join the rabbitmq cluster from any of the running nodes ++ # this might be due to a unexpected reset of those nodes. Give ourself ++ # a chance to start by retrying the entire start sequence. ++ ++ ocf_log warn "Failed to join the RabbitMQ cluster from nodes ${join_list}. Stopping local unclustered rabbitmq" ++ rmq_stop ++ ++ ocf_log warn "Re-detect available rabbitmq nodes and try to start again" ++ # return an unused OCF value to signify a "retry" condition ++ return $RMQ_TRY_RESTART_ERROR_CODE + fi + + # Restore users, user permissions, and policies (if any) +@@ -443,6 +457,15 @@ rmq_start() { + return $OCF_SUCCESS + } + ++rmq_start() { ++ local rc=$RMQ_TRY_RESTART_ERROR_CODE ++ while [ $rc -eq $RMQ_TRY_RESTART_ERROR_CODE ]; do ++ rmq_try_start ++ rc=$? ++ done ++ return $rc ++} ++ + rmq_stop() { + # Backup users, user permissions, and policies + BaseDataDir=`dirname $RMQ_DATA_DIR` diff --git a/SOURCES/bz1745713-rabbitmq-cluster-5-ensure-node-attribures-removed.patch b/SOURCES/bz1745713-rabbitmq-cluster-5-ensure-node-attribures-removed.patch new file mode 100644 index 0000000..0a25333 --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-5-ensure-node-attribures-removed.patch @@ -0,0 +1,42 @@ +From 8ed87936e9ad06318cc49ea767885a405dfde11e Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Wed, 5 Dec 2018 11:45:43 -0500 +Subject: [PATCH] rabbitmq-cluster: better ensure node attributes are removed + +Ensure that the attribute is removed at the end of the stop action. +Also if rmq_app_running or rmq_node_alive shows the service as down, +ensure the attribute is deleted as well. 
+ +Resolves: RHBZ#1656368 +--- + heartbeat/rabbitmq-cluster | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 1643dd1e7..2dca3e216 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -184,6 +184,7 @@ rmq_app_running() { + return $OCF_SUCCESS + else + ocf_log debug "RabbitMQ application is stopped" ++ rmq_delete_nodename + return $OCF_NOT_RUNNING + fi + } +@@ -194,6 +195,7 @@ rmq_node_alive() { + return $OCF_SUCCESS + else + ocf_log debug "RabbitMQ node is down" ++ rmq_delete_nodename + return $OCF_NOT_RUNNING + fi + } +@@ -554,6 +556,7 @@ rmq_stop() { + sleep 1 + done + ++ rmq_delete_nodename + remove_pid + return $OCF_SUCCESS + } diff --git a/SOURCES/bz1745713-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch b/SOURCES/bz1745713-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch new file mode 100644 index 0000000..b39150a --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch @@ -0,0 +1,32 @@ +From 2b6e4a94c847129dd014a1efa733cd1b4a2448e6 Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Fri, 2 Nov 2018 10:11:41 -0400 +Subject: [PATCH] rabbitmq-cluster: debug log detailed output when mnesia query + fails + +--- + heartbeat/rabbitmq-cluster | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 78b2bbadf..fabfeedfb 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -191,7 +191,8 @@ rmq_app_running() { + rmq_monitor() { + local rc + +- if $RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' | grep -q '^{ok'; then ++ status=$($RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' 2>&1) ++ if echo "${status}" | grep -q '^{ok'; then + pcs_running=$(rmq_join_list | wc -w) + ocf_log debug "Pacemaker thinks ${pcs_running} RabbitMQ nodes are running" + rmq_running=$($RMQ_CTL eval 'length(mnesia:system_info(running_db_nodes)).') +@@ -209,6 +210,7 @@ rmq_monitor() { + return $OCF_SUCCESS + else + ocf_log info "RabbitMQ server could not get cluster status from mnesia" ++ ocf_log debug "${status}" + rmq_delete_nodename + return $OCF_NOT_RUNNING + fi diff --git a/SOURCES/bz1745713-rabbitmq-cluster-7-suppress-additional-output.patch b/SOURCES/bz1745713-rabbitmq-cluster-7-suppress-additional-output.patch new file mode 100644 index 0000000..8b58191 --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-7-suppress-additional-output.patch @@ -0,0 +1,87 @@ +From 5a33171b2c40e2e1587e82aad0cb7e39abcf615d Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Thu, 13 Dec 2018 12:58:43 -0500 +Subject: [PATCH] rabbitmq-cluster: always use quiet flag for eval calls + +On older rabbitmq versions, rabbitmqctl appends "...done." at the end +of the output. However we expect eval without this extra output so it +can be used for further processing. The -q option to rabbitmqctl +suppresses the extra output, so ensure we always pass that when +calling eval. 
+ +Resolves: RHBZ#1659072 +--- + heartbeat/rabbitmq-cluster | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 2dca3e216..e82ac2399 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -39,6 +39,7 @@ RMQ_TRY_RESTART_ERROR_CODE=126 + + RMQ_SERVER=/usr/sbin/rabbitmq-server + RMQ_CTL=/usr/sbin/rabbitmqctl ++RMQ_EVAL="${RMQ_CTL} eval -q" + RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia" + RMQ_PID_DIR="/var/run/rabbitmq" + RMQ_PID_FILE="/var/run/rabbitmq/rmq.pid" +@@ -179,7 +180,7 @@ remove_pid () { + } + + rmq_app_running() { +- if $RMQ_CTL eval 'application:which_applications().' | grep -q '{rabbit,'; then ++ if $RMQ_EVAL 'application:which_applications().' | grep -q '{rabbit,'; then + ocf_log debug "RabbitMQ application is running" + return $OCF_SUCCESS + else +@@ -190,7 +191,7 @@ rmq_app_running() { + } + + rmq_node_alive() { +- if $RMQ_CTL eval 'ok.'; then ++ if $RMQ_EVAL 'ok.'; then + ocf_log debug "RabbitMQ node is alive" + return $OCF_SUCCESS + else +@@ -203,11 +204,11 @@ rmq_node_alive() { + rmq_monitor() { + local rc + +- status=$($RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' 2>&1) ++ status=$($RMQ_EVAL 'rabbit_mnesia:cluster_status_from_mnesia().' 2>&1) + if echo "${status}" | grep -q '^{ok'; then + pcs_running=$(rmq_join_list | wc -w) + ocf_log debug "Pacemaker thinks ${pcs_running} RabbitMQ nodes are running" +- rmq_running=$($RMQ_CTL eval 'length(mnesia:system_info(running_db_nodes)).') ++ rmq_running=$($RMQ_EVAL 'length(mnesia:system_info(running_db_nodes)).') + ocf_log debug "RabbitMQ thinks ${rmq_running} RabbitMQ nodes are running" + + if [ $(( $rmq_running * 2 )) -lt $pcs_running ]; then +@@ -294,7 +295,7 @@ rmq_start_first() + + rmq_is_clustered() + { +- $RMQ_CTL eval 'rabbit_mnesia:is_clustered().' | grep -q true ++ $RMQ_EVAL 'rabbit_mnesia:is_clustered().' | grep -q true + } + + rmq_join_existing() +@@ -432,7 +433,7 @@ rmq_try_start() { + + # Restore users, user permissions, and policies (if any) + BaseDataDir=`dirname $RMQ_DATA_DIR` +- $RMQ_CTL eval " ++ $RMQ_EVAL " + %% Run only if Mnesia is ready. + lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso + begin +@@ -497,7 +498,7 @@ rmq_start() { + rmq_stop() { + # Backup users, user permissions, and policies + BaseDataDir=`dirname $RMQ_DATA_DIR` +- $RMQ_CTL eval " ++ $RMQ_EVAL " + %% Run only if Mnesia is still available. + lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso + begin diff --git a/SOURCES/bz1748768-docker-fix-stop-issues.patch b/SOURCES/bz1748768-docker-fix-stop-issues.patch new file mode 100644 index 0000000..d4e6f16 --- /dev/null +++ b/SOURCES/bz1748768-docker-fix-stop-issues.patch @@ -0,0 +1,88 @@ +From 5949405d0031a4aba91c81cb28c24821ad2d439a Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Thu, 3 Jan 2019 15:05:20 -0800 +Subject: [PATCH] docker: Fix issues with stop operation + +The docker RA's stop operation doesn't behave properly in some cases. + 1. It returns a false success code in case of an error response from + the daemon. + 2. It fails at `remove_container()` if the container does not exist + but another docker object of the same name does exist. + +In case #1, the `container_exists()` function returns the same exit code +(1) if the container is not found (an expected error) or if there is an +error response from the docker daemon (an unexpected error). 
These types +of errors should be handled differently. + +In case #2, the `docker inspect` calls do not limit their search to +containers. So if a non-container object is found with a matching name, +the RA attempts to remove a container by that name. Such a container may +not exist. + +This patch fixes these issues as follows: + 1. Match an error response in `container_exists()` against the string + "No such container". + 2. Add `--type=container` to the `docker inspect` calls to restrict + the match. +--- + heartbeat/docker | 26 ++++++++++++++++++++++---- + 1 file changed, 22 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/docker b/heartbeat/docker +index f5ba83ff2..c206344ad 100755 +--- a/heartbeat/docker ++++ b/heartbeat/docker +@@ -215,7 +215,7 @@ monitor_cmd_exec() + out=$(docker exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) + rc=$? + else +- out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(docker inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1) ++ out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(docker inspect --type=container --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1) + rc=$? + fi + +@@ -236,7 +236,25 @@ monitor_cmd_exec() + + container_exists() + { +- docker inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1 ++ local err ++ ++ err=$(docker inspect --type=container $CONTAINER 2>&1 >/dev/null) ++ ++ if [ $? -ne $OCF_SUCCESS ]; then ++ case $err in ++ *"No such container"*) ++ # Return failure instead of exiting if container does not exist ++ return 1 ++ ;; ++ *) ++ # Exit if error running command ++ ocf_exit_reason "$err" ++ exit $OCF_ERR_GENERIC ++ ;; ++ esac ++ fi ++ ++ return $OCF_SUCCESS + } + + remove_container() +@@ -265,7 +283,7 @@ docker_simple_status() + fi + + # retrieve the 'Running' attribute for the container +- val=$(docker inspect --format {{.State.Running}} $CONTAINER 2>/dev/null) ++ val=$(docker inspect --type=container --format {{.State.Running}} $CONTAINER 2>/dev/null) + if [ $? -ne 0 ]; then + #not running as a result of container not being found + return $OCF_NOT_RUNNING +@@ -295,7 +313,7 @@ docker_health_status() + # if starting takes longer than monitor timeout then upstream will make this fail. + while + +- val=$(docker inspect --format {{.State.Health.Status}} $CONTAINER 2>/dev/null) ++ val=$(docker inspect --type=container --format {{.State.Health.Status}} $CONTAINER 2>/dev/null) + if [ $? -ne 0 ]; then + #not healthy as a result of container not being found + return $OCF_NOT_RUNNING diff --git a/SOURCES/bz1750261-Route-1-dont-fence-when-parameters-not-set.patch b/SOURCES/bz1750261-Route-1-dont-fence-when-parameters-not-set.patch new file mode 100644 index 0000000..6c39248 --- /dev/null +++ b/SOURCES/bz1750261-Route-1-dont-fence-when-parameters-not-set.patch @@ -0,0 +1,35 @@ +From 1286636b768bb635e9a6b1f1fbf6267c9c3f4b03 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 19 Aug 2019 13:31:06 +0200 +Subject: [PATCH] Route: dont fence node when parameters arent set + +--- + heartbeat/Route | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/Route b/heartbeat/Route +index b4011e37d..9f92eff3a 100755 +--- a/heartbeat/Route ++++ b/heartbeat/Route +@@ -249,18 +249,18 @@ route_validate() { + if [ "${OCF_RESKEY_CRM_meta_clone}" ]; then + if [ "${OCF_RESKEY_CRM_meta_clone_node_max}" != 1 ]; then + ocf_exit_reason "Misconfigured clone parameters. 
Must set meta attribute \"clone_node_max\" to 1, got ${OCF_RESKEY_CRM_meta_clone_node_max}." +- return $OCF_ERR_ARGS ++ return $OCF_ERR_CONFIGURED + fi + fi + # Did we get a destination? + if [ -z "${OCF_RESKEY_destination}" ]; then + ocf_exit_reason "Missing required parameter \"destination\"." +- return $OCF_ERR_ARGS ++ return $OCF_ERR_CONFIGURED + fi + # Did we get either a device or a gateway address? + if [ -z "${OCF_RESKEY_device}" -a -z "${OCF_RESKEY_gateway}" ]; then + ocf_exit_reason "Must specify either \"device\", or \"gateway\", or both." +- return $OCF_ERR_ARGS ++ return $OCF_ERR_CONFIGURED + fi + # If a device has been configured, is it available on this system? + if [ -n "${OCF_RESKEY_device}" ]; then diff --git a/SOURCES/bz1750261-Route-2-validate-start-validate-all.patch b/SOURCES/bz1750261-Route-2-validate-start-validate-all.patch new file mode 100644 index 0000000..e2d012e --- /dev/null +++ b/SOURCES/bz1750261-Route-2-validate-start-validate-all.patch @@ -0,0 +1,40 @@ +From 444bdc44fc47c65f848efc0c39c1e8e6620ce10d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 11 Oct 2019 12:12:52 +0200 +Subject: [PATCH] Route: only validate for start and validate-all actions + +--- + heartbeat/Route | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/Route b/heartbeat/Route +index 9f92eff3a..8898e1afd 100755 +--- a/heartbeat/Route ++++ b/heartbeat/Route +@@ -187,6 +187,8 @@ END + } + + route_start() { ++ route_validate || exit $? ++ + route_status + status=$? + if [ $status -eq $OCF_SUCCESS ]; then +@@ -313,8 +315,6 @@ for binary in ip grep; do + check_binary $binary + done + +-route_validate || exit $? +- + case $OCF_RESKEY_family in + ip4) addr_family="-4" ;; + ip6) addr_family="-6" ;; +@@ -334,7 +334,7 @@ status|monitor) route_status;; + reload) ocf_log info "Reloading..." + route_start + ;; +-validate-all) ;; ++validate-all) route_validate;; + *) route_usage + exit $OCF_ERR_UNIMPLEMENTED + ;; diff --git a/SOURCES/bz1750352-rabbitmq-cluster-restore-users-single-node-mode.patch b/SOURCES/bz1750352-rabbitmq-cluster-restore-users-single-node-mode.patch new file mode 100644 index 0000000..1399a58 --- /dev/null +++ b/SOURCES/bz1750352-rabbitmq-cluster-restore-users-single-node-mode.patch @@ -0,0 +1,148 @@ +From c0b6356bbf5b9a1fb76b011486dfce258d395ef8 Mon Sep 17 00:00:00 2001 +From: Peter Lemenkov +Date: Fri, 6 Sep 2019 14:22:46 +0200 +Subject: [PATCH] Restore users/perms/policies even if starting in a single + node mode + +See https://bugzilla.redhat.com/1744467#c1 + +Signed-off-by: Peter Lemenkov +--- + heartbeat/rabbitmq-cluster | 109 ++++++++++++++++++++----------------- + 1 file changed, 58 insertions(+), 51 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index cf8ca21a6..7837e9e3c 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -114,6 +114,62 @@ rmq_wipe_data() + rm -rf $RMQ_DATA_DIR > /dev/null 2>&1 + } + ++rmq_restore_users_perms_policies() ++{ ++ # Restore users, user permissions, and policies (if any) ++ BaseDataDir=`dirname $RMQ_DATA_DIR` ++ $RMQ_EVAL " ++ %% Run only if Mnesia is ready. 
++ lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso ++ begin ++ Restore = fun(Table, PostprocessFun, Filename) -> ++ case file:consult(Filename) of ++ {error, _} -> ++ ok; ++ {ok, [Result]} -> ++ lists:foreach(fun(X) -> mnesia:dirty_write(Table, PostprocessFun(X)) end, Result), ++ file:delete(Filename) ++ end ++ end, ++ ++ %% Restore users ++ ++ Upgrade = fun ++ ({internal_user, A, B, C}) -> {internal_user, A, B, C, rabbit_password_hashing_md5}; ++ ({internal_user, A, B, C, D}) -> {internal_user, A, B, C, D} ++ end, ++ ++ Downgrade = fun ++ ({internal_user, A, B, C}) -> {internal_user, A, B, C}; ++ ({internal_user, A, B, C, rabbit_password_hashing_md5}) -> {internal_user, A, B, C}; ++ %% Incompatible scheme, so we will loose user's password ('B' value) during conversion. ++ %% Unfortunately, this case will require manual intervention - user have to run: ++ %% rabbitmqctl change_password ++ ({internal_user, A, B, C, _}) -> {internal_user, A, B, C} ++ end, ++ ++ %% Check db scheme first ++ [WildPattern] = ets:select(mnesia_gvar, [ { {{rabbit_user, wild_pattern}, '\\\$1'}, [], ['\\\$1'] } ]), ++ case WildPattern of ++ %% Version < 3.6.0 ++ {internal_user,'_','_','_'} -> ++ Restore(rabbit_user, Downgrade, \"$BaseDataDir/users.erl\"); ++ %% Version >= 3.6.0 ++ {internal_user,'_','_','_','_'} -> ++ Restore(rabbit_user, Upgrade, \"$BaseDataDir/users.erl\") ++ end, ++ ++ NoOp = fun(X) -> X end, ++ ++ %% Restore user permissions ++ Restore(rabbit_user_permission, NoOp, \"$BaseDataDir/users_perms.erl\"), ++ ++ %% Restore policies ++ Restore(rabbit_runtime_parameters, NoOp, \"$BaseDataDir/policies.erl\") ++ end. ++ " ++} ++ + rmq_local_node() + { + +@@ -411,6 +467,7 @@ rmq_try_start() { + if [ -z "$join_list" ]; then + rmq_start_first + rc=$? ++ rmq_restore_users_perms_policies + return $rc + fi + +@@ -437,58 +494,8 @@ rmq_try_start() { + return $RMQ_TRY_RESTART_ERROR_CODE + fi + +- # Restore users, user permissions, and policies (if any) +- BaseDataDir=`dirname $RMQ_DATA_DIR` +- $RMQ_EVAL " +- %% Run only if Mnesia is ready. +- lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso +- begin +- Restore = fun(Table, PostprocessFun, Filename) -> +- case file:consult(Filename) of +- {error, _} -> +- ok; +- {ok, [Result]} -> +- lists:foreach(fun(X) -> mnesia:dirty_write(Table, PostprocessFun(X)) end, Result), +- file:delete(Filename) +- end +- end, ++ rmq_restore_users_perms_policies + +- %% Restore users +- +- Upgrade = fun +- ({internal_user, A, B, C}) -> {internal_user, A, B, C, rabbit_password_hashing_md5}; +- ({internal_user, A, B, C, D}) -> {internal_user, A, B, C, D} +- end, +- +- Downgrade = fun +- ({internal_user, A, B, C}) -> {internal_user, A, B, C}; +- ({internal_user, A, B, C, rabbit_password_hashing_md5}) -> {internal_user, A, B, C}; +- %% Incompatible scheme, so we will loose user's password ('B' value) during conversion. 
+- %% Unfortunately, this case will require manual intervention - user have to run: +- %% rabbitmqctl change_password +- ({internal_user, A, B, C, _}) -> {internal_user, A, B, C} +- end, +- +- %% Check db scheme first +- [WildPattern] = ets:select(mnesia_gvar, [ { {{rabbit_user, wild_pattern}, '\\\$1'}, [], ['\\\$1'] } ]), +- case WildPattern of +- %% Version < 3.6.0 +- {internal_user,'_','_','_'} -> +- Restore(rabbit_user, Downgrade, \"$BaseDataDir/users.erl\"); +- %% Version >= 3.6.0 +- {internal_user,'_','_','_','_'} -> +- Restore(rabbit_user, Upgrade, \"$BaseDataDir/users.erl\") +- end, +- +- NoOp = fun(X) -> X end, +- +- %% Restore user permissions +- Restore(rabbit_user_permission, NoOp, \"$BaseDataDir/users_perms.erl\"), +- +- %% Restore policies +- Restore(rabbit_runtime_parameters, NoOp, \"$BaseDataDir/policies.erl\") +- end. +- " + return $OCF_SUCCESS + } + diff --git a/SOURCES/bz1751700-IPaddr2-1-sanitize-IPv6-IPs.patch b/SOURCES/bz1751700-IPaddr2-1-sanitize-IPv6-IPs.patch new file mode 100644 index 0000000..74d3628 --- /dev/null +++ b/SOURCES/bz1751700-IPaddr2-1-sanitize-IPv6-IPs.patch @@ -0,0 +1,47 @@ +From 8ecfa95fff384ed047fd804016abdbbdcdd96d27 Mon Sep 17 00:00:00 2001 +From: Keisuke MORI +Date: Wed, 11 Sep 2019 15:33:37 +0900 +Subject: [PATCH] Low: IPaddr2: fix to work properly with unsanitized IPv6 + addresses + +`ip route get` shows the sanitized address at $1 or $2 depending on +the address is already assigned to the node or not. +``` +[root@centos73-1 ~]# /sbin/ip route get 2001:db8:101::0001 +2001:db8:101::1 dev eth1 proto ra src 2001:db8:101:0:XXXX:XXXX:XXXX:XXXX metric 100 +[root@centos73-1 ~]# /sbin/ip addr add 2001:db8:101::0001/64 dev eth1 +[root@centos73-1 ~]# /sbin/ip route get 2001:db8:101::0001 +local 2001:db8:101::1 dev lo table local proto none src 2001:db8:101::1 metric 0 +``` + +It can not be sanitized if the address is unreachable and on the recent distributions +(probably depending on the iproute package version) +``` +[root@centos73-1 ~]# /sbin/ip route get 2001:db8:201::0001 +unreachable 2001:db8:201::1 dev lo table unspec proto kernel src 2001:db8:101:0:XXXX:XXXX:XXXX:XXXX metric 429496 +``` +``` +[root@rhel80-1 ~]# /sbin/ip route get 200:db8:201::0001 +RTNETLINK answers: Network is unreachable +``` +--- + heartbeat/IPaddr2 | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index 041ace3a2..4f28ddab6 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -477,6 +477,12 @@ ip_init() { + fi + else + FAMILY=inet6 ++ # address sanitization defined in RFC5952 ++ SANITIZED_IP=$($IP2UTIL route get $OCF_RESKEY_ip | awk '$1~/:/ {print $1} $2~/:/ {print $2}') ++ if [ -n "$SANITIZED_IP" ]; then ++ OCF_RESKEY_ip="$SANITIZED_IP" ++ fi ++ + if ocf_is_true $OCF_RESKEY_lvs_support ;then + ocf_exit_reason "The IPv6 does not support lvs_support" + exit $OCF_ERR_CONFIGURED diff --git a/SOURCES/bz1751700-IPaddr2-2-return-empty-when-sanitation-fails.patch b/SOURCES/bz1751700-IPaddr2-2-return-empty-when-sanitation-fails.patch new file mode 100644 index 0000000..32f5e06 --- /dev/null +++ b/SOURCES/bz1751700-IPaddr2-2-return-empty-when-sanitation-fails.patch @@ -0,0 +1,22 @@ +From 7eff4e17641cc1463e61d772af16d17264477523 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 12 Sep 2019 12:51:54 +0200 +Subject: [PATCH] IPaddr2: IPv6 return empty string when sanitation fails + +--- + heartbeat/IPaddr2 | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 
+index 4f28ddab6..1d39ae514 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -478,7 +478,7 @@ ip_init() { + else + FAMILY=inet6 + # address sanitization defined in RFC5952 +- SANITIZED_IP=$($IP2UTIL route get $OCF_RESKEY_ip | awk '$1~/:/ {print $1} $2~/:/ {print $2}') ++ SANITIZED_IP=$($IP2UTIL route get $OCF_RESKEY_ip 2> /dev/null | awk '$1~/:/ {print $1} $2~/:/ {print $2}') + if [ -n "$SANITIZED_IP" ]; then + OCF_RESKEY_ip="$SANITIZED_IP" + fi diff --git a/SOURCES/bz1751962-nfsserver-1-systemd-perf-improvements.patch b/SOURCES/bz1751962-nfsserver-1-systemd-perf-improvements.patch new file mode 100644 index 0000000..e577a2e --- /dev/null +++ b/SOURCES/bz1751962-nfsserver-1-systemd-perf-improvements.patch @@ -0,0 +1,77 @@ +From 530c48138f7dedaf99ae1ca98865d2f8b7432475 Mon Sep 17 00:00:00 2001 +From: Eberhard Kuemmerle +Date: Thu, 12 Sep 2019 21:10:43 +0200 +Subject: [PATCH] nfsserver: performance improvements for systemd enabled + systems + +> I found two critical actions in the script: +> - systemctl status nfs-server (which also calls journalctl) +> - systemctl list-unit-files + +source: +https://lists.clusterlabs.org/pipermail/developers/2019-September/002214.html +--- + heartbeat/nfsserver | 37 +++++++++++++++++++------------------ + 1 file changed, 19 insertions(+), 18 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index bf59da98e..8527a90f3 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -233,24 +233,25 @@ set_exec_mode() + return 0 + fi + +- ## +- # Attempt systemd (with nfs-lock.service). +- ## + if which systemctl > /dev/null 2>&1; then +- if systemctl list-unit-files | grep nfs-server > /dev/null && systemctl list-unit-files | grep nfs-lock > /dev/null; then +- EXEC_MODE=2 +- # when using systemd, the nfs-lock service file handles nfsv3 locking daemons for us. +- return 0 +- fi +- fi ++ if systemctl list-unit-files 'nfs-*' | grep nfs-server > /dev/null; then ++ ++ ## ++ # Attempt systemd (with nfs-lock.service). ++ ## ++ if systemctl list-unit-files 'nfs-*' | grep nfs-lock > /dev/null; then ++ EXEC_MODE=2 ++ # when using systemd, the nfs-lock service file handles nfsv3 locking daemons for us. ++ return 0 ++ fi + +- ## +- # Attempt systemd (with rpc-statd.service). +- ## +- if which systemctl > /dev/null 2>&1; then +- if systemctl list-unit-files | grep nfs-server > /dev/null && systemctl list-unit-files | grep rpc-statd > /dev/null; then +- EXEC_MODE=3 +- return 0 ++ ## ++ # Attempt systemd (with rpc-statd.service). ++ ## ++ if systemctl list-unit-files 'rpc-*' | grep rpc-statd > /dev/null; then ++ EXEC_MODE=3 ++ return 0 ++ fi + fi + fi + +@@ -272,12 +273,12 @@ nfs_exec() + 2) if ! echo $svc | grep -q "\."; then + svc="${svc}.service" + fi +- systemctl $cmd $svc ++ systemctl -n0 $cmd $svc + ;; + 3) if ! 
echo $svc | grep -q "\."; then + svc="${svc}.service" + fi +- systemctl $cmd $svc ++ systemctl -n0 $cmd $svc + ;; + esac + } diff --git a/SOURCES/bz1751962-nfsserver-2-systemd-use-no-legend.patch b/SOURCES/bz1751962-nfsserver-2-systemd-use-no-legend.patch new file mode 100644 index 0000000..bbdc807 --- /dev/null +++ b/SOURCES/bz1751962-nfsserver-2-systemd-use-no-legend.patch @@ -0,0 +1,38 @@ +From ca9d2f9c2d23a9dc783e0d52419790d0d441232c Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 24 Sep 2019 09:12:47 +0200 +Subject: [PATCH] nfsserver: use "--no-legend" for systemctl "list-unit-files" + calls + +--- + heartbeat/nfsserver | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 8527a90f3..acef0147a 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -234,12 +234,12 @@ set_exec_mode() + fi + + if which systemctl > /dev/null 2>&1; then +- if systemctl list-unit-files 'nfs-*' | grep nfs-server > /dev/null; then ++ if systemctl --no-legend list-unit-files 'nfs-*' | grep nfs-server > /dev/null; then + + ## + # Attempt systemd (with nfs-lock.service). + ## +- if systemctl list-unit-files 'nfs-*' | grep nfs-lock > /dev/null; then ++ if systemctl --no-legend list-unit-files 'nfs-*' | grep nfs-lock > /dev/null; then + EXEC_MODE=2 + # when using systemd, the nfs-lock service file handles nfsv3 locking daemons for us. + return 0 +@@ -248,7 +248,7 @@ set_exec_mode() + ## + # Attempt systemd (with rpc-statd.service). + ## +- if systemctl list-unit-files 'rpc-*' | grep rpc-statd > /dev/null; then ++ if systemctl --no-legend list-unit-files 'rpc-*' | grep rpc-statd > /dev/null; then + EXEC_MODE=3 + return 0 + fi diff --git a/SOURCES/bz1755760-NovaEvacuate-evacuate_delay.patch b/SOURCES/bz1755760-NovaEvacuate-evacuate_delay.patch new file mode 100644 index 0000000..9b429d7 --- /dev/null +++ b/SOURCES/bz1755760-NovaEvacuate-evacuate_delay.patch @@ -0,0 +1,50 @@ +From 8b9c49fd965f73709d5a6e2c21987ba26af4856b Mon Sep 17 00:00:00 2001 +From: Luca Miccini +Date: Wed, 25 Sep 2019 17:12:39 +0200 +Subject: [PATCH] Add a configurable delay to Nova Evacuate calls + +In case /var/lib/nova/instances resides on NFS we have seen migrations +failing with 'Failed to get "write" lock - Is another process using the +image' errors. + +This has been tracked down to grace/lease timeouts not having expired +before attempting the migration/evacuate, so in this cases it might be +desirable to delay the nova evacuate call to give the storage time to +release the locks. + +Change-Id: Ie2fe784202d754eda38092479b1ab3ff4d02136a +Resolves: rhbz#1740069 +--- + +diff --git a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate +index 810f30a..596f520 100644 +--- a/heartbeat/NovaEvacuate ++++ b/heartbeat/NovaEvacuate +@@ -125,6 +125,15 @@ + + + ++ ++ ++Allows delaying the nova evacuate API call, e.g. to give a storage array time to clean ++up eventual locks/leases. ++ ++Nova evacuate delay ++ ++ ++ + + + +@@ -216,6 +225,11 @@ + fence_agent="fence_evacuate" + fi + ++ if [ ${OCF_RESKEY_evacuate_delay} != 0 ]; then ++ ocf_log info "Delaying nova evacuate by $OCF_RESKEY_evacuate_delay seconds" ++ sleep ${OCF_RESKEY_evacuate_delay} ++ fi ++ + ocf_log notice "Initiating evacuation of $node with $fence_agent" + $fence_agent ${fence_options} -o status -n ${node} + if [ $? 
= 1 ]; then diff --git a/SOURCES/bz1757837-IPsrcaddr-fix-regression-without-NetworkManager.patch b/SOURCES/bz1757837-IPsrcaddr-fix-regression-without-NetworkManager.patch new file mode 100644 index 0000000..ed60aca --- /dev/null +++ b/SOURCES/bz1757837-IPsrcaddr-fix-regression-without-NetworkManager.patch @@ -0,0 +1,75 @@ +From 6052e8fd37d23f46db217f915b445c7e67dccb34 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 4 Apr 2019 13:31:27 +0200 +Subject: [PATCH] IPsrcaddr: make proto optional to fix regression when used + without NetworkManager + +--- + heartbeat/IPsrcaddr | 21 +++++++++++++++++---- + 1 file changed, 17 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index 4ca3d2364..5a447196e 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -50,12 +50,17 @@ + + ####################################################################### + # Initialization: +- + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ++# Defaults ++OCF_RESKEY_proto_default="" ++ ++: ${OCF_RESKEY_proto=${OCF_RESKEY_proto_default}} + ####################################################################### + ++[ -z "$OCF_RESKEY_proto" ] && PROTO="" || PROTO="proto $OCF_RESKEY_proto" ++ + USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}"; + + CMDSHOW="$IP2UTIL route show to exact 0.0.0.0/0" +@@ -97,6 +102,14 @@ dotted quad notation 255.255.255.0). + Netmask + + ++ ++ ++ ++Proto to match when finding network. E.g. "kernel". ++ ++Proto ++ ++ + + + +@@ -172,7 +185,7 @@ srca_start() { + rc=$OCF_SUCCESS + ocf_log info "The ip route has been already set.($NETWORK, $INTERFACE, $ROUTE_WO_SRC)" + else +- ip route replace $NETWORK dev $INTERFACE src $1 || \ ++ $IP2UTIL route replace $NETWORK dev $INTERFACE src $1 || \ + errorexit "command 'ip route replace $NETWORK dev $INTERFACE src $1' failed" + + $CMDCHANGE $ROUTE_WO_SRC src $1 || \ +@@ -204,7 +217,7 @@ srca_stop() { + + [ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address" + +- ip route replace $NETWORK dev $INTERFACE || \ ++ $IP2UTIL route replace $NETWORK dev $INTERFACE || \ + errorexit "command 'ip route replace $NETWORK dev $INTERFACE' failed" + + $CMDCHANGE $ROUTE_WO_SRC || \ +@@ -473,7 +486,7 @@ rc=$? + } + + INTERFACE=`echo $findif_out | awk '{print $1}'` +-NETWORK=`ip route list dev $INTERFACE scope link proto kernel match $ipaddress|grep -o '^[^ ]*'` ++NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` + + case $1 in + start) srca_start $ipaddress diff --git a/SOURCES/bz1759115-aws-vpc-route53-1-update.patch b/SOURCES/bz1759115-aws-vpc-route53-1-update.patch new file mode 100644 index 0000000..9c689b1 --- /dev/null +++ b/SOURCES/bz1759115-aws-vpc-route53-1-update.patch @@ -0,0 +1,273 @@ +--- ClusterLabs-resource-agents-e711383f/heartbeat/aws-vpc-route53.in 2018-06-29 14:05:02.000000000 +0200 ++++ /home/oalbrigt/src/resource-agents/heartbeat/aws-vpc-route53.in 2019-11-07 12:24:18.822111495 +0100 +@@ -152,9 +152,15 @@ + END + } + +-ec2ip_validate() { ++r53_validate() { + ocf_log debug "function: validate" + ++ # Check for required binaries ++ ocf_log debug "Checking for required binaries" ++ for command in curl dig; do ++ check_binary "$command" ++ done ++ + # Full name + [[ -z "$OCF_RESKEY_fullname" ]] && ocf_log error "Full name parameter not set $OCF_RESKEY_fullname!" 
&& exit $OCF_ERR_CONFIGURED + +@@ -175,32 +181,111 @@ + ocf_log debug "ok" + + if [ -n "$OCF_RESKEY_profile" ]; then +- AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile" ++ AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" + else +- AWS_PROFILE_OPT="--profile default" ++ AWS_PROFILE_OPT="--profile default --cli-connect-timeout 10" + fi + + return $OCF_SUCCESS + } + +-ec2ip_monitor() { +- ec2ip_validate ++r53_monitor() { ++ # ++ # For every start action the agent will call Route53 API to check for DNS record ++ # otherwise it will try to get results directly by querying the DNS using "dig". ++ # Due to complexity in some DNS architectures "dig" can fail, and if this happens ++ # the monitor will fall back to the Route53 API call. ++ # ++ # There will be no failure, failover or restart of the agent if the monitor operation fails, ++ # hence we only return $OCF_SUCCESS in this function ++ # ++ # In case the monitor operation detects a wrong or non-existent Route53 DNS entry ++ # it will try to fix the existing one, or create it again ++ # ++ # ++ ARECORD="" ++ IPREGEX="^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$" ++ r53_validate + ocf_log debug "Checking Route53 record sets" +- IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')" +- ARECORD="$(aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query "ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" | grep RESOURCERECORDS | /usr/bin/awk '{ print $2 }' )" +- ocf_log debug "Found IP address: $ARECORD ." +- if [ "${ARECORD}" == "${IPADDRESS}" ]; then +- ocf_log debug "ARECORD $ARECORD found" ++ # ++ IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" ++ # ++ if [ "$__OCF_ACTION" = "start" ] || ocf_is_probe ; then ++ # ++ cmd="aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" ++ ocf_log info "Route53 Agent Starting or probing - executing monitoring API call: $cmd" ++ CLIRES="$($cmd 2>&1)" ++ rc=$? ++ ocf_log debug "awscli returned code: $rc" ++ if [ $rc -ne 0 ]; then ++ CLIRES=$(echo $CLIRES | grep -v '^$') ++ ocf_log warn "Route53 API returned an error: $CLIRES" ++ ocf_log warn "Skipping cluster action due to API call error" ++ return $OCF_ERR_GENERIC ++ fi ++ ARECORD=$(echo $CLIRES | grep RESOURCERECORDS | awk '{ print $5 }') ++ # ++ if ocf_is_probe; then ++ # ++ # Prevent R53 record change during probe ++ # ++ if [[ $ARECORD =~ $IPREGEX ]] && [ "$ARECORD" != "$IPADDRESS" ]; then ++ ocf_log debug "Route53 DNS record $ARECORD found at probing, disregarding" ++ return $OCF_NOT_RUNNING ++ fi ++ fi ++ else ++ # ++ cmd="dig +retries=3 +time=5 +short $OCF_RESKEY_fullname 2>/dev/null" ++ ocf_log info "executing monitoring command : $cmd" ++ ARECORD="$($cmd)" ++ rc=$? ++ ocf_log debug "dig return code: $rc" ++ # ++ if [[ ! $ARECORD =~ $IPREGEX ]] || [ $rc -ne 0 ]; then ++ ocf_log info "Fallback to Route53 API query due to DNS resolution failure" ++ cmd="aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" ++ ocf_log debug "executing monitoring API call: $cmd" ++ CLIRES="$($cmd 2>&1)" ++ rc=$?
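# Illustration only, not part of the upstream patch: the two lookup paths
# above can be reproduced by hand. The FQDN and hosted zone id below are
# placeholder values, and "default" stands in for OCF_RESKEY_profile.
FULLNAME="service.cloud.example.corp."
ZONEID="Z0000EXAMPLE"
dig +retries=3 +time=5 +short "$FULLNAME"    # regular monitor path
aws --profile default --cli-connect-timeout 10 route53 list-resource-record-sets \
	--hosted-zone-id "$ZONEID" --query "ResourceRecordSets[?Name=='$FULLNAME']"    # start/probe path, also the dig fallback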
++ ocf_log debug "awscli return code: $rc" ++ if [ $rc -ne 0 ]; then ++ CLIRES=$(echo $CLIRES | grep -v '^$') ++ ocf_log warn "Route53 API returned an error: $CLIRES" ++ ocf_log warn "Monitor skipping cluster action due to API call error" ++ return $OCF_SUCCESS ++ fi ++ ARECORD=$(echo $CLIRES | grep RESOURCERECORDS | awk '{ print $5 }') ++ fi ++ # ++ fi ++ ocf_log info "Route53 DNS record pointing $OCF_RESKEY_fullname to IP address $ARECORD" ++ # ++ if [ "$ARECORD" == "$IPADDRESS" ]; then ++ ocf_log info "Route53 DNS record $ARECORD found" ++ return $OCF_SUCCESS ++ elif [[ $ARECORD =~ $IPREGEX ]] && [ "$ARECORD" != "$IPADDRESS" ]; then ++ ocf_log info "Route53 DNS record points to a different host, setting DNS record on Route53 to this host" ++ _update_record "UPSERT" "$IPADDRESS" + return $OCF_SUCCESS + else +- ocf_log debug "No ARECORD found" +- return $OCF_NOT_RUNNING ++ ocf_log info "No Route53 DNS record found, setting DNS record on Route53 to this host" ++ _update_record "UPSERT" "$IPADDRESS" ++ return $OCF_SUCCESS + fi + + return $OCF_SUCCESS + } + + _update_record() { ++ # ++ # This function is the one that will actually execute Route53's API call ++ # and configure the DNS record using the correct API calls and parameters ++ # ++ # It creates a temporary JSON file under /tmp with the required API payload ++ # ++ # Failures in this function are critical and will cause the agent to fail ++ # + update_action="$1" + IPADDRESS="$2" + ocf_log info "Updating Route53 $OCF_RESKEY_hostedzoneid with $IPADDRESS for $OCF_RESKEY_fullname" +@@ -209,19 +294,19 @@ + ocf_exit_reason "Failed to create temporary file for record update" + exit $OCF_ERR_GENERIC + fi +- cat >>"${ROUTE53RECORD}" <<-EOF ++ cat >>"$ROUTE53RECORD" <<-EOF + { + "Comment": "Update record to reflect new IP address for a system ", + "Changes": [ + { +- "Action": "${update_action}", ++ "Action": "$update_action", + "ResourceRecordSet": { +- "Name": "${OCF_RESKEY_fullname}", ++ "Name": "$OCF_RESKEY_fullname", + "Type": "A", +- "TTL": ${OCF_RESKEY_ttl}, ++ "TTL": $OCF_RESKEY_ttl, + "ResourceRecords": [ + { +- "Value": "${IPADDRESS}" ++ "Value": "$IPADDRESS" + } + ] + } +@@ -229,46 +314,53 @@ + ] + } + EOF +- cmd="aws --profile ${OCF_RESKEY_profile} route53 change-resource-record-sets --hosted-zone-id ${OCF_RESKEY_hostedzoneid} \ +- --change-batch file://${ROUTE53RECORD} " ++ cmd="aws --profile $OCF_RESKEY_profile route53 change-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --change-batch file://$ROUTE53RECORD " + ocf_log debug "Executing command: $cmd" +- CHANGEID=$($cmd | grep CHANGEINFO | /usr/bin/awk -F'\t' '{ print $3 }' ) +- ocf_log debug "Change id: ${CHANGEID}" +- rmtempfile ${ROUTE53RECORD} +- CHANGEID=$(echo $CHANGEID |cut -d'/' -f 3 |cut -d'"' -f 1 ) +- ocf_log debug "Change id: ${CHANGEID}" ++ CLIRES="$($cmd 2>&1)" ++ rc=$? 
++ ocf_log debug "awscli returned code: $rc" ++ if [ $rc -ne 0 ]; then ++ CLIRES=$(echo $CLIRES | grep -v '^$') ++ ocf_log warn "Route53 API returned an error: $CLIRES" ++ ocf_log warn "Skipping cluster action due to API call error" ++ return $OCF_ERR_GENERIC ++ fi ++ CHANGEID=$(echo $CLIRES | awk '{ print $12 }') ++ ocf_log debug "Change id: $CHANGEID" ++ rmtempfile $ROUTE53RECORD ++ CHANGEID=$(echo $CHANGEID | cut -d'/' -f 3 | cut -d'"' -f 1 ) ++ ocf_log debug "Change id: $CHANGEID" + STATUS="PENDING" +- MYSECONDS=2 ++ MYSECONDS=20 + while [ "$STATUS" = 'PENDING' ]; do +- sleep ${MYSECONDS} +- STATUS="$(aws --profile ${OCF_RESKEY_profile} route53 get-change --id $CHANGEID | grep CHANGEINFO | /usr/bin/awk -F'\t' '{ print $4 }' |cut -d'"' -f 2 )" +- ocf_log debug "Waited for ${MYSECONDS} seconds and checked execution of Route 53 update status: ${STATUS} " ++ sleep $MYSECONDS ++ STATUS="$(aws --profile $OCF_RESKEY_profile route53 get-change --id $CHANGEID | grep CHANGEINFO | awk -F'\t' '{ print $4 }' |cut -d'"' -f 2 )" ++ ocf_log debug "Waited for $MYSECONDS seconds and checked execution of Route 53 update status: $STATUS " + done + } + +-ec2ip_stop() { +- ocf_log info "Bringing down Route53 agent. (Will remove ARECORD)" +- IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')" +- ARECORD="$(aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query "ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" | grep RESOURCERECORDS | /usr/bin/awk '{ print $2 }' )" +- ocf_log debug "Found IP address: $ARECORD ." +- if [ ${ARECORD} != ${IPADDRESS} ]; then +- ocf_log debug "No ARECORD found" +- return $OCF_SUCCESS +- else +- # determine IP address +- IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')" +- # Patch file +- ocf_log debug "Deleting IP address to ${IPADDRESS}" +- return $OCF_SUCCESS +- fi +- +- _update_record "DELETE" "$IPADDRESS" ++r53_stop() { ++ # ++ # Stop operation doesn't perform any API call or try to remove the DNS record ++ # this mostly because this is not necessarily mandatory or desired ++ # the start and monitor functions will take care of changing the DNS record ++ # if the agent starts in a different cluster node ++ # ++ ocf_log info "Bringing down Route53 agent. (Will NOT remove Route53 DNS record)" + return $OCF_SUCCESS + } + +-ec2ip_start() { +- IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')" +- _update_record "UPSERT" "$IPADDRESS" ++r53_start() { ++ # ++ # Start agent and config DNS in Route53 ++ # ++ ocf_log info "Starting Route53 DNS update...." ++ IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" ++ r53_monitor ++ if [ $? 
!= $OCF_SUCCESS ]; then ++ ocf_log info "Could not start agent - check configurations" ++ return $OCF_ERR_GENERIC ++ fi + return $OCF_SUCCESS + } + +@@ -284,16 +376,16 @@ + exit $OCF_SUCCESS + ;; + monitor) +- ec2ip_monitor ++ r53_monitor + ;; + stop) +- ec2ip_stop ++ r53_stop + ;; + validate-all) +- ec2ip_validate ++ r53_validate + ;; + start) +- ec2ip_start ++ r53_start + ;; + *) + usage diff --git a/SOURCES/bz1759115-aws-vpc-route53-2-add-public-and-secondary-ip-support.patch b/SOURCES/bz1759115-aws-vpc-route53-2-add-public-and-secondary-ip-support.patch new file mode 100644 index 0000000..afb8bb6 --- /dev/null +++ b/SOURCES/bz1759115-aws-vpc-route53-2-add-public-and-secondary-ip-support.patch @@ -0,0 +1,220 @@ +From 9b77d06bfe3308692946b8ac08bc7ec3399a762b Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 2 Apr 2020 13:38:30 +0200 +Subject: [PATCH 1/2] aws-vpc-route53: cleanup and improvements + +--- + heartbeat/aws-vpc-route53.in | 73 ++++++++++++++++++++---------------- + 1 file changed, 41 insertions(+), 32 deletions(-) + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index b276dfb3c..1cfc2b01f 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -43,8 +43,14 @@ + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ++OCF_RESKEY_hostedzoneid_default="" ++OCF_RESKEY_fullname_default="" ++OCF_RESKEY_ip_default="local" + OCF_RESKEY_ttl_default=10 + ++: ${OCF_RESKEY_hostedzoneid:=${OCF_RESKEY_hostedzoneid_default}} ++: ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}} ++: ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}} + : ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}} + + ####################################################################### +@@ -104,7 +110,7 @@ Hosted zone ID of Route 53. This is the table of + the Route 53 record. + + AWS hosted zone ID +- ++ + + + +@@ -113,7 +119,7 @@ Example: service.cloud.example.corp. + Note: The trailing dot is important to Route53! + + Full service name +- ++ + + + +@@ -189,6 +195,31 @@ r53_validate() { + return $OCF_SUCCESS + } + ++r53_start() { ++ # ++ # Start agent and config DNS in Route53 ++ # ++ ocf_log info "Starting Route53 DNS update...." ++ IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" ++ r53_monitor ++ if [ $? != $OCF_SUCCESS ]; then ++ ocf_log info "Could not start agent - check configurations" ++ return $OCF_ERR_GENERIC ++ fi ++ return $OCF_SUCCESS ++} ++ ++r53_stop() { ++ # ++ # Stop operation doesn't perform any API call or try to remove the DNS record ++ # this mostly because this is not necessarily mandatory or desired ++ # the start and monitor functions will take care of changing the DNS record ++ # if the agent starts in a different cluster node ++ # ++ ocf_log info "Bringing down Route53 agent. (Will NOT remove Route53 DNS record)" ++ return $OCF_SUCCESS ++} ++ + r53_monitor() { + # + # For every start action the agent will call Route53 API to check for DNS record +@@ -339,31 +370,6 @@ _update_record() { + done + } + +-r53_stop() { +- # +- # Stop operation doesn't perform any API call or try to remove the DNS record +- # this mostly because this is not necessarily mandatory or desired +- # the start and monitor functions will take care of changing the DNS record +- # if the agent starts in a different cluster node +- # +- ocf_log info "Bringing down Route53 agent. 
(Will NOT remove Route53 DNS record)" +- return $OCF_SUCCESS +-} +- +-r53_start() { +- # +- # Start agent and config DNS in Route53 +- # +- ocf_log info "Starting Route53 DNS update...." +- IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" +- r53_monitor +- if [ $? != $OCF_SUCCESS ]; then +- ocf_log info "Could not start agent - check configurations" +- return $OCF_ERR_GENERIC +- fi +- return $OCF_SUCCESS +-} +- + ############################################################################### + + case $__OCF_ACTION in +@@ -375,20 +381,23 @@ case $__OCF_ACTION in + metadata + exit $OCF_SUCCESS + ;; +- monitor) +- r53_monitor ++ start) ++ r53_validate || exit $? ++ r53_start + ;; + stop) + r53_stop + ;; ++ monitor) ++ r53_monitor ++ ;; + validate-all) + r53_validate + ;; +- start) +- r53_start +- ;; + *) + usage + exit $OCF_ERR_UNIMPLEMENTED + ;; + esac ++ ++exit $? + +From 745c6b9b3e331ed3705a641f1ec03a2604de3a1d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 2 Apr 2020 13:40:33 +0200 +Subject: [PATCH 2/2] aws-vpc-route53: add support for public and secondary + private IPs + +--- + heartbeat/aws-vpc-route53.in | 31 +++++++++++++++++++++++++++++-- + 1 file changed, 29 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index 1cfc2b01f..ca6556951 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -121,6 +121,15 @@ Note: The trailing dot is important to Route53! + Full service name + + ++ ++ ++IP (local (default), public or secondary private IP address (e.g. 10.0.0.1). ++ ++A secondary private IP can be setup with the awsvip agent. ++ ++Type of IP or secondary private IP address (local, public or e.g. 10.0.0.1) ++ ++ + + + Time to live for Route53 ARECORD +@@ -173,6 +182,15 @@ r53_validate() { + # Hosted Zone ID + [[ -z "$OCF_RESKEY_hostedzoneid" ]] && ocf_log error "Hosted Zone ID parameter not set $OCF_RESKEY_hostedzoneid!" && exit $OCF_ERR_CONFIGURED + ++ # Type of IP/secondary IP address ++ case $OCF_RESKEY_ip in ++ local|public|*.*.*.*) ++ ;; ++ *) ++ ocf_exit_reason "Invalid value for ip: ${OCF_RESKEY_ip}" ++ exit $OCF_ERR_CONFIGURED ++ esac ++ + # profile + [[ -z "$OCF_RESKEY_profile" ]] && ocf_log error "AWS CLI profile not set $OCF_RESKEY_profile!" && exit $OCF_ERR_CONFIGURED + +@@ -200,7 +218,7 @@ r53_start() { + # Start agent and config DNS in Route53 + # + ocf_log info "Starting Route53 DNS update...." +- IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" ++ _get_ip + r53_monitor + if [ $? 
!= $OCF_SUCCESS ]; then + ocf_log info "Could not start agent - check configurations" +@@ -239,7 +257,7 @@ r53_monitor() { + r53_validate + ocf_log debug "Checking Route53 record sets" + # +- IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" ++ _get_ip + # + if [ "$__OCF_ACTION" = "start" ] || ocf_is_probe ; then + # +@@ -308,6 +326,15 @@ r53_monitor() { + return $OCF_SUCCESS + } + ++_get_ip() { ++ case $OCF_RESKEY_ip in ++ local|public) ++ IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4)";; ++ *.*.*.*) ++ IPADDRESS="${OCF_RESKEY_ip}";; ++ esac ++} ++ + _update_record() { + # + # This function is the one that will actually execute Route53's API call diff --git a/SOURCES/bz1759115-aws-vpc-route53-3-awscli-property.patch b/SOURCES/bz1759115-aws-vpc-route53-3-awscli-property.patch new file mode 100644 index 0000000..07a02c3 --- /dev/null +++ b/SOURCES/bz1759115-aws-vpc-route53-3-awscli-property.patch @@ -0,0 +1,302 @@ +From 01d3e07ec6c5240633633cb56d1bc915190f40a5 Mon Sep 17 00:00:00 2001 +From: Brandon Perkins +Date: Fri, 24 Apr 2020 18:19:19 -0400 +Subject: [PATCH 1/4] Replace aws command line with OCF_RESKEY_awscli property. + +--- + heartbeat/aws-vpc-move-ip | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 26ca6007d..af697adbe 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -159,14 +159,14 @@ END + execute_cmd_as_role(){ + cmd=$1 + role=$2 +- output="$(aws sts assume-role --role-arn $role --role-session-name AWSCLI-RouteTableUpdate --profile $OCF_RESKEY_profile --output=text)" ++ output="$($OCF_RESKEY_awscli sts assume-role --role-arn $role --role-session-name AWSCLI-RouteTableUpdate --profile $OCF_RESKEY_profile --output=text)" + export AWS_ACCESS_KEY_ID="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $5}')" + export AWS_SECRET_ACCESS_KEY="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $7}')" + export AWS_SESSION_TOKEN="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $8}')" + + #Execute command + ocf_log debug "Assumed Role ${role}" +- ocf_log debug "$(aws sts get-caller-identity)" ++ ocf_log debug "$($OCF_RESKEY_awscli sts get-caller-identity)" + ocf_log debug "executing command: $cmd" + response="$($cmd)" + unset output AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN +@@ -181,7 +181,7 @@ ec2ip_set_address_param_compat(){ + } + + ec2ip_validate() { +- for cmd in aws ip curl; do ++ for cmd in $OCF_RESKEY_awscli ip curl; do + check_binary "$cmd" + done + + +From 20466ba91c21a489303774ac9a1f5f5fd7b86f12 Mon Sep 17 00:00:00 2001 +From: Brandon Perkins +Date: Fri, 24 Apr 2020 18:20:17 -0400 +Subject: [PATCH 2/4] - Replace aws command line with OCF_RESKEY_awscli + property. - Add OCF_RESKEY_awscli and OCF_RESKEY_profile default variables. - + Add awscli (Path to AWS CLI tools) parameter. - Remove required attribute on + profile parameter. - Replace --profile $OCF_RESKEY_profile with + AWS_PROFILE_OPT. + +--- + heartbeat/aws-vpc-route53.in | 71 ++++++++++++++++++++++-------------- + 1 file changed, 43 insertions(+), 28 deletions(-) + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index ca6556951..3042b345b 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -43,11 +43,16 @@ + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ++# Defaults ++OCF_RESKEY_awscli_default="/usr/bin/aws" ++OCF_RESKEY_profile_default="default" + OCF_RESKEY_hostedzoneid_default="" + OCF_RESKEY_fullname_default="" + OCF_RESKEY_ip_default="local" + OCF_RESKEY_ttl_default=10 + ++: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} ++: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} + : ${OCF_RESKEY_hostedzoneid:=${OCF_RESKEY_hostedzoneid_default}} + : ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}} + : ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}} +@@ -103,7 +108,35 @@ primitive res_route53 ocf:heartbeat:aws-vpc-route53 \ + meta target-role=Started + + Update Route53 VPC record for AWS EC2 ++ + ++ ++ ++Path to command line tools for AWS ++ ++Path to AWS CLI tools ++ ++ ++ ++ ++ ++The name of the AWS CLI profile of the root account. This ++profile will have to use the "text" format for CLI output. ++The file /root/.aws/config should have an entry which looks ++like: ++ ++ [profile cluster] ++ region = us-east-1 ++ output = text ++ ++"cluster" is the name which has to be used in the cluster ++configuration. The region has to be the current one. The ++output has to be "text". ++ ++AWS Profile Name ++ ++ ++ + + + Hosted zone ID of Route 53. This is the table of +@@ -112,6 +145,7 @@ the Route 53 record. + AWS hosted zone ID + + ++ + + + The full name of the service which will host the IP address. +@@ -121,6 +155,7 @@ Note: The trailing dot is important to Route53! + Full service name + + ++ + + + IP (local (default), public or secondary private IP address (e.g. 10.0.0.1). +@@ -130,6 +165,7 @@ A secondary private IP can be setup with the awsvip agent. + Type of IP or secondary private IP address (local, public or e.g. 10.0.0.1) + + ++ + + + Time to live for Route53 ARECORD +@@ -137,25 +173,8 @@ Time to live for Route53 ARECORD + ARECORD TTL + + +- +- +-The name of the AWS CLI profile of the root account. This +-profile will have to use the "text" format for CLI output. +-The file /root/.aws/config should have an entry which looks +-like: +- +- [profile cluster] +- region = us-east-1 +- output = text +- +-"cluster" is the name which has to be used in the cluster +-configuration. The region has to be the current one. The +-output has to be "text". +- +-AWS Profile Name +- +- + ++ + + + +@@ -198,17 +217,13 @@ r53_validate() { + [[ -z "$OCF_RESKEY_ttl" ]] && ocf_log error "TTL not set $OCF_RESKEY_ttl!" && exit $OCF_ERR_CONFIGURED + + ocf_log debug "Testing aws command" +- aws --version 2>&1 ++ $OCF_RESKEY_awscli --version 2>&1 + if [ "$?" -gt 0 ]; then + ocf_log error "Error while executing aws command as user root! Please check if AWS CLI tools (Python flavor) are properly installed and configured." 
&& exit $OCF_ERR_INSTALLED + fi + ocf_log debug "ok" + +- if [ -n "$OCF_RESKEY_profile" ]; then +- AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" +- else +- AWS_PROFILE_OPT="--profile default --cli-connect-timeout 10" +- fi ++ AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" + + return $OCF_SUCCESS + } +@@ -261,7 +276,7 @@ r53_monitor() { + # + if [ "$__OCF_ACTION" = "start" ] || ocf_is_probe ; then + # +- cmd="aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" ++ cmd="$OCF_RESKEY_awscli $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" + ocf_log info "Route53 Agent Starting or probing - executing monitoring API call: $cmd" + CLIRES="$($cmd 2>&1)" + rc=$? +@@ -293,7 +308,7 @@ r53_monitor() { + # + if [[ ! $ARECORD =~ $IPREGEX ]] || [ $rc -ne 0 ]; then + ocf_log info "Fallback to Route53 API query due to DNS resolution failure" +- cmd="aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" ++ cmd="$OCF_RESKEY_awscli $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" + ocf_log debug "executing monitoring API call: $cmd" + CLIRES="$($cmd 2>&1)" + rc=$? +@@ -372,7 +387,7 @@ _update_record() { + ] + } + EOF +- cmd="aws --profile $OCF_RESKEY_profile route53 change-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --change-batch file://$ROUTE53RECORD " ++ cmd="$OCF_RESKEY_awscli $AWS_PROFILE_OPT route53 change-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --change-batch file://$ROUTE53RECORD " + ocf_log debug "Executing command: $cmd" + CLIRES="$($cmd 2>&1)" + rc=$? +@@ -392,7 +407,7 @@ _update_record() { + MYSECONDS=20 + while [ "$STATUS" = 'PENDING' ]; do + sleep $MYSECONDS +- STATUS="$(aws --profile $OCF_RESKEY_profile route53 get-change --id $CHANGEID | grep CHANGEINFO | awk -F'\t' '{ print $4 }' |cut -d'"' -f 2 )" ++ STATUS="$($OCF_RESKEY_awscli $AWS_PROFILE_OPT route53 get-change --id $CHANGEID | grep CHANGEINFO | awk -F'\t' '{ print $4 }' |cut -d'"' -f 2 )" + ocf_log debug "Waited for $MYSECONDS seconds and checked execution of Route 53 update status: $STATUS " + done + } + +From 113bee3ae17a8d610edc0e3879b56e96efbe8b31 Mon Sep 17 00:00:00 2001 +From: Brandon Perkins +Date: Mon, 27 Apr 2020 11:08:27 -0400 +Subject: [PATCH 3/4] Move AWS_PROFILE_OPT before the start/stop/etc and after + the usage/meta-data case statements. + +--- + heartbeat/aws-vpc-route53.in | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index 3042b345b..ee4f8afcb 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -223,8 +223,6 @@ r53_validate() { + fi + ocf_log debug "ok" + +- AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" +- + return $OCF_SUCCESS + } + +@@ -423,6 +421,11 @@ case $__OCF_ACTION in + metadata + exit $OCF_SUCCESS + ;; ++esac ++ ++AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" ++ ++case $__OCF_ACTION in + start) + r53_validate || exit $? 
+ r53_start + +From 8f46c90a73731be0c8f99adcd718f7cfc2d52002 Mon Sep 17 00:00:00 2001 +From: Brandon Perkins +Date: Mon, 27 Apr 2020 11:54:22 -0400 +Subject: [PATCH 4/4] Move AWS_PROFILE_OPT before functions and after + initialization. + +--- + heartbeat/aws-vpc-route53.in | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index ee4f8afcb..b06b93726 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -37,6 +37,7 @@ + # + # Mar. 15, 2017, vers 1.0.2 + ++ + ####################################################################### + # Initialization: + +@@ -57,9 +58,13 @@ OCF_RESKEY_ttl_default=10 + : ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}} + : ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}} + : ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}} ++####################################################################### ++ + ++AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" + ####################################################################### + ++ + usage() { + cat <<-EOT + usage: $0 {start|stop|status|monitor|validate-all|meta-data} +@@ -421,11 +426,6 @@ case $__OCF_ACTION in + metadata + exit $OCF_SUCCESS + ;; +-esac +- +-AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" +- +-case $__OCF_ACTION in + start) + r53_validate || exit $? + r53_start diff --git a/SOURCES/bz1764888-exportfs-allow-same-fsid.patch b/SOURCES/bz1764888-exportfs-allow-same-fsid.patch new file mode 100644 index 0000000..93d47af --- /dev/null +++ b/SOURCES/bz1764888-exportfs-allow-same-fsid.patch @@ -0,0 +1,22 @@ +From 9cea030ba6d5c759971873b80d6d97b545ecac39 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 7 Nov 2019 13:03:30 +0100 +Subject: [PATCH] exportfs: allow multiple exports of same directory + +--- + heartbeat/exportfs | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/exportfs b/heartbeat/exportfs +index d79aced88..1cabdee70 100755 +--- a/heartbeat/exportfs ++++ b/heartbeat/exportfs +@@ -82,7 +82,7 @@ The directory or directories to export. + + + +- ++ + + The fsid option to pass to exportfs. This can be a unique positive + integer, a UUID (assuredly sans comma characters), or the special string diff --git a/SOURCES/bz1765128-mysql-galera-fix-incorrect-rc.patch b/SOURCES/bz1765128-mysql-galera-fix-incorrect-rc.patch new file mode 100644 index 0000000..9e816bd --- /dev/null +++ b/SOURCES/bz1765128-mysql-galera-fix-incorrect-rc.patch @@ -0,0 +1,46 @@ +From c718050a4a2bb47d640af1d8e19995590755670f Mon Sep 17 00:00:00 2001 +From: Valentin Vidic +Date: Wed, 23 Oct 2019 22:04:44 +0200 +Subject: [PATCH] Low: mysql-common: fix startup check + +The PID value is not captured correctly, so the startup +fails with the wrong exit code.
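A minimal sketch of the capture bug described above (illustrative only, not
part of the upstream patch): when "& pid=$!" sits inside the quoted command
string, $! is expanded in the child shell, so the parent never records the
daemon's PID.

# broken: pid is assigned inside the child shell and lost when it exits
cmd="sleep 2 >/dev/null 2>&1 & pid=\$!"
sh -c "$cmd"
echo "${pid:-unset}"   # -> unset
# fixed, as in the hunk below: background in the current shell, then read $!
sh -c "sleep 2 >/dev/null 2>&1" &
pid=$!
echo "$pid"            # -> PID of the backgrounded command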
+ +Starting 'mysql' case 8 'check lib file': +Setting agent environment: export OCF_RESKEY_CRM_meta_timeout=15000 +Setting system environment: chmod u-w /var/lib/mysql +Running agent: ./mysql start +ERROR: The agent was hanging, killed it, maybe you damaged the agent or system's environment, see details below: +Oct 23 18:46:06 INFO: MySQL is not running +runuser: warning: cannot change directory to /nonexistent: No such file or directory +runuser: warning: cannot change directory to /nonexistent: No such file or directory +runuser: warning: cannot change directory to /nonexistent: No such file or directory +Oct 23 18:46:06 INFO: MySQL is not running +Oct 23 18:46:08 INFO: MySQL is not running +Oct 23 18:46:10 INFO: MySQL is not running +Oct 23 18:46:12 INFO: MySQL is not running +Oct 23 18:46:14 INFO: MySQL is not running +Oct 23 18:46:16 INFO: MySQL is not running +Oct 23 18:46:18 INFO: MySQL is not running +Oct 23 18:46:20 INFO: MySQL is not running +Oct 23 18:46:22 INFO: MySQL is not running +Oct 23 18:46:24 INFO: MySQL is not running +--- + heartbeat/mysql-common.sh | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh +index d1b1ddb96..4004a6b65 100755 +--- a/heartbeat/mysql-common.sh ++++ b/heartbeat/mysql-common.sh +@@ -239,8 +239,8 @@ mysql_common_start() + --datadir=$OCF_RESKEY_datadir \ + --log-error=$OCF_RESKEY_log \ + $OCF_RESKEY_additional_parameters \ +- $mysql_extra_params >/dev/null 2>&1 & +- pid=$!" ++ $mysql_extra_params >/dev/null 2>&1" & ++ pid=$! + + # Spin waiting for the server to come up. + # Let the CRM/LRM time us out if required. diff --git a/SOURCES/bz1767916-IPaddr2-clusterip-not-supported.patch b/SOURCES/bz1767916-IPaddr2-clusterip-not-supported.patch new file mode 100644 index 0000000..6e8ccfe --- /dev/null +++ b/SOURCES/bz1767916-IPaddr2-clusterip-not-supported.patch @@ -0,0 +1,104 @@ +From 92c49b6f2847546f3f938b10a2a97021774f0be3 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Wed, 4 Dec 2019 14:36:59 +0100 +Subject: [PATCH] IPaddr2: ipt_CLUSTERIP "iptables" extension not "nft" backend + compatible +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Reference: +https://lists.clusterlabs.org/pipermail/users/2019-December/026674.html +(thread also sketches a future ambition for a [presumably, to revert +the habit of a functional overloading] separate agent to use +"xt_cluster" extension/cluster match). + +Signed-off-by: Jan Pokorný +--- + heartbeat/IPaddr2 | 29 ++++++++++++++++++++++------- + heartbeat/ocf-binaries.in | 2 ++ + 2 files changed, 24 insertions(+), 7 deletions(-) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index 6f8e8c734..db0b0e547 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -123,6 +123,8 @@ VLDIR=$HA_RSCTMP + SENDARPPIDDIR=$HA_RSCTMP + CIP_lockfile=$HA_RSCTMP/IPaddr2-CIP-${OCF_RESKEY_ip} + ++IPADDR2_CIP_IPTABLES=$IPTABLES ++ + ####################################################################### + + meta_data() { +@@ -138,11 +140,21 @@ It can add an IP alias, or remove one. + In addition, it can implement Cluster Alias IP functionality + if invoked as a clone resource. + +-If used as a clone, you should explicitly set clone-node-max >= 2, ++If used as a clone, "shared address with a trivial, stateless ++(autonomous) load-balancing/mutual exclusion on ingress" mode gets ++applied (as opposed to "assume resource uniqueness" mode otherwise). 
++For that, Linux firewall (kernel and userspace) is assumed, and since ++recent distributions are ambivalent in plain "iptables" command to ++particular back-end resolution, "iptables-legacy" (when present) gets ++prioritized so as to avoid incompatibilities (note that respective ++ipt_CLUSTERIP firewall extension in use here is, at the same time, ++marked deprecated, yet said "legacy" layer can make it workable, ++literally, to this day) with "netfilter" one (as in "iptables-nft"). ++In that case, you should explicitly set clone-node-max >= 2, + and/or clone-max < number of nodes. In case of node failure, + clone instances need to be re-allocated on surviving nodes. +-This would not be possible if there is already an instance on those nodes, +-and clone-node-max=1 (which is the default). ++This would not be possible if there is already an instance ++on those nodes, and clone-node-max=1 (which is the default). + + + Manages virtual IPv4 and IPv6 addresses (Linux specific version) +@@ -995,7 +1007,7 @@ ip_start() { + + if [ -n "$IP_CIP" ] && ([ $ip_status = "no" ] || [ $ip_status = "partial2" ]); then + $MODPROBE ip_conntrack +- $IPTABLES -I INPUT -d $OCF_RESKEY_ip -i $NIC -j CLUSTERIP \ ++ $IPADDR2_CIP_IPTABLES -I INPUT -d $OCF_RESKEY_ip -i $NIC -j CLUSTERIP \ + --new \ + --clustermac $IF_MAC \ + --total-nodes $IP_INC_GLOBAL \ +@@ -1089,7 +1101,7 @@ ip_stop() { + i=1 + while [ $i -le $IP_INC_GLOBAL ]; do + ocf_log info $i +- $IPTABLES -D INPUT -d $OCF_RESKEY_ip -i $NIC -j CLUSTERIP \ ++ $IPADDR2_CIP_IPTABLES -D INPUT -d $OCF_RESKEY_ip -i $NIC -j CLUSTERIP \ + --new \ + --clustermac $IF_MAC \ + --total-nodes $IP_INC_GLOBAL \ +@@ -1186,8 +1198,11 @@ ip_validate() { + set_send_arp_program + + if [ -n "$IP_CIP" ]; then +- check_binary $IPTABLES +- check_binary $MODPROBE ++ if have_binary "$IPTABLES_LEGACY"; then ++ IPADDR2_CIP_IPTABLES="$IPTABLES_LEGACY" ++ fi ++ check_binary "$IPADDR2_CIP_IPTABLES" ++ check_binary $MODPROBE + fi + + # $BASEIP, $NETMASK, $NIC , $IP_INC_GLOBAL, and $BRDCAST have been checked within ip_init, +diff --git a/heartbeat/ocf-binaries.in b/heartbeat/ocf-binaries.in +index 9439ae170..e9bf95fc2 100644 +--- a/heartbeat/ocf-binaries.in ++++ b/heartbeat/ocf-binaries.in +@@ -26,6 +26,8 @@ export PATH + : ${GREP:=grep} + : ${IFCONFIG:=ifconfig} + : ${IPTABLES:=iptables} ++## for cases that are known not to be serviceable with iptables-nft impl. ++: ${IPTABLES_LEGACY:=iptables-legacy} + : ${IP2UTIL:=ip} + : ${MDADM:=mdadm} + : ${MODPROBE:=modprobe} diff --git a/SOURCES/bz1777381-Filesystem-1-refresh-UUID.patch b/SOURCES/bz1777381-Filesystem-1-refresh-UUID.patch new file mode 100644 index 0000000..bd84123 --- /dev/null +++ b/SOURCES/bz1777381-Filesystem-1-refresh-UUID.patch @@ -0,0 +1,33 @@ +From 18888da3ceef7a56388c89a616485fd8faa392cc Mon Sep 17 00:00:00 2001 +From: Roger Zhou +Date: Thu, 14 Nov 2019 17:52:13 +0800 +Subject: [PATCH] Filesystem: refresh UUID in the start phase + +In the case a fresh filesystem is just created from another node on the +shared storage, it is not visible yet. Then try partprobe to refresh +/dev/disk/by-uuid/* up to date. + +Signed-off-by: Roger Zhou +--- + heartbeat/Filesystem | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index e66ddc77f..543986441 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -454,6 +454,14 @@ Filesystem_start() + # accordingly + + if [ $blockdevice = "yes" ]; then ++ if [ "$DEVICE" != "/dev/null" -a !
-b "$DEVICE" ] ; then ++ # In the case a fresh filesystem is just created ++ # from another node on the shared storage, and ++ # is not visible yet. Then try partprobe to ++ # refresh /dev/disk/by-uuid/* up to date. ++ have_binary partprobe && partprobe >/dev/null 2>&1 ++ fi ++ + if [ "$DEVICE" != "/dev/null" -a ! -b "$DEVICE" ] ; then + ocf_exit_reason "Couldn't find device [$DEVICE]. Expected /dev/??? to exist" + exit $OCF_ERR_INSTALLED diff --git a/SOURCES/bz1777381-Filesystem-2-udev-settle.patch b/SOURCES/bz1777381-Filesystem-2-udev-settle.patch new file mode 100644 index 0000000..fde7f89 --- /dev/null +++ b/SOURCES/bz1777381-Filesystem-2-udev-settle.patch @@ -0,0 +1,124 @@ +From af39017b9333dcbadee2a15f3829667f2b18fb45 Mon Sep 17 00:00:00 2001 +From: Roger Zhou +Date: Fri, 20 Dec 2019 23:28:45 +0800 +Subject: [PATCH 1/2] Filesystem: respect udevd need time to create UUID + symlinks + +To refresh the filesystem UUID, there is a race condition. partprobe +might return before the UUID symlink get created. Particularly, when the +system has many devices, the udev daemon could need visible time to +process the udev event queue. Hence, wait udev for a moment. + +Signed-off-by: Roger Zhou +--- + heartbeat/Filesystem | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 543986441..c21ad5761 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -460,6 +460,10 @@ Filesystem_start() + # is not visible yet. Then try partprobe to + # refresh /dev/disk/by-uuid/* up to date. + have_binary partprobe && partprobe >/dev/null 2>&1 ++ local timeout ++ timeout=${OCF_RESKEY_CRM_meta_timeout:="60000"} ++ timeout=$((timeout/1000)) ++ have_binary udevadm && udevadm settle -t $timeout --exit-if-exists=$DEVICE + fi + + if [ "$DEVICE" != "/dev/null" -a ! -b "$DEVICE" ] ; then + +From a9fb8077c8201b287ee0486b2a34db4b7d4d8f5d Mon Sep 17 00:00:00 2001 +From: Roger Zhou +Date: Wed, 25 Dec 2019 15:45:03 +0800 +Subject: [PATCH 2/2] Filesystem: add trigger_udev_rules_if_need() for -U, -L, + or /dev/xxx device + +DEVICE parameter of this RA accepts "-U " and "-L + ++ ++ ++Role to use to query/update the route table ++ ++route table query/update role ++ ++ ++ + + + Name of the network interface, i.e. eth0 diff --git a/SOURCES/bz1814896-Filesystem-fast_stop-default-to-no-for-GFS2.patch b/SOURCES/bz1814896-Filesystem-fast_stop-default-to-no-for-GFS2.patch new file mode 100644 index 0000000..b0e8230 --- /dev/null +++ b/SOURCES/bz1814896-Filesystem-fast_stop-default-to-no-for-GFS2.patch @@ -0,0 +1,46 @@ +--- a/heartbeat/Filesystem 2020-06-11 15:49:54.111316780 +0200 ++++ b/heartbeat/Filesystem 2020-06-11 15:53:53.423821158 +0200 +@@ -60,6 +60,21 @@ + # Defaults + DFLT_STATUSDIR=".Filesystem_status/" + ++# Parameter defaults ++ ++OCF_RESKEY_fstype_default="" ++OCF_RESKEY_fast_stop_default="yes" ++ ++: ${OCF_RESKEY_fstype=${OCF_RESKEY_fstype_default}} ++if [ -z "${OCF_RESKEY_fast_stop}" ]; then ++ case "$OCF_RESKEY_fstype" in ++ gfs2) ++ OCF_RESKEY_fast_stop="no";; ++ *) ++ OCF_RESKEY_fast_stop=${OCF_RESKEY_fast_stop_default};; ++ esac ++fi ++ + # Variables used by multiple methods + HOSTOS=`uname` + +@@ -135,7 +150,7 @@ + The type of filesystem to be mounted. + + filesystem type +- ++ + + + +@@ -178,9 +193,11 @@ + users easily and want to prevent the stop action from failing, + then set this parameter to "no" and add an appropriate timeout + for the stop operation. ++ ++This defaults to "no" for GFS2 filesystems. 
+ + fast stop +- ++ + + + diff --git a/SOURCES/bz1817432-use-safe-temp-file-location.patch b/SOURCES/bz1817432-use-safe-temp-file-location.patch new file mode 100644 index 0000000..0149d72 --- /dev/null +++ b/SOURCES/bz1817432-use-safe-temp-file-location.patch @@ -0,0 +1,44 @@ +diff -uNr a/heartbeat/ClusterMon b/heartbeat/ClusterMon +--- a/heartbeat/ClusterMon 2018-06-29 14:05:02.000000000 +0200 ++++ b/heartbeat/ClusterMon 2020-03-27 12:09:23.636845893 +0100 +@@ -86,7 +86,7 @@ + PID file location to ensure only one instance is running + + PID file +- ++ + + + +@@ -94,7 +94,7 @@ + Location to write HTML output to. + + HTML output +- ++ + + + +@@ -233,8 +233,8 @@ + fi + + : ${OCF_RESKEY_update:="15000"} +-: ${OCF_RESKEY_pidfile:="/tmp/ClusterMon_${OCF_RESOURCE_INSTANCE}.pid"} +-: ${OCF_RESKEY_htmlfile:="/tmp/ClusterMon_${OCF_RESOURCE_INSTANCE}.html"} ++: ${OCF_RESKEY_pidfile:="${HA_RSCTMP}/ClusterMon_${OCF_RESOURCE_INSTANCE}.pid"} ++: ${OCF_RESKEY_htmlfile:="${HA_RSCTMP}/ClusterMon_${OCF_RESOURCE_INSTANCE}.html"} + + OCF_RESKEY_update=`expr $OCF_RESKEY_update / 1000` + +diff -uNr a/heartbeat/sapdb-nosha.sh b/heartbeat/sapdb-nosha.sh +--- a/heartbeat/sapdb-nosha.sh 2018-06-29 14:05:02.000000000 +0200 ++++ b/heartbeat/sapdb-nosha.sh 2020-03-27 12:07:16.183958164 +0100 +@@ -740,5 +740,5 @@ + } + + # Set a tempfile and make sure to clean it up again +-TEMPFILE="/tmp/SAPDatabase.$$.tmp" +-trap trap_handler INT TERM +\ No newline at end of file ++TEMPFILE="${HA_RSCTMP}/SAPDatabase.$$.tmp" ++trap trap_handler INT TERM diff --git a/SOURCES/bz1817598-ocf_is_clone-1-fix-clone-max-can-be-0.patch b/SOURCES/bz1817598-ocf_is_clone-1-fix-clone-max-can-be-0.patch new file mode 100644 index 0000000..2b025c5 --- /dev/null +++ b/SOURCES/bz1817598-ocf_is_clone-1-fix-clone-max-can-be-0.patch @@ -0,0 +1,23 @@ +From bb9e54cdac71a1f26aa626d234e38c8ae8417e9f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 26 Mar 2020 16:26:14 +0100 +Subject: [PATCH] ocf-shellfuncs: fix ocf_is_clone() (clone_max can be 0 with + cloned resources) + +--- + heartbeat/ocf-shellfuncs.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 7a97558a5..e0eaae1d5 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -557,7 +557,7 @@ ocf_is_probe() { + # defined as a resource where the clone-max meta attribute is present, + # and set to greater than zero. + ocf_is_clone() { +- [ ! -z "${OCF_RESKEY_CRM_meta_clone_max}" ] && [ "${OCF_RESKEY_CRM_meta_clone_max}" -gt 0 ] ++ [ ! 
-z "${OCF_RESKEY_CRM_meta_clone_max}" ] + } + + # returns true if the resource is configured as a multistate diff --git a/SOURCES/bz1817598-ocf_is_clone-2-update-comment.patch b/SOURCES/bz1817598-ocf_is_clone-2-update-comment.patch new file mode 100644 index 0000000..4b9be99 --- /dev/null +++ b/SOURCES/bz1817598-ocf_is_clone-2-update-comment.patch @@ -0,0 +1,24 @@ +From 420e55da2eb542b35fe8af5d05496b129cd190d5 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 27 Mar 2020 08:44:12 +0100 +Subject: [PATCH] ocf-shellfuncs: ocf_is_clone: update comment based on + clone-max fix in previous commit + +--- + heartbeat/ocf-shellfuncs.in | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index e0eaae1d5..c4d40e382 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -554,8 +554,7 @@ ocf_is_probe() { + } + + # returns true if the resource is configured as a clone. This is +-# defined as a resource where the clone-max meta attribute is present, +-# and set to greater than zero. ++# defined as a resource where the clone-max meta attribute is present. + ocf_is_clone() { + [ ! -z "${OCF_RESKEY_CRM_meta_clone_max}" ] + } diff --git a/SOURCES/bz1818997-3-nfsserver-nfsnotify-fix-selinux-label-issue.patch b/SOURCES/bz1818997-3-nfsserver-nfsnotify-fix-selinux-label-issue.patch new file mode 100644 index 0000000..e470d50 --- /dev/null +++ b/SOURCES/bz1818997-3-nfsserver-nfsnotify-fix-selinux-label-issue.patch @@ -0,0 +1,48 @@ +From dbd45cb5fcce0a3378f6ecd0c14b578e6f843e3d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 24 Jul 2020 16:03:20 +0200 +Subject: [PATCH 1/2] nfsserver: fix SELinux issue due to newer ls versions + giving additional output. + +This patch has been tested on RHEL6, 7 and 8. +--- + heartbeat/nfsserver | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 0dbc173f3..80d20676b 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -192,7 +192,7 @@ fi + which restorecon > /dev/null 2>&1 && selinuxenabled + SELINUX_ENABLED=$? + if [ $SELINUX_ENABLED -eq 0 ]; then +- export SELINUX_LABEL="$(ls -ldZ $STATD_PATH | cut -f4 -d' ')" ++ export SELINUX_LABEL="$(ls -dZ $STATD_PATH | grep -o '\S\+:\S\+:\S\+')" + fi + + ## + +From 81118dbb25fe2cfc7d5fc803178cd7c3b445915e Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 27 Jul 2020 10:33:44 +0200 +Subject: [PATCH 2/2] nfsnotify: fix SELinux issue due to newer ls versions + giving additional output. + +--- + heartbeat/nfsnotify.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/nfsnotify.in b/heartbeat/nfsnotify.in +index 7f710bca7..851f6ad6b 100644 +--- a/heartbeat/nfsnotify.in ++++ b/heartbeat/nfsnotify.in +@@ -305,7 +305,7 @@ esac + which restorecon > /dev/null 2>&1 && selinuxenabled + SELINUX_ENABLED=$? 
+ if [ $SELINUX_ENABLED -eq 0 ]; then +- export SELINUX_LABEL="$(ls -ldZ $STATD_PATH | cut -f4 -d' ')" ++ export SELINUX_LABEL="$(ls -dZ $STATD_PATH | grep -o '\S\+:\S\+:\S\+')" + fi + + case $__OCF_ACTION in diff --git a/SOURCES/bz1818997-nfsserver-1-fix-nfsv4-only-support.patch b/SOURCES/bz1818997-nfsserver-1-fix-nfsv4-only-support.patch new file mode 100644 index 0000000..b3efdce --- /dev/null +++ b/SOURCES/bz1818997-nfsserver-1-fix-nfsv4-only-support.patch @@ -0,0 +1,43 @@ +From 47dd1d16f08de06d512f9e04c3966c35f0ac4d3f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 27 May 2020 13:05:57 +0200 +Subject: [PATCH] nfsserver: fix NFSv4-only support + +When disabling NFSv2 and NFSv3, mountd doesn't register with rpcbind, but +it's still running. This patch checks that mountd is running instead of +basing its status on it being registered w/rpcbind. +--- + heartbeat/nfsserver | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index acef0147a..9e6e1fcb1 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -316,7 +316,7 @@ nfsserver_systemd_monitor() + fi + + ocf_log debug "Status: nfs-mountd" +- rpcinfo -t localhost 100005 > /dev/null 2>&1 ++ ps axww | grep -q "[r]pc.mountd" + rc=$? + if [ "$rc" -ne "0" ]; then + ocf_exit_reason "nfs-mountd is not running" +@@ -683,7 +683,7 @@ nfsserver_start () + local i=1 + while : ; do + ocf_log info "Start: nfs-mountd i: $i" +- rpcinfo -t localhost 100005 > /dev/null 2>&1 ++ ps axww | grep -q "[r]pc.mountd" + rc=$? + if [ "$rc" -eq "0" ]; then + break; +@@ -800,7 +800,7 @@ nfsserver_stop () + + nfs_exec stop nfs-mountd > /dev/null 2>&1 + ocf_log info "Stop: nfs-mountd" +- rpcinfo -t localhost 100005 > /dev/null 2>&1 ++ ps axww | grep -q "[r]pc.mountd" + rc=$? + if [ "$rc" -eq "0" ]; then + ocf_exit_reason "Failed to stop nfs-mountd" diff --git a/SOURCES/bz1818997-nfsserver-2-stop-nfsdcld-if-present.patch b/SOURCES/bz1818997-nfsserver-2-stop-nfsdcld-if-present.patch new file mode 100644 index 0000000..085a39a --- /dev/null +++ b/SOURCES/bz1818997-nfsserver-2-stop-nfsdcld-if-present.patch @@ -0,0 +1,34 @@ +From 290741f43ff414630f558ee3432e830e39d1599d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 22 Jul 2020 11:56:32 +0200 +Subject: [PATCH] nfsserver: stop nfsdcld if present during stop-action + +--- + heartbeat/nfsserver | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 9e6e1fcb1..0dbc173f3 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -806,6 +806,20 @@ nfsserver_stop () + ocf_exit_reason "Failed to stop nfs-mountd" + return $OCF_ERR_GENERIC + fi ++ ++ if systemctl --no-legend list-unit-files "nfsdcld*" | grep -q nfsdcld; then ++ nfs_exec stop nfsdcld > /dev/null 2>&1 ++ ocf_log info "Stop: nfsdcld" ++ fn=`mktemp` ++ nfs_exec status nfsdcld > $fn 2>&1 ++ rc=$?
++ ocf_log debug "$(cat $fn)" ++ rm -f $fn ++ if [ "$rc" -eq "0" ]; then ++ ocf_exit_reason "Failed to stop nfsdcld" ++ return $OCF_ERR_GENERIC ++ fi ++ fi + esac + + diff --git a/SOURCES/bz1819021-aws-vpc-move-ip-delete-remaining-route-entries.patch b/SOURCES/bz1819021-aws-vpc-move-ip-delete-remaining-route-entries.patch new file mode 100644 index 0000000..85355b3 --- /dev/null +++ b/SOURCES/bz1819021-aws-vpc-move-ip-delete-remaining-route-entries.patch @@ -0,0 +1,24 @@ +From 390d1cb8b057ef0e6869fb57dc1e6b6997af49f0 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 3 Apr 2020 16:10:04 +0200 +Subject: [PATCH] aws-vpc-move-ip: delete remaining route entries + +--- + heartbeat/aws-vpc-move-ip | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 97a467217..26ca6007d 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -256,6 +256,10 @@ ec2ip_drop() { + return $OCF_ERR_GENERIC + fi + ++ # delete remaining route-entries if any ++ ip route show to exact ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface | xargs -r ip route delete ++ ip route show table local to exact ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface | xargs -r ip route delete ++ + return $OCF_SUCCESS + } + diff --git a/SOURCES/bz1819965-1-ocf.py-update.patch b/SOURCES/bz1819965-1-ocf.py-update.patch new file mode 100644 index 0000000..e94deb7 --- /dev/null +++ b/SOURCES/bz1819965-1-ocf.py-update.patch @@ -0,0 +1,357 @@ +--- a/heartbeat/ocf.py 2020-04-08 13:03:20.543477544 +0200 ++++ b/heartbeat/ocf.py 2020-04-06 10:23:45.950913519 +0200 +@@ -88,6 +88,10 @@ + + OCF_RESOURCE_INSTANCE = env.get("OCF_RESOURCE_INSTANCE") + ++OCF_ACTION = env.get("__OCF_ACTION") ++if OCF_ACTION is None and len(argv) == 2: ++ OCF_ACTION = argv[1] ++ + HA_DEBUG = env.get("HA_debug", 0) + HA_DATEFMT = env.get("HA_DATEFMT", "%b %d %T ") + HA_LOGFACILITY = env.get("HA_LOGFACILITY") +@@ -135,3 +139,343 @@ + log.addHandler(dfh) + + logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) ++ ++ ++_exit_reason_set = False ++ ++def ocf_exit_reason(msg): ++ """ ++ Print exit error string to stderr. ++ ++ Allows the OCF agent to provide a string describing ++ why the exit code was returned. ++ """ ++ global _exit_reason_set ++ cookie = env.get("OCF_EXIT_REASON_PREFIX", "ocf-exit-reason:") ++ sys.stderr.write("{}{}\n".format(cookie, msg)) ++ sys.stderr.flush() ++ logger.error(msg) ++ _exit_reason_set = True ++ ++ ++def have_binary(name): ++ """ ++ True if binary exists, False otherwise. ++ """ ++ def _access_check(fn): ++ return (os.path.exists(fn) and ++ os.access(fn, os.F_OK | os.X_OK) and ++ not os.path.isdir(fn)) ++ if _access_check(name): ++ return True ++ path = env.get("PATH", os.defpath).split(os.pathsep) ++ seen = set() ++ for dir in path: ++ dir = os.path.normcase(dir) ++ if dir not in seen: ++ seen.add(dir) ++ name2 = os.path.join(dir, name) ++ if _access_check(name2): ++ return True ++ return False ++ ++ ++def is_true(val): ++ """ ++ Convert an OCF truth value to a ++ Python boolean. ++ """ ++ return val in ("yes", "true", "1", 1, "YES", "TRUE", "ja", "on", "ON", True) ++ ++ ++def is_probe(): ++ """ ++ A probe is defined as a monitor operation ++ with an interval of zero. This is called ++ by Pacemaker to check the status of a possibly ++ not running resource. 
++ """ ++ return (OCF_ACTION == "monitor" and ++ env.get("OCF_RESKEY_CRM_meta_interval", "") == "0") ++ ++ ++def get_parameter(name, default=None): ++ """ ++ Extract the parameter value from the environment ++ """ ++ return env.get("OCF_RESKEY_{}".format(name), default) ++ ++ ++def distro(): ++ """ ++ Return name of distribution/platform. ++ ++ If possible, returns "name/version", else ++ just "name". ++ """ ++ import subprocess ++ import platform ++ try: ++ ret = subprocess.check_output(["lsb_release", "-si"]) ++ if type(ret) != str: ++ ret = ret.decode() ++ distro = ret.strip() ++ ret = subprocess.check_output(["lsb_release", "-sr"]) ++ if type(ret) != str: ++ ret = ret.decode() ++ version = ret.strip() ++ return "{}/{}".format(distro, version) ++ except Exception: ++ if os.path.exists("/etc/debian_version"): ++ return "Debian" ++ if os.path.exists("/etc/SuSE-release"): ++ return "SUSE" ++ if os.path.exists("/etc/redhat-release"): ++ return "Redhat" ++ return platform.system() ++ ++ ++class Parameter(object): ++ def __init__(self, name, shortdesc, longdesc, content_type, unique, required, default): ++ self.name = name ++ self.shortdesc = shortdesc ++ self.longdesc = longdesc ++ self.content_type = content_type ++ self.unique = unique ++ self.required = required ++ self.default = default ++ ++ def __str__(self): ++ return self.to_xml() ++ ++ def to_xml(self): ++ ret = '' + "\n" ++ ret += '' + self.shortdesc + '' + "\n" ++ ret += ' ++ ++ ++1.0 ++ ++{longdesc} ++ ++{shortdesc} ++ ++ ++{parameters} ++ ++ ++ ++{actions} ++ ++ ++ ++""".format(name=self.name, ++ longdesc=self.longdesc, ++ shortdesc=self.shortdesc, ++ parameters="".join(p.to_xml() for p in self.parameters), ++ actions="".join(a.to_xml() for a in self.actions)) ++ ++ def run(self): ++ run(self) ++ ++ ++def run(agent, handlers=None): ++ """ ++ Main loop implementation for resource agents. ++ Does not return. ++ ++ Arguments: ++ ++ agent: Agent object. ++ ++ handlers: Dict of action name to handler function. ++ ++ Handler functions can take parameters as arguments, ++ the run loop will read parameter values from the ++ environment and pass to the handler. 
++ """ ++ import inspect ++ ++ agent._handlers.update(handlers or {}) ++ handlers = agent._handlers ++ ++ def check_required_params(): ++ for p in agent.parameters: ++ if p.required and get_parameter(p.name) is None: ++ ocf_exit_reason("{}: Required parameter not set".format(p.name)) ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ def call_handler(func): ++ if hasattr(inspect, 'signature'): ++ params = inspect.signature(func).parameters.keys() ++ else: ++ params = inspect.getargspec(func).args ++ def value_for_parameter(param): ++ val = get_parameter(param) ++ if val is not None: ++ return val ++ for p in agent.parameters: ++ if p.name == param: ++ return p.default ++ arglist = [value_for_parameter(p) for p in params] ++ try: ++ rc = func(*arglist) ++ if rc is None: ++ rc = OCF_SUCCESS ++ return rc ++ except Exception as err: ++ if not _exit_reason_set: ++ ocf_exit_reason(str(err)) ++ else: ++ logger.error(str(err)) ++ return OCF_ERR_GENERIC ++ ++ meta_data_action = False ++ for action in agent.actions: ++ if action.name == "meta-data": ++ meta_data_action = True ++ break ++ if not meta_data_action: ++ agent.add_action("meta-data", timeout=10) ++ ++ if len(sys.argv) == 2 and sys.argv[1] in ("-h", "--help"): ++ sys.stdout.write("usage: %s {%s}\n\n" % (sys.argv[0], "|".join(sorted(handlers.keys()))) + ++ "Expects to have a fully populated OCF RA compliant environment set.\n") ++ sys.exit(OCF_SUCCESS) ++ ++ if OCF_ACTION is None: ++ ocf_exit_reason("No action argument set") ++ sys.exit(OCF_ERR_UNIMPLEMENTED) ++ if OCF_ACTION in ('meta-data', 'usage', 'methods'): ++ sys.stdout.write(agent.to_xml() + "\n") ++ sys.exit(OCF_SUCCESS) ++ ++ check_required_params() ++ if OCF_ACTION in handlers: ++ rc = call_handler(handlers[OCF_ACTION]) ++ sys.exit(rc) ++ sys.exit(OCF_ERR_UNIMPLEMENTED) ++ ++ ++if __name__ == "__main__": ++ import unittest ++ ++ class TestMetadata(unittest.TestCase): ++ def test_noparams_noactions(self): ++ m = Agent("foo", shortdesc="shortdesc", longdesc="longdesc") ++ self.assertEqual(""" ++ ++ ++1.0 ++ ++longdesc ++ ++shortdesc ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++""", str(m)) ++ ++ def test_params_actions(self): ++ m = Agent("foo", shortdesc="shortdesc", longdesc="longdesc") ++ m.add_parameter("testparam") ++ m.add_action("start") ++ self.assertEqual(str(m.actions[0]), '\n') ++ ++ unittest.main() diff --git a/SOURCES/bz1819965-2-azure-events.patch b/SOURCES/bz1819965-2-azure-events.patch new file mode 100644 index 0000000..220d2ba --- /dev/null +++ b/SOURCES/bz1819965-2-azure-events.patch @@ -0,0 +1,1060 @@ +diff -uNr a/configure.ac b/configure.ac +--- a/configure.ac 2020-04-16 11:54:08.466619607 +0200 ++++ b/configure.ac 2020-04-16 12:05:17.241352586 +0200 +@@ -30,6 +30,8 @@ + PKG_FEATURES="" + + AC_CONFIG_AUX_DIR(.) ++AC_CONFIG_MACRO_DIR([m4]) ++ + AC_CANONICAL_HOST + + dnl Where #defines go (e.g. 
`AC_CHECK_HEADERS' below) +@@ -72,6 +74,11 @@ + [AC_MSG_ERROR([systemd support requested but pkg-config unable to query systemd package])]) + with_systemdsystemunitdir=no], + [with_systemdsystemunitdir="$def_systemdsystemunitdir"])]) ++if test "x$with_systemdsystemunitdir" != "xno" && \ ++ test "x${prefix}" != "xNONE" && \ ++ test "x${prefix}" != "x/usr"; then ++ with_systemdsystemunitdir="${prefix}/$with_systemdsystemunitdir" ++fi + AS_IF([test "x$with_systemdsystemunitdir" != "xno"], + [AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])]) + AM_CONDITIONAL([HAVE_SYSTEMD], [test "x$with_systemdsystemunitdir" != "xno"]) +@@ -79,6 +86,11 @@ + AC_ARG_WITH([systemdtmpfilesdir], + AS_HELP_STRING([--with-systemdtmpfilesdir=DIR], [Directory for systemd tmp files]), + [], [with_systemdtmpfilesdir=$($PKGCONFIG --variable=tmpfilesdir systemd)]) ++ if test "x$with_systemdtmpfilesdir" != xno && \ ++ test "x${prefix}" != "xNONE" && \ ++ test "x${prefix}" != "x/usr"; then ++ with_systemdtmpfilesdir="${prefix}/$with_systemdtmpfilesdir" ++ fi + if test "x$with_systemdtmpfilesdir" != xno; then + AC_SUBST([systemdtmpfilesdir], [$with_systemdtmpfilesdir]) + fi +@@ -501,12 +513,35 @@ + AC_SUBST(RM) + AC_SUBST(TEST) + ++dnl Ensure PYTHON is an absolute path ++AC_PATH_PROG([PYTHON], [$PYTHON]) ++ + AM_PATH_PYTHON + if test -z "$PYTHON"; then + echo "*** Essential program python not found" 1>&2 +- exit 1 + fi + ++AC_PYTHON_MODULE(googleapiclient) ++AC_PYTHON_MODULE(pyroute2) ++ ++AS_VERSION_COMPARE([$PYTHON_VERSION], [2.7], [BUILD_OCF_PY=0], [BUILD_OCF_PY=1], [BUILD_OCF_PY=1]) ++ ++BUILD_AZURE_EVENTS=1 ++if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then ++ BUILD_AZURE_EVENTS=0 ++ AC_MSG_WARN("Not building azure-events") ++fi ++AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1) ++ ++BUILD_GCP_PD_MOVE=1 ++AM_CONDITIONAL(BUILD_GCP_PD_MOVE, test $BUILD_GCP_PD_MOVE -eq 1) ++ ++BUILD_GCP_VPC_MOVE_ROUTE=1 ++AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_ROUTE, test $BUILD_GCP_VPC_MOVE_ROUTE -eq 1) ++ ++BUILD_GCP_VPC_MOVE_VIP=1 ++AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_VIP, test $BUILD_GCP_VPC_MOVE_VIP -eq 1) ++ + AC_PATH_PROGS(ROUTE, route) + AC_DEFINE_UNQUOTED(ROUTE, "$ROUTE", path to route command) + +@@ -541,6 +576,12 @@ + if test x"${STYLESHEET_PREFIX}" = x""; then + DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ + -type d | LC_ALL=C sort) ++ if test x"${DIRS}" = x""; then ++ # when datadir is not standard OS path, we cannot find docbook.xsl ++ # use standard OS path as backup ++ DIRS=$(find "/usr/share" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ ++ -type d | LC_ALL=C sort) ++ fi + XSLT=$(basename ${DOCBOOK_XSL_PATH}) + for d in ${DIRS}; do + if test -f "${d}/${XSLT}"; then +@@ -948,6 +989,7 @@ + ) + + dnl Files we output that need to be executable ++AC_CONFIG_FILES([heartbeat/azure-events], [chmod +x heartbeat/azure-events]) + AC_CONFIG_FILES([heartbeat/AoEtarget], [chmod +x heartbeat/AoEtarget]) + AC_CONFIG_FILES([heartbeat/ManageRAID], [chmod +x heartbeat/ManageRAID]) + AC_CONFIG_FILES([heartbeat/ManageVE], [chmod +x heartbeat/ManageVE]) +@@ -1021,7 +1063,7 @@ + AC_MSG_RESULT([]) + AC_MSG_RESULT([$PACKAGE configuration:]) + AC_MSG_RESULT([ Version = ${VERSION}]) +-AC_MSG_RESULT([ Build Version = e711383fd5c7bef9c24ff6bc85465e59f91080f9]) ++AC_MSG_RESULT([ Build Version = $Format:%H$]) + AC_MSG_RESULT([ Features =${PKG_FEATURES}]) + AC_MSG_RESULT([]) + AC_MSG_RESULT([ Prefix = ${prefix}]) +diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am +--- 
a/doc/man/Makefile.am 2020-04-16 11:54:08.466619607 +0200 ++++ b/doc/man/Makefile.am 2020-04-16 12:08:34.913726440 +0200 +@@ -55,7 +55,7 @@ + # 12126 on savannah.gnu.org. But, maybe it gets fixed soon, it was + # first reported in 1995 and added to Savannah in in 2005... + if BUILD_DOC +-man_MANS = ocf_heartbeat_AoEtarget.7 \ ++man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_AudibleAlarm.7 \ + ocf_heartbeat_ClusterMon.7 \ + ocf_heartbeat_CTDB.7 \ +@@ -183,6 +183,22 @@ + man_MANS += ocf_heartbeat_IPv6addr.7 + endif + ++if BUILD_AZURE_EVENTS ++man_MANS += ocf_heartbeat_azure-events.7 ++endif ++ ++if BUILD_GCP_PD_MOVE ++man_MANS += ocf_heartbeat_gcp-pd-move.7 ++endif ++ ++if BUILD_GCP_VPC_MOVE_ROUTE ++man_MANS += ocf_heartbeat_gcp-vpc-move-route.7 ++endif ++ ++if BUILD_GCP_VPC_MOVE_VIP ++man_MANS += ocf_heartbeat_gcp-vpc-move-vip.7 ++endif ++ + xmlfiles = $(man_MANS:.7=.xml) + + %.1 %.5 %.7 %.8: %.xml +diff -uNr a/heartbeat/azure-events.in b/heartbeat/azure-events.in +--- a/heartbeat/azure-events.in 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/azure-events.in 2020-04-16 12:02:15.114693551 +0200 +@@ -0,0 +1,824 @@ ++#!@PYTHON@ -tt ++# ++# Resource agent for monitoring Azure Scheduled Events ++# ++# License: GNU General Public License (GPL) ++# (c) 2018 Tobias Niekamp, Microsoft Corp. ++# and Linux-HA contributors ++ ++import os ++import sys ++import time ++import subprocess ++import json ++try: ++ import urllib2 ++except ImportError: ++ import urllib.request as urllib2 ++import socket ++from collections import defaultdict ++ ++OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os.environ.get("OCF_ROOT")) ++sys.path.append(OCF_FUNCTIONS_DIR) ++import ocf ++ ++############################################################################## ++ ++ ++VERSION = "0.10" ++USER_AGENT = "Pacemaker-ResourceAgent/%s %s" % (VERSION, ocf.distro()) ++ ++attr_globalPullState = "azure-events_globalPullState" ++attr_lastDocVersion = "azure-events_lastDocVersion" ++attr_curNodeState = "azure-events_curNodeState" ++attr_pendingEventIDs = "azure-events_pendingEventIDs" ++ ++default_loglevel = ocf.logging.INFO ++default_relevantEventTypes = set(["Reboot", "Redeploy"]) ++ ++global_pullMaxAttempts = 3 ++global_pullDelaySecs = 1 ++ ++############################################################################## ++ ++class attrDict(defaultdict): ++ """ ++ A wrapper for accessing dict keys like an attribute ++ """ ++ def __init__(self, data): ++ super(attrDict, self).__init__(attrDict) ++ for d in data.keys(): ++ self.__setattr__(d, data[d]) ++ ++ def __getattr__(self, key): ++ try: ++ return self[key] ++ except KeyError: ++ raise AttributeError(key) ++ ++ def __setattr__(self, key, value): ++ self[key] = value ++ ++############################################################################## ++ ++class azHelper: ++ """ ++ Helper class for Azure's metadata API (including Scheduled Events) ++ """ ++ metadata_host = "http://169.254.169.254/metadata" ++ instance_api = "instance" ++ events_api = "scheduledevents" ++ api_version = "2017-08-01" ++ ++ @staticmethod ++ def _sendMetadataRequest(endpoint, postData=None): ++ """ ++ Send a request to Azure's Azure Metadata Service API ++ """ ++ url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version) ++ ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData)) ++ ocf.logger.debug("_sendMetadataRequest: url = %s" % url) ++ ++ req = urllib2.Request(url, postData) ++ 
req.add_header("Metadata", "true") ++ req.add_header("User-Agent", USER_AGENT) ++ resp = urllib2.urlopen(req) ++ data = resp.read() ++ ocf.logger.debug("_sendMetadataRequest: response = %s" % data) ++ if data: ++ data = json.loads(data) ++ ++ ocf.logger.debug("_sendMetadataRequest: finished") ++ return data ++ ++ @staticmethod ++ def getInstanceInfo(): ++ """ ++ Fetch details about the current VM from Azure's Azure Metadata Service API ++ """ ++ ocf.logger.debug("getInstanceInfo: begin") ++ ++ jsondata = azHelper._sendMetadataRequest(azHelper.instance_api) ++ ocf.logger.debug("getInstanceInfo: json = %s" % jsondata) ++ ++ ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) ++ return attrDict(jsondata["compute"]) ++ ++ @staticmethod ++ def pullScheduledEvents(): ++ """ ++ Retrieve all currently scheduled events via Azure Metadata Service API ++ """ ++ ocf.logger.debug("pullScheduledEvents: begin") ++ ++ jsondata = azHelper._sendMetadataRequest(azHelper.events_api) ++ ocf.logger.debug("pullScheduledEvents: json = %s" % jsondata) ++ ++ ocf.logger.debug("pullScheduledEvents: finished") ++ return attrDict(jsondata) ++ ++ @staticmethod ++ def forceEvents(eventIDs): ++ """ ++ Force a set of events to start immediately ++ """ ++ ocf.logger.debug("forceEvents: begin") ++ ++ events = [] ++ for e in eventIDs: ++ events.append({ ++ "EventId": e, ++ }) ++ postData = { ++ "StartRequests" : events ++ } ++ ocf.logger.info("forceEvents: postData = %s" % postData) ++ resp = azHelper._sendMetadataRequest(azHelper.events_api, postData=json.dumps(postData)) ++ ++ ocf.logger.debug("forceEvents: finished") ++ return ++ ++############################################################################## ++ ++class clusterHelper: ++ """ ++ Helper functions for Pacemaker control via crm ++ """ ++ @staticmethod ++ def _getLocation(node): ++ """ ++ Helper function to retrieve local/global attributes ++ """ ++ if node: ++ return ["--node", node] ++ else: ++ return ["--type", "crm_config"] ++ ++ @staticmethod ++ def _exec(command, *args): ++ """ ++ Helper function to execute a UNIX command ++ """ ++ args = list(args) ++ ocf.logger.debug("_exec: begin; command = %s, args = %s" % (command, str(args))) ++ ++ def flatten(*n): ++ return (str(e) for a in n ++ for e in (flatten(*a) if isinstance(a, (tuple, list)) else (str(a),))) ++ command = list(flatten([command] + args)) ++ ocf.logger.debug("_exec: cmd = %s" % " ".join(command)) ++ try: ++ ret = subprocess.check_output(command) ++ ocf.logger.debug("_exec: return = %s" % ret) ++ return ret.rstrip() ++ except Exception as err: ++ ocf.logger.exception(err) ++ return None ++ ++ @staticmethod ++ def setAttr(key, value, node=None): ++ """ ++ Set the value of a specific global/local attribute in the Pacemaker cluster ++ """ ++ ocf.logger.debug("setAttr: begin; key = %s, value = %s, node = %s" % (key, value, node)) ++ ++ if value: ++ ret = clusterHelper._exec("crm_attribute", ++ "--name", key, ++ "--update", value, ++ clusterHelper._getLocation(node)) ++ else: ++ ret = clusterHelper._exec("crm_attribute", ++ "--name", key, ++ "--delete", ++ clusterHelper._getLocation(node)) ++ ++ ocf.logger.debug("setAttr: finished") ++ return len(ret) == 0 ++ ++ @staticmethod ++ def getAttr(key, node=None): ++ """ ++ Retrieve a global/local attribute from the Pacemaker cluster ++ """ ++ ocf.logger.debug("getAttr: begin; key = %s, node = %s" % (key, node)) ++ ++ val = clusterHelper._exec("crm_attribute", ++ "--name", key, ++ "--query", "--quiet", ++ "--default", "", 
++ clusterHelper._getLocation(node))
++ ocf.logger.debug("getAttr: finished")
++ if not val:
++ return None
++ return val if not val.isdigit() else int(val)
++
++ @staticmethod
++ def getAllNodes():
++ """
++ Get a list of hostnames for all nodes in the Pacemaker cluster
++ """
++ ocf.logger.debug("getAllNodes: begin")
++
++ nodes = []
++ nodeList = clusterHelper._exec("crm_node", "--list")
++ for n in nodeList.decode().split("\n"):
++ nodes.append(n.split()[1])
++ ocf.logger.debug("getAllNodes: finished; return %s" % str(nodes))
++
++ return nodes
++
++ @staticmethod
++ def getHostNameFromAzName(azName):
++ """
++ Helper function to get the actual host name from an Azure node name
++ """
++ return clusterHelper.getAttr("hostName_%s" % azName)
++
++ @staticmethod
++ def removeHoldFromNodes():
++ """
++ Remove the ON_HOLD state from all nodes in the Pacemaker cluster
++ """
++ ocf.logger.debug("removeHoldFromNodes: begin")
++
++ for n in clusterHelper.getAllNodes():
++ if clusterHelper.getAttr(attr_curNodeState, node=n) == "ON_HOLD":
++ clusterHelper.setAttr(attr_curNodeState, "AVAILABLE", node=n)
++ ocf.logger.info("removeHoldFromNodes: removed ON_HOLD from node %s" % n)
++
++ ocf.logger.debug("removeHoldFromNodes: finished")
++ return False
++
++ @staticmethod
++ def otherNodesAvailable(exceptNode):
++ """
++ Check if there are any nodes (except a given node) in the Pacemaker cluster that have state AVAILABLE
++ """
++ ocf.logger.debug("otherNodesAvailable: begin; exceptNode = %s" % exceptNode)
++
++ for n in clusterHelper.getAllNodes():
++ state = clusterHelper.getAttr(attr_curNodeState, node=n)
++ state = stringToNodeState(state) if state else AVAILABLE
++ if state == AVAILABLE and n != exceptNode.hostName:
++ ocf.logger.info("otherNodesAvailable: at least %s is available" % n)
++ ocf.logger.debug("otherNodesAvailable: finished")
++ return True
++ ocf.logger.info("otherNodesAvailable: no other nodes are available")
++ ocf.logger.debug("otherNodesAvailable: finished")
++
++ return False
++
++ @staticmethod
++ def transitionSummary():
++ """
++ Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby)
++ """
++ # Is a global crm_simulate "too much"? Or would it be sufficient if there are no planned transitions for a particular node?
++ # # crm_simulate -Ls
++ # Transition Summary:
++ # * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1)
++ # * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0)
++ # * Move rsc_ip_HN1_HDB03 (Started hsr3-db0 -> hsr3-db1)
++ # * Start rsc_nc_HN1_HDB03 (hsr3-db1)
++ # # Expected result when there are no pending actions:
++ # Transition Summary:
++ ocf.logger.debug("transitionSummary: begin")
++
++ summary = clusterHelper._exec("crm_simulate", "-Ls")
++ if not summary:
++ ocf.logger.warning("transitionSummary: could not load transition summary")
++ return False
++ if summary.find("Transition Summary:") < 0:
++ ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary)
++ return False
++ summary = summary.split("Transition Summary:")[1]
++ ret = summary.decode().split("\n").pop(0)
++
++ ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret))
++ return ret
++
++ @staticmethod
++ def listOperationsOnNode(node):
++ """
++ Get a list of all current operations for a given node (used to check if any resources are pending)
++ """
++ # hsr3-db1:/home/tniek # crm_resource --list-operations -N hsr3-db0
++ # rsc_azure-events (ocf::heartbeat:azure-events): Started: rsc_azure-events_start_0 (node=hsr3-db0, call=91, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=115ms): complete
++ # rsc_azure-events (ocf::heartbeat:azure-events): Started: rsc_azure-events_monitor_10000 (node=hsr3-db0, call=93, rc=0, last-rc-change=Fri Jun 8 22:37:47 2018, exec=197ms): complete
++ # rsc_SAPHana_HN1_HDB03 (ocf::suse:SAPHana): Master: rsc_SAPHana_HN1_HDB03_start_0 (node=hsr3-db0, call=-1, rc=193, last-rc-change=Fri Jun 8 22:37:46 2018, exec=0ms): pending
++ # rsc_SAPHanaTopology_HN1_HDB03 (ocf::suse:SAPHanaTopology): Started: rsc_SAPHanaTopology_HN1_HDB03_start_0 (node=hsr3-db0, call=90, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=3214ms): complete
++ ocf.logger.debug("listOperationsOnNode: begin; node = %s" % node)
++
++ resources = clusterHelper._exec("crm_resource", "--list-operations", "-N", node)
++ if len(resources) == 0:
++ ret = []
++ else:
++ ret = resources.decode().split("\n")
++
++ ocf.logger.debug("listOperationsOnNode: finished; return = %s" % str(ret))
++ return ret
++
++ @staticmethod
++ def noPendingResourcesOnNode(node):
++ """
++ Check that there are no pending resources on a given node
++ """
++ ocf.logger.debug("noPendingResourcesOnNode: begin; node = %s" % node)
++
++ for r in clusterHelper.listOperationsOnNode(node):
++ ocf.logger.debug("noPendingResourcesOnNode: * %s" % r)
++ resource = r.split()[-1]
++ if resource == "pending":
++ ocf.logger.info("noPendingResourcesOnNode: found resource %s that is still pending" % resource)
++ ocf.logger.debug("noPendingResourcesOnNode: finished; return = False")
++ return False
++ ocf.logger.info("noPendingResourcesOnNode: no pending resources on node %s" % node)
++ ocf.logger.debug("noPendingResourcesOnNode: finished; return = True")
++
++ return True
++
++ @staticmethod
++ def allResourcesStoppedOnNode(node):
++ """
++ Check that all resources on a given node are stopped
++ """
++ ocf.logger.debug("allResourcesStoppedOnNode: begin; node = %s" % node)
++
++ if clusterHelper.noPendingResourcesOnNode(node):
++ if len(clusterHelper.transitionSummary()) == 0:
++ ocf.logger.info("allResourcesStoppedOnNode: no pending resources on node %s and empty transition summary" % node)
++ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = True")
++ return True
++ ocf.logger.info("allResourcesStoppedOnNode: transition summary is not empty")
++ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False")
++ return False
++
++ ocf.logger.info("allResourcesStoppedOnNode: still pending resources on node %s" % node)
++ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False")
++ return False
++
++##############################################################################
++
++AVAILABLE = 0 # Node is online and ready to handle events
++STOPPING = 1 # Standby has been triggered, but some resources are still running
++IN_EVENT = 2 # All resources are stopped, and event has been initiated via Azure Metadata Service
++ON_HOLD = 3 # Node has a pending event that cannot be started because there are no other nodes available
++
++def stringToNodeState(name):
++ if type(name) == int: return name
++ if name == "STOPPING": return STOPPING
++ if name == "IN_EVENT": return IN_EVENT
++ if name == "ON_HOLD": return ON_HOLD
++ return AVAILABLE
++
++def nodeStateToString(state):
++ if state == STOPPING: return "STOPPING"
++ if state == IN_EVENT: return "IN_EVENT"
++ if state == ON_HOLD: return "ON_HOLD"
++ return "AVAILABLE"
++
++##############################################################################
++
++class Node:
++ """
++ Core class implementing logic for a cluster node
++ """
++ def __init__(self, ra):
++ self.raOwner = ra
++ self.azInfo = azHelper.getInstanceInfo()
++ self.azName = self.azInfo.name
++ self.hostName = socket.gethostname()
++ self.setAttr("azName", self.azName)
++ clusterHelper.setAttr("hostName_%s" % self.azName, self.hostName)
++
++ def getAttr(self, key):
++ """
++ Get a local attribute
++ """
++ return clusterHelper.getAttr(key, node=self.hostName)
++
++ def setAttr(self, key, value):
++ """
++ Set a local attribute
++ """
++ return clusterHelper.setAttr(key, value, node=self.hostName)
++
++ def selfOrOtherNode(self, node):
++ """
++ Helper function to distinguish self/other node
++ """
++ return node if node else self.hostName
++
++ def setState(self, state, node=None):
++ """
++ Set the state for a given node (or self)
++ """
++ node = self.selfOrOtherNode(node)
++ ocf.logger.debug("setState: begin; node = %s, state = %s" % (node, nodeStateToString(state)))
++
++ clusterHelper.setAttr(attr_curNodeState, nodeStateToString(state), node=node)
++
++ ocf.logger.debug("setState: finished")
++
++ def getState(self, node=None):
++ """
++ Get the state for a given node (or self)
++ """
++ node = self.selfOrOtherNode(node)
++ ocf.logger.debug("getState: begin; node = %s" % node)
++
++ state = clusterHelper.getAttr(attr_curNodeState, node=node)
++ ocf.logger.debug("getState: state = %s" % state)
++ ocf.logger.debug("getState: finished")
++ if not state:
++ return AVAILABLE
++ return stringToNodeState(state)
++
++ def setEventIDs(self, eventIDs, node=None):
++ """
++ Set pending EventIDs for a given node (or self)
++ """
++ node = self.selfOrOtherNode(node)
++ ocf.logger.debug("setEventIDs: begin; node = %s, eventIDs = %s" % (node, str(eventIDs)))
++
++ if eventIDs:
++ eventIDStr = ",".join(eventIDs)
++ else:
++ eventIDStr = None
++ clusterHelper.setAttr(attr_pendingEventIDs, eventIDStr, node=node)
++
++ ocf.logger.debug("setEventIDs: finished")
++ return
++
++ def getEventIDs(self, node=None):
++ """
++ Get pending EventIDs for a given node (or self)
++ """
++ node = self.selfOrOtherNode(node)
++ ocf.logger.debug("getEventIDs: begin; node = %s" % node)
++
++ eventIDStr = clusterHelper.getAttr(attr_pendingEventIDs, node=node)
++ if eventIDStr:
++ eventIDs = eventIDStr.decode().split(",")
++ else:
++ eventIDs = None
++
++ ocf.logger.debug("getEventIDs: finished; eventIDs = %s" % str(eventIDs))
++ return eventIDs
++
++ def updateNodeStateAndEvents(self, state, eventIDs, node=None):
++ """
++ Set the state and pending EventIDs for a given node (or self)
++ """
++ ocf.logger.debug("updateNodeStateAndEvents: begin; node = %s, state = %s, eventIDs = %s" % (node, nodeStateToString(state), str(eventIDs)))
++
++ self.setState(state, node=node)
++ self.setEventIDs(eventIDs, node=node)
++
++ ocf.logger.debug("updateNodeStateAndEvents: finished")
++ return state
++
++ def putNodeStandby(self, node=None):
++ """
++ Put self to standby
++ """
++ node = self.selfOrOtherNode(node)
++ ocf.logger.debug("putNodeStandby: begin; node = %s" % node)
++
++ clusterHelper._exec("crm_attribute",
++ "-t", "nodes",
++ "-N", node,
++ "-n", "standby",
++ "-v", "on",
++ "--lifetime=forever")
++
++ ocf.logger.debug("putNodeStandby: finished")
++
++ def putNodeOnline(self, node=None):
++ """
++ Put self back online
++ """
++ node = self.selfOrOtherNode(node)
++ ocf.logger.debug("putNodeOnline: begin; node = %s" % node)
++
++ clusterHelper._exec("crm_attribute",
++ "-t", "nodes",
++ "-N", node,
++ "-n", "standby",
++ "-v", "off",
++ "--lifetime=forever")
++
++ ocf.logger.debug("putNodeOnline: finished")
++
++ def separateEvents(self, events):
++ """
++ Split own/other nodes' events
++ """
++ ocf.logger.debug("separateEvents: begin; events = %s" % str(events))
++
++ localEvents = []
++ remoteEvents = []
++ for e in events:
++ e = attrDict(e)
++ if e.EventType not in self.raOwner.relevantEventTypes:
++ continue
++ if self.azName in e.Resources:
++ localEvents.append(e)
++ else:
++ remoteEvents.append(e)
++ ocf.logger.debug("separateEvents: finished; localEvents = %s, remoteEvents = %s" % (str(localEvents), str(remoteEvents)))
++ return (localEvents, remoteEvents)
++
++ def removeOrphanedEvents(self, azEvents):
++ """
++ Remove remote events that are already finished
++ """
++ ocf.logger.debug("removeOrphanedEvents: begin; azEvents = %s" % str(azEvents))
++
++ azEventIDs = set()
++ for e in azEvents:
++ azEventIDs.add(e.EventId)
++ # for all nodes except self ...
++ for n in clusterHelper.getAllNodes():
++ if n == self.hostName:
++ continue
++ curState = self.getState(node=n)
++ # ... that are still in an event or shutting down resources ...
++ if curState in (STOPPING, IN_EVENT):
++ ocf.logger.info("removeOrphanedEvents: node %s has state %s" % (n, curState))
++ clusterEventIDs = self.getEventIDs(node=n)
++ stillActive = False
++ # ... but don't have any more events running according to Azure, ...
++ for p in clusterEventIDs:
++ if p in azEventIDs:
++ ocf.logger.info("removeOrphanedEvents: (at least) event %s on node %s has not yet finished" % (str(p), n))
++ stillActive = True
++ break
++ if not stillActive:
++ # ... put them back online.
++ ocf.logger.info("removeOrphanedEvents: clusterEvents %s on node %s are not in azEvents %s -> bring node back online" % (str(clusterEventIDs), n, str(azEventIDs))) ++ self.putNodeOnline(node=n) ++ ++ ocf.logger.debug("removeOrphanedEvents: finished") ++ ++ def handleRemoteEvents(self, azEvents): ++ """ ++ Handle a list of events (as provided by Azure Metadata Service) for other nodes ++ """ ++ ocf.logger.debug("handleRemoteEvents: begin; hostName = %s, events = %s" % (self.hostName, str(azEvents))) ++ ++ if len(azEvents) == 0: ++ ocf.logger.debug("handleRemoteEvents: no remote events to handle") ++ ocf.logger.debug("handleRemoteEvents: finished") ++ return ++ eventIDsForNode = {} ++ ++ # iterate through all current events as per Azure ++ for e in azEvents: ++ ocf.logger.info("handleRemoteEvents: handling remote event %s (%s; nodes = %s)" % (e.EventId, e.EventType, str(e.Resources))) ++ # before we can force an event to start, we need to ensure all nodes involved have stopped their resources ++ if e.EventStatus == "Scheduled": ++ allNodesStopped = True ++ for azName in e.Resources: ++ hostName = clusterHelper.getHostNameFromAzName(azName) ++ state = self.getState(node=hostName) ++ if state == STOPPING: ++ # the only way we can continue is when node state is STOPPING, but all resources have been stopped ++ if not clusterHelper.allResourcesStoppedOnNode(hostName): ++ ocf.logger.info("handleRemoteEvents: (at least) node %s has still resources running -> wait" % hostName) ++ allNodesStopped = False ++ break ++ elif state in (AVAILABLE, IN_EVENT, ON_HOLD): ++ ocf.logger.info("handleRemoteEvents: node %s is still %s -> remote event needs to be picked up locally" % (hostName, nodeStateToString(state))) ++ allNodesStopped = False ++ break ++ if allNodesStopped: ++ ocf.logger.info("handleRemoteEvents: nodes %s are stopped -> add remote event %s to force list" % (str(e.Resources), e.EventId)) ++ for n in e.Resources: ++ hostName = clusterHelper.getHostNameFromAzName(n) ++ if hostName in eventIDsForNode: ++ eventIDsForNode[hostName].append(e.EventId) ++ else: ++ eventIDsForNode[hostName] = [e.EventId] ++ elif e.EventStatus == "Started": ++ ocf.logger.info("handleRemoteEvents: remote event already started") ++ ++ # force the start of all events whose nodes are ready (i.e. 
have no more resources running) ++ if len(eventIDsForNode.keys()) > 0: ++ eventIDsToForce = set([item for sublist in eventIDsForNode.values() for item in sublist]) ++ ocf.logger.info("handleRemoteEvents: set nodes %s to IN_EVENT; force remote events %s" % (str(eventIDsForNode.keys()), str(eventIDsToForce))) ++ for node, eventId in eventIDsForNode.items(): ++ self.updateNodeStateAndEvents(IN_EVENT, eventId, node=node) ++ azHelper.forceEvents(eventIDsToForce) ++ ++ ocf.logger.debug("handleRemoteEvents: finished") ++ ++ def handleLocalEvents(self, azEvents): ++ """ ++ Handle a list of own events (as provided by Azure Metadata Service) ++ """ ++ ocf.logger.debug("handleLocalEvents: begin; hostName = %s, azEvents = %s" % (self.hostName, str(azEvents))) ++ ++ azEventIDs = set() ++ for e in azEvents: ++ azEventIDs.add(e.EventId) ++ ++ curState = self.getState() ++ clusterEventIDs = self.getEventIDs() ++ mayUpdateDocVersion = False ++ ocf.logger.info("handleLocalEvents: current state = %s; pending local clusterEvents = %s" % (nodeStateToString(curState), str(clusterEventIDs))) ++ ++ # check if there are currently/still events set for the node ++ if clusterEventIDs: ++ # there are pending events set, so our state must be STOPPING or IN_EVENT ++ i = 0; touchedEventIDs = False ++ while i < len(clusterEventIDs): ++ # clean up pending events that are already finished according to AZ ++ if clusterEventIDs[i] not in azEventIDs: ++ ocf.logger.info("handleLocalEvents: remove finished local clusterEvent %s" % (clusterEventIDs[i])) ++ clusterEventIDs.pop(i) ++ touchedEventIDs = True ++ else: ++ i += 1 ++ if len(clusterEventIDs) > 0: ++ # there are still pending events (either because we're still stopping, or because the event is still in place) ++ # either way, we need to wait ++ if touchedEventIDs: ++ ocf.logger.info("handleLocalEvents: added new local clusterEvent %s" % str(clusterEventIDs)) ++ self.setEventIDs(clusterEventIDs) ++ else: ++ ocf.logger.info("handleLocalEvents: no local clusterEvents were updated") ++ else: ++ # there are no more pending events left after cleanup ++ if clusterHelper.noPendingResourcesOnNode(self.hostName): ++ # and no pending resources on the node -> set it back online ++ ocf.logger.info("handleLocalEvents: all local events finished -> clean up, put node online and AVAILABLE") ++ curState = self.updateNodeStateAndEvents(AVAILABLE, None) ++ self.putNodeOnline() ++ clusterHelper.removeHoldFromNodes() ++ # repeat handleLocalEvents() since we changed status to AVAILABLE ++ else: ++ ocf.logger.info("handleLocalEvents: all local events finished, but some resources have not completed startup yet -> wait") ++ else: ++ # there are no pending events set for us (yet) ++ if curState == AVAILABLE: ++ if len(azEventIDs) > 0: ++ if clusterHelper.otherNodesAvailable(self): ++ ocf.logger.info("handleLocalEvents: can handle local events %s -> set state STOPPING" % (str(azEventIDs))) ++ # this will also set mayUpdateDocVersion = True ++ curState = self.updateNodeStateAndEvents(STOPPING, azEventIDs) ++ else: ++ ocf.logger.info("handleLocalEvents: cannot handle azEvents %s (only node available) -> set state ON_HOLD" % str(azEventIDs)) ++ self.setState(ON_HOLD) ++ else: ++ ocf.logger.debug("handleLocalEvents: no local azEvents to handle") ++ if curState == STOPPING: ++ if clusterHelper.noPendingResourcesOnNode(self.hostName): ++ ocf.logger.info("handleLocalEvents: all local resources are started properly -> put node standby") ++ self.putNodeStandby() ++ mayUpdateDocVersion = True ++ else: ++ 
ocf.logger.info("handleLocalEvents: some local resources are not clean yet -> wait") ++ ++ ocf.logger.debug("handleLocalEvents: finished; mayUpdateDocVersion = %s" % str(mayUpdateDocVersion)) ++ return mayUpdateDocVersion ++ ++############################################################################## ++ ++class raAzEvents: ++ """ ++ Main class for resource agent ++ """ ++ def __init__(self, relevantEventTypes): ++ self.node = Node(self) ++ self.relevantEventTypes = relevantEventTypes ++ ++ def monitor(self): ++ ocf.logger.debug("monitor: begin") ++ ++ pullFailedAttemps = 0 ++ while True: ++ # check if another node is pulling at the same time; ++ # this should only be a concern for the first pull, as setting up Scheduled Events may take up to 2 minutes. ++ if clusterHelper.getAttr(attr_globalPullState) == "PULLING": ++ pullFailedAttemps += 1 ++ if pullFailedAttemps == global_pullMaxAttempts: ++ ocf.logger.warning("monitor: exceeded maximum number of attempts (%d) to pull events" % global_pullMaxAttempts) ++ ocf.logger.debug("monitor: finished") ++ return ocf.OCF_SUCCESS ++ else: ++ ocf.logger.info("monitor: another node is pulling; retry in %d seconds" % global_pullDelaySecs) ++ time.sleep(global_pullDelaySecs) ++ continue ++ ++ # we can pull safely from Azure Metadata Service ++ clusterHelper.setAttr(attr_globalPullState, "PULLING") ++ events = azHelper.pullScheduledEvents() ++ clusterHelper.setAttr(attr_globalPullState, "IDLE") ++ ++ # get current document version ++ curDocVersion = events.DocumentIncarnation ++ lastDocVersion = self.node.getAttr(attr_lastDocVersion) ++ ocf.logger.debug("monitor: lastDocVersion = %s; curDocVersion = %s" % (lastDocVersion, curDocVersion)) ++ ++ # split events local/remote ++ (localEvents, remoteEvents) = self.node.separateEvents(events.Events) ++ ++ # ensure local events are only executing once ++ if curDocVersion != lastDocVersion: ++ ocf.logger.debug("monitor: curDocVersion has not been handled yet") ++ # handleLocalEvents() returns True if mayUpdateDocVersion is True; ++ # this is only the case if we can ensure there are no pending events ++ if self.node.handleLocalEvents(localEvents): ++ ocf.logger.info("monitor: handleLocalEvents completed successfully -> update curDocVersion") ++ self.node.setAttr(attr_lastDocVersion, curDocVersion) ++ else: ++ ocf.logger.debug("monitor: handleLocalEvents still waiting -> keep curDocVersion") ++ else: ++ ocf.logger.info("monitor: already handled curDocVersion, skip") ++ ++ # remove orphaned remote events and then handle the remaining remote events ++ self.node.removeOrphanedEvents(remoteEvents) ++ self.node.handleRemoteEvents(remoteEvents) ++ break ++ ++ ocf.logger.debug("monitor: finished") ++ return ocf.OCF_SUCCESS ++ ++############################################################################## ++ ++def setLoglevel(verbose): ++ # set up writing into syslog ++ loglevel = default_loglevel ++ if verbose: ++ opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1)) ++ urllib2.install_opener(opener) ++ loglevel = ocf.logging.DEBUG ++ ocf.log.setLevel(loglevel) ++ ++description = ( ++ "Microsoft Azure Scheduled Events monitoring agent", ++ """This resource agent implements a monitor for scheduled ++(maintenance) events for a Microsoft Azure VM. ++ ++If any relevant events are found, it moves all Pacemaker resources ++away from the affected node to allow for a graceful shutdown. 
++ ++ Usage: ++ [OCF_RESKEY_eventTypes=VAL] [OCF_RESKEY_verbose=VAL] azure-events ACTION ++ ++ action (required): Supported values: monitor, help, meta-data ++ eventTypes (optional): List of event types to be considered ++ relevant by the resource agent (comma-separated). ++ Supported values: Freeze,Reboot,Redeploy ++ Default = Reboot,Redeploy ++/ verbose (optional): If set to true, displays debug info. ++ Default = false ++ ++ Deployment: ++ crm configure primitive rsc_azure-events ocf:heartbeat:azure-events \ ++ op monitor interval=10s ++ crm configure clone cln_azure-events rsc_azure-events ++ ++For further information on Microsoft Azure Scheduled Events, please ++refer to the following documentation: ++https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events ++""") ++ ++def monitor_action(eventTypes): ++ relevantEventTypes = set(eventTypes.split(",") if eventTypes else []) ++ ra = raAzEvents(relevantEventTypes) ++ return ra.monitor() ++ ++def validate_action(eventTypes): ++ if eventTypes: ++ for event in eventTypes.split(","): ++ if event not in ("Freeze", "Reboot", "Redeploy"): ++ ocf.ocf_exit_reason("Event type not one of Freeze, Reboot, Redeploy: " + eventTypes) ++ return ocf.OCF_ERR_CONFIGURED ++ return ocf.OCF_SUCCESS ++ ++def main(): ++ agent = ocf.Agent("azure-events", shortdesc=description[0], longdesc=description[1]) ++ agent.add_parameter( ++ "eventTypes", ++ shortdesc="List of resources to be considered", ++ longdesc="A comma-separated list of event types that will be handled by this resource agent. (Possible values: Freeze,Reboot,Redeploy)", ++ content_type="string", ++ default="Reboot,Redeploy") ++ agent.add_parameter( ++ "verbose", ++ shortdesc="Enable verbose agent logging", ++ longdesc="Set to true to enable verbose logging", ++ content_type="boolean", ++ default="false") ++ agent.add_action("start", timeout=10, handler=lambda: ocf.OCF_SUCCESS) ++ agent.add_action("stop", timeout=10, handler=lambda: ocf.OCF_SUCCESS) ++ agent.add_action("validate-all", timeout=20, handler=validate_action) ++ agent.add_action("monitor", timeout=240, interval=10, handler=monitor_action) ++ setLoglevel(ocf.is_true(ocf.get_parameter("verbose", "false"))) ++ agent.run() ++ ++if __name__ == '__main__': ++ main() +diff -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am +--- a/heartbeat/Makefile.am 2020-04-16 11:54:08.467619588 +0200 ++++ b/heartbeat/Makefile.am 2020-04-16 12:08:07.788224036 +0200 +@@ -55,7 +55,7 @@ + osp_SCRIPTS = nova-compute-wait \ + NovaEvacuate + +-ocf_SCRIPTS = AoEtarget \ ++ocf_SCRIPTS = AoEtarget \ + AudibleAlarm \ + ClusterMon \ + CTDB \ +@@ -116,10 +116,7 @@ + fio \ + galera \ + garbd \ +- gcp-pd-move \ + gcp-vpc-move-ip \ +- gcp-vpc-move-vip \ +- gcp-vpc-move-route \ + iSCSILogicalUnit \ + iSCSITarget \ + ids \ +@@ -177,6 +174,22 @@ + vsftpd \ + zabbixserver + ++if BUILD_AZURE_EVENTS ++ocf_SCRIPTS += azure-events ++endif ++ ++if BUILD_GCP_PD_MOVE ++ocf_SCRIPTS += gcp-pd-move ++endif ++ ++if BUILD_GCP_VPC_MOVE_ROUTE ++ocf_SCRIPTS += gcp-vpc-move-route ++endif ++ ++if BUILD_GCP_VPC_MOVE_VIP ++ocf_SCRIPTS += gcp-vpc-move-vip ++endif ++ + ocfcommondir = $(OCF_LIB_DIR_PREFIX)/heartbeat + ocfcommon_DATA = ocf-shellfuncs \ + ocf-binaries \ +@@ -205,3 +218,13 @@ + + %.check: % + OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) ./$< meta-data | xmllint --path $(abs_srcdir) --noout --relaxng $(abs_srcdir)/metadata.rng - ++ ++do_spellcheck = printf '[%s]\n' "$(agent)"; \ ++ OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) \ ++ ./$(agent) 
meta-data 2>/dev/null \ ++ | xsltproc $(top_srcdir)/make/extract_text.xsl - \ ++ | aspell pipe list -d en_US --ignore-case \ ++ --home-dir=$(top_srcdir)/make -p spellcheck-ignore \ ++ | sed -n 's|^&\([^:]*\):.*|\1|p'; ++spellcheck: ++ @$(foreach agent,$(ocf_SCRIPTS), $(do_spellcheck)) +diff -uNr a/m4/ac_python_module.m4 b/m4/ac_python_module.m4 +--- a/m4/ac_python_module.m4 1970-01-01 01:00:00.000000000 +0100 ++++ b/m4/ac_python_module.m4 2020-04-14 11:11:26.325806378 +0200 +@@ -0,0 +1,30 @@ ++dnl @synopsis AC_PYTHON_MODULE(modname[, fatal]) ++dnl ++dnl Checks for Python module. ++dnl ++dnl If fatal is non-empty then absence of a module will trigger an ++dnl error. ++dnl ++dnl @category InstalledPackages ++dnl @author Andrew Collier . ++dnl @version 2004-07-14 ++dnl @license AllPermissive ++ ++AC_DEFUN([AC_PYTHON_MODULE],[ ++ AC_MSG_CHECKING(python module: $1) ++ $PYTHON -c "import $1" 2>/dev/null ++ if test $? -eq 0; ++ then ++ AC_MSG_RESULT(yes) ++ eval AS_TR_CPP(HAVE_PYMOD_$1)=yes ++ else ++ AC_MSG_RESULT(no) ++ eval AS_TR_CPP(HAVE_PYMOD_$1)=no ++ # ++ if test -n "$2" ++ then ++ AC_MSG_ERROR(failed to find required module $1) ++ exit 1 ++ fi ++ fi ++]) diff --git a/SOURCES/bz1819965-3-azure-events-decode-when-type-not-str.patch b/SOURCES/bz1819965-3-azure-events-decode-when-type-not-str.patch new file mode 100644 index 0000000..3c62631 --- /dev/null +++ b/SOURCES/bz1819965-3-azure-events-decode-when-type-not-str.patch @@ -0,0 +1,59 @@ +From 57424bd1f158f1ff597034e09ca90da864925a16 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 16 Jul 2020 09:58:55 +0200 +Subject: [PATCH] azure-events: only decode() when exec() output not of type + str + +--- + heartbeat/azure-events.in | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index a48a86309..d4a166d9f 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -179,6 +179,8 @@ class clusterHelper: + ocf.logger.debug("_exec: cmd = %s" % " ".join(command)) + try: + ret = subprocess.check_output(command) ++ if type(ret) != str: ++ ret = ret.decode() + ocf.logger.debug("_exec: return = %s" % ret) + return ret.rstrip() + except Exception as err: +@@ -232,7 +234,7 @@ class clusterHelper: + + nodes = [] + nodeList = clusterHelper._exec("crm_node", "--list") +- for n in nodeList.decode().split("\n"): ++ for n in nodeList.split("\n"): + nodes.append(n.split()[1]) + ocf.logger.debug("getAllNodes: finished; return %s" % str(nodes)) + +@@ -303,7 +305,7 @@ class clusterHelper: + ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary) + return False + summary = summary.split("Transition Summary:")[1] +- ret = summary.decode().split("\n").pop(0) ++ ret = summary.split("\n").pop(0) + + ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret)) + return ret +@@ -324,7 +326,7 @@ class clusterHelper: + if len(resources) == 0: + ret = [] + else: +- ret = resources.decode().split("\n") ++ ret = resources.split("\n") + + ocf.logger.debug("listOperationsOnNode: finished; return = %s" % str(ret)) + return ret +@@ -470,7 +472,7 @@ class Node: + + eventIDStr = clusterHelper.getAttr(attr_pendingEventIDs, node=node) + if eventIDStr: +- eventIDs = eventIDStr.decode().split(",") ++ eventIDs = eventIDStr.split(",") + else: + eventIDs = None + diff --git a/SOURCES/bz1820523-exportfs-1-add-symlink-support.patch b/SOURCES/bz1820523-exportfs-1-add-symlink-support.patch new file mode 100644 index 
0000000..b5c1d96 --- /dev/null +++ b/SOURCES/bz1820523-exportfs-1-add-symlink-support.patch @@ -0,0 +1,51 @@ +From 091a6d1f26140651b7314cfb618c80f9258fd1d4 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 5 May 2020 14:20:43 +0200 +Subject: [PATCH] exportfs: add symlink support + +based on implementation in Filesystem agent +--- + heartbeat/exportfs | 30 ++++++++++++++++++++++-------- + 1 file changed, 22 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/exportfs b/heartbeat/exportfs +index 1cabdee70..294d7eec9 100755 +--- a/heartbeat/exportfs ++++ b/heartbeat/exportfs +@@ -450,14 +450,28 @@ exportfs_validate_all () + fi + } + +-# If someone puts a trailing slash at the end of the export directory, +-# this agent is going to fail in some unexpected ways due to how +-# export strings are matched. The simplest solution here is to strip off +-# a trailing '/' in the directory before processing anything. +-newdir=$(echo "$OCF_RESKEY_directory" | sed -n -e 's/^\(.*\)\/$/\1/p') +-if [ -n "$newdir" ]; then +- OCF_RESKEY_directory=$newdir +-fi ++for dir in $OCF_RESKEY_directory; do ++ # strip off trailing '/' from directory ++ dir=$(echo $dir | sed 's/\/*$//') ++ : ${dir:=/} ++ if [ -e "$dir" ] ; then ++ canonicalized_dir=$(readlink -f "$dir") ++ if [ $? -ne 0 ]; then ++ if [ "$__OCF_ACTION" != "stop" ]; then ++ ocf_exit_reason "Could not canonicalize $dir because readlink failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ fi ++ else ++ if [ "$__OCF_ACTION" != "stop" ]; then ++ ocf_exit_reason "$dir does not exist" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ fi ++ directories+="$canonicalized_dir " ++done ++ ++OCF_RESKEY_directory="${directories%% }" + + NUMDIRS=`echo "$OCF_RESKEY_directory" | wc -w` + OCF_REQUIRED_PARAMS="directory fsid clientspec" diff --git a/SOURCES/bz1820523-exportfs-2-fix-monitor-action.patch b/SOURCES/bz1820523-exportfs-2-fix-monitor-action.patch new file mode 100644 index 0000000..ec1dff4 --- /dev/null +++ b/SOURCES/bz1820523-exportfs-2-fix-monitor-action.patch @@ -0,0 +1,35 @@ +From fda12d3d6495e33e049ed3ac03d6bfb4d65aac3d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 9 Jun 2020 10:27:13 +0200 +Subject: [PATCH] exportfs: fix monitor-action in symlink-logic for when + directory doesnt exist + +--- + heartbeat/exportfs | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/exportfs b/heartbeat/exportfs +index 294d7eec9..d7d3463d9 100755 +--- a/heartbeat/exportfs ++++ b/heartbeat/exportfs +@@ -463,10 +463,16 @@ for dir in $OCF_RESKEY_directory; do + fi + fi + else +- if [ "$__OCF_ACTION" != "stop" ]; then +- ocf_exit_reason "$dir does not exist" +- exit $OCF_ERR_CONFIGURED +- fi ++ case "$__OCF_ACTION" in ++ stop|monitor) ++ canonicalized_dir="$dir" ++ ocf_log debug "$dir does not exist" ++ ;; ++ *) ++ ocf_exit_reason "$dir does not exist" ++ exit $OCF_ERR_CONFIGURED ++ ;; ++ esac + fi + directories+="$canonicalized_dir " + done diff --git a/SOURCES/bz1830716-NovaEvacuate-suppress-expected-error.patch b/SOURCES/bz1830716-NovaEvacuate-suppress-expected-error.patch new file mode 100644 index 0000000..0a7144f --- /dev/null +++ b/SOURCES/bz1830716-NovaEvacuate-suppress-expected-error.patch @@ -0,0 +1,37 @@ +From 143864c694fb3f44a28b805a17ba7a2f6bf9931f Mon Sep 17 00:00:00 2001 +From: Vincent Untz +Date: Sun, 07 Feb 2016 10:30:00 +0100 +Subject: [PATCH] NovaEvacuate: Avoid initial useless message on stderr + +When no evacuation has been done yet, we're spamming syslog with: + + Could not query value of evacuate: 
attribute does not exist
+
+So let's just filter this out, since it's known to be expected on
+initial setup.
+
+As this requires a bashism, also move the script to use bash.
+
+Change-Id: I3351919febc0ef0101e4a08ce6eb412e3c7cfc76
+---
+
+diff --git a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+index 319a747..f764bde 100644
+--- a/heartbeat/NovaEvacuate
++++ b/heartbeat/NovaEvacuate
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ #
+ #
+ # NovaCompute agent manages compute daemons.
+@@ -220,7 +220,8 @@
+ fi
+
+ handle_evacuations $(
+- attrd_updater -n evacuate -A |
++ attrd_updater -n evacuate -A \
++ 2> >(grep -v "attribute does not exist" 1>&2) |
+ sed 's/ value=""/ value="no"/' |
+ tr '="' ' ' |
+ awk '{print $4" "$6}'
diff --git a/SOURCES/bz1832321-rabbitmq-cluster-increase-wait-timeout.patch b/SOURCES/bz1832321-rabbitmq-cluster-increase-wait-timeout.patch
new file mode 100644
index 0000000..558ecc6
--- /dev/null
+++ b/SOURCES/bz1832321-rabbitmq-cluster-increase-wait-timeout.patch
@@ -0,0 +1,60 @@
+From cf1e7bfab984b5e9451a63c25b39c0932e0d9116 Mon Sep 17 00:00:00 2001
+From: Michele Baldessari
+Date: Wed, 6 May 2020 16:11:36 +0200
+Subject: [PATCH] Increase the rabbitmqctl wait timeout during start()
+
+After we start the rabbitmq process we wait for the pid to show up
+and then declare the server to be started successfully.
+This wait is done via 'rabbitmqctl wait'. Now from
+https://www.rabbitmq.com/rabbitmqctl.8.html we have:
+
+ If the specified pidfile is not created or erlang node is not started
+ within --timeout the command will fail. Default timeout is 10 seconds.
+
+This default of 10 seconds might not be enough in overloaded
+environments. So what we want to do here is wait for as much time as
+the start() operation allows us. So we wait for OCF_RESKEY_CRM_meta_timeout
+minus 5 seconds. In the rare and non-sensical case that it is less than
+10s we do not pass a timeout string at all to rabbitmqctl.
+
+Co-Authored-By: John Eckersberg
+---
+ heartbeat/rabbitmq-cluster | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
+index a9ebd37ad..f7d48120c 100755
+--- a/heartbeat/rabbitmq-cluster
++++ b/heartbeat/rabbitmq-cluster
+@@ -294,6 +294,8 @@ rmq_monitor() {
+ rmq_init_and_wait()
+ {
+ local rc
++ local wait_timeout
++ local timeout_string
+
+ prepare_dir $RMQ_PID_DIR
+ prepare_dir $RMQ_LOG_DIR
+@@ -305,11 +307,20 @@ rmq_init_and_wait()
+ setsid sh -c "$RMQ_SERVER > ${RMQ_LOG_DIR}/startup_log 2> ${RMQ_LOG_DIR}/startup_err" &
+
+ ocf_log info "Waiting for server to start"
+- $RMQ_CTL wait $RMQ_PID_FILE
++ # We want to give the wait command almost the full startup timeout we are given
++ # So we use the start operation timeout (in ms), convert it and subtract 5 seconds
++ # In the silly case that it is less than 10 seconds we just skip setting the timeout
++ wait_timeout=`expr $OCF_RESKEY_CRM_meta_timeout / 1000 - 5`
++ if [ $wait_timeout -gt 10 ]; then
++ timeout_string="--timeout ${wait_timeout}"
++ else
++ timeout_string=""
++ fi
++ $RMQ_CTL $timeout_string wait $RMQ_PID_FILE
+ rc=$?
+ if [ $rc -ne $OCF_SUCCESS ]; then + remove_pid +- ocf_log info "rabbitmq-server start failed: $rc" ++ ocf_log info "rabbitmq-server start failed with a timeout of ($timeout_string): $rc" + return $OCF_ERR_GENERIC + fi + diff --git a/SOURCES/bz1836186-pgsql-support-Pacemaker-v2.03-output.patch b/SOURCES/bz1836186-pgsql-support-Pacemaker-v2.03-output.patch new file mode 100644 index 0000000..b0d7ade --- /dev/null +++ b/SOURCES/bz1836186-pgsql-support-Pacemaker-v2.03-output.patch @@ -0,0 +1,52 @@ +--- a/heartbeat/ocf-shellfuncs.in 2020-06-16 10:47:54.462276461 +0200 ++++ b/heartbeat/ocf-shellfuncs.in 2020-06-16 10:43:36.880739016 +0200 +@@ -563,7 +563,7 @@ + # (master/slave) resource. This is defined as a resource where the + # master-max meta attribute is present, and set to greater than zero. + ocf_is_ms() { +- [ ! -z "${OCF_RESKEY_CRM_meta_master_max}" ] && [ "${OCF_RESKEY_CRM_meta_master_max}" -gt 0 ] ++ [ "${OCF_RESKEY_CRM_meta_promotable}" = "true" ] || { [ ! -z "${OCF_RESKEY_CRM_meta_master_max}" ] && [ "${OCF_RESKEY_CRM_meta_master_max}" -gt 0 ]; } + } + + # version check functions +--- a/heartbeat/pgsql 2020-06-16 10:47:54.474276441 +0200 ++++ b/heartbeat/pgsql 2020-06-16 10:49:02.835159083 +0200 +@@ -1021,7 +1021,7 @@ + + # I can't get master node name from $OCF_RESKEY_CRM_meta_notify_master_uname on monitor, + # so I will get master node name using crm_mon -n +- print_crm_mon | tr -d "\t" | tr -d " " | grep -q "^${RESOURCE_NAME}[(:].*[):].*Master" ++ print_crm_mon | grep -q -i -E " +Date: Mon, 18 May 2020 16:18:21 +0200 +Subject: [PATCH] db2: HADR add STANDBY/REMOTE_CATCHUP_PENDING/DISCONNECTED to + correctly promote standby node when master node disappears (e.g. via fencing) + +--- + heartbeat/db2 | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/db2 b/heartbeat/db2 +index 62b288d46..a57fd2bb6 100755 +--- a/heartbeat/db2 ++++ b/heartbeat/db2 +@@ -774,14 +774,19 @@ db2_promote() { + ;; + + STANDBY/PEER/CONNECTED|Standby/Peer) +- # must take over ++ # must take over + ;; + + STANDBY/*PEER/DISCONNECTED|Standby/DisconnectedPeer) +- # must take over forced ++ # must take over by force peer window only + force="by force peer window only" + ;; + ++ # must take over by force ++ STANDBY/REMOTE_CATCHUP_PENDING/DISCONNECTED) ++ force="by force" ++ ;; ++ + *) + return $OCF_ERR_GENERIC + esac diff --git a/SOURCES/bz1839721-podman-force-rm-container-if-rm-fails.patch b/SOURCES/bz1839721-podman-force-rm-container-if-rm-fails.patch new file mode 100644 index 0000000..89fbb06 --- /dev/null +++ b/SOURCES/bz1839721-podman-force-rm-container-if-rm-fails.patch @@ -0,0 +1,53 @@ +From 5a732511db2c49ff6afe0a20e738b565a35273ae Mon Sep 17 00:00:00 2001 +From: Damien Ciabrini +Date: Fri, 29 May 2020 11:57:29 +0200 +Subject: [PATCH] podman: make sure to remove containers with lingering exec + sessions + +It may happen that some "podman exec" commands don't finish +cleanly and leave lingering "Exec sessions" in the container's +state. In that case, a "podman rm" command will always fail. + +To overcome the podman bug, issue a "podman rm -f" command when +we detect a container is stopped but still has some lingering +"Exec sessions" associated with it. 
+ +Related-Bug: rhbz#1839721 +--- + heartbeat/podman | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/heartbeat/podman b/heartbeat/podman +index f77d988fc..e2f6e981b 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -232,6 +232,9 @@ container_exists() + + remove_container() + { ++ local rc ++ local execids ++ + if ocf_is_true "$OCF_RESKEY_reuse"; then + # never remove the container if we have reuse enabled. + return 0 +@@ -244,6 +247,19 @@ remove_container() + fi + ocf_log notice "Cleaning up inactive container, ${CONTAINER}." + ocf_run podman rm $CONTAINER ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ # due to a podman bug (rhbz#1841485), sometimes a stopped ++ # container can still be associated with Exec sessions, in ++ # which case the "podman rm" has to be forced ++ execids=$(podman inspect $CONTAINER --format '{{len .ExecIDs}}') ++ if [ "$execids" -ne "0" ]; then ++ ocf_log warn "Inactive container ${CONTAINER} has lingering exec sessions. Force-remove it." ++ ocf_run podman rm -f $CONTAINER ++ rc=$? ++ fi ++ fi ++ return $rc + } + + podman_simple_status() diff --git a/SOURCES/bz1843999-aliyun-vpc-move-ip-log-output-when-failing.patch b/SOURCES/bz1843999-aliyun-vpc-move-ip-log-output-when-failing.patch new file mode 100644 index 0000000..7a7185f --- /dev/null +++ b/SOURCES/bz1843999-aliyun-vpc-move-ip-log-output-when-failing.patch @@ -0,0 +1,265 @@ +--- a/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:41:35.308379032 +0200 ++++ b/heartbeat/aliyun-vpc-move-ip 2020-06-05 10:48:45.555132686 +0200 +@@ -9,12 +9,46 @@ + # Initialization: + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++# Parameter defaults ++ ++OCF_RESKEY_address_default="" ++OCF_RESKEY_routing_table_default="" ++OCF_RESKEY_interface_default="eth0" ++OCF_RESKEY_profile_default="default" ++OCF_RESKEY_endpoint_default="vpc.aliyuncs.com" ++OCF_RESKEY_aliyuncli_default="detect" ++ ++ ++: ${OCF_RESKEY_address=${OCF_RESKEY_address_default}} ++: ${OCF_RESKEY_routing_table=${OCF_RESKEY_routing_table_default}} ++: ${OCF_RESKEY_interface=${OCF_RESKEY_interface_default}} ++: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} ++: ${OCF_RESKEY_endpoint=${OCF_RESKEY_endpoint_default}} ++: ${OCF_RESKEY_aliyuncli=${OCF_RESKEY_aliyuncli_default}} ++ + ####################################################################### + +-# aliyuncli doesnt work without HOME parameter ++# aliyun cli doesnt work without HOME parameter + export HOME="/root" + + USAGE="usage: $0 {start|stop|status|meta-data}"; ++ ++if [ "${OCF_RESKEY_aliyuncli}" = "detect" ]; then ++ OCF_RESKEY_aliyuncli="$(which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)" ++fi ++ ++if [[ "${OCF_RESKEY_aliyuncli##*/}" == 'aliyuncli' ]]; then ++ OUTPUT="text" ++ EXECUTING='{ print $3 }' ++ IFS_=" " ++ ENDPOINT="" ++elif [[ "${OCF_RESKEY_aliyuncli##*/}" == 'aliyun' ]]; then ++ OUTPUT="table cols=InstanceId,DestinationCidrBlock rows=RouteTables.RouteTable[].RouteEntrys.RouteEntry[]" ++ EXECUTING='{ gsub (" ", "", $0); print $1 }' ++ IFS_="|" ++ ENDPOINT="--endpoint $OCF_RESKEY_endpoint" ++fi + ############################################################################### + + +@@ -24,27 +58,61 @@ + # + ############################################################################### + ++request_create_route_entry() { ++ cmd="${OCF_RESKEY_aliyuncli} vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance 
${ENDPOINT}" ++ ocf_log debug "executing command: $cmd" ++ res=$($cmd 2>&1) ++ rc=$? ++ if [[ $rc -eq 0 ]] ++ then ++ ocf_log debug "result: $res; rc: $rc" ++ else ++ ocf_log err "result: $res; cmd: $cmd; rc: $rc" ++ fi ++ return $rc ++} ++ ++request_delete_route_entry() { ++ cmd="${OCF_RESKEY_aliyuncli} vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE ${ENDPOINT}" ++ ocf_log debug "executing command: $cmd" ++ res=$($cmd) ++ rc=$? ++ if [[ $rc -eq 0 ]] ++ then ++ ocf_log debug "result: $res; rc: $rc" ++ else ++ ocf_log err "result: $res; cmd: $cmd; rc: $rc" ++ fi ++ return $rc ++} + ++request_describe_route_tables() { ++ cmd="${OCF_RESKEY_aliyuncli} vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output ${OUTPUT} ${ENDPOINT}" ++ ocf_log debug "executing command: $cmd" ++ res=$($cmd) ++ rc=$? ++ if [[ $rc -eq 0 ]] ++ then ++ ROUTE_TO_INSTANCE=$(echo "$res" |grep "\s${OCF_RESKEY_address}/" | awk -F "${IFS_}" "${EXECUTING}") ++ ocf_log debug "ROUTE_TO_INSTANCE: $ROUTE_TO_INSTANCE" ++ else ++ ocf_log err "result: $res; cmd: $cmd; rc: $rc" ++ fi ++} + + ip_get_and_configure() { + ocf_log debug "function: ip_get_and_configure" + +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- ++ request_describe_route_tables + if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then + if [ -n "$ROUTE_TO_INSTANCE" ]; then + ip_drop + fi +- +- cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" +- ocf_log debug "executing command: $cmd" +- $cmd ++ request_create_route_entry + rc=$? + while [ $rc -ne 0 ]; do + sleep 1 +- cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" +- ocf_log debug "executing command: $cmd" +- $cmd ++ request_create_route_entry + rc=$? + done + wait_for_started +@@ -68,17 +136,15 @@ + ocf_log debug "function: ip_drop" + cmd="ip addr delete ${OCF_RESKEY_address}/32 dev $OCF_RESKEY_interface" + ocf_log debug "executing command: $cmd" +- $cmd ++ res=$($cmd) + rc=$? + if [ $rc -ne 0 ] && [ $rc -ne 2 ]; then +- ocf_log err "command failed, rc $rc" ++ ocf_log err "command failed, rc: $rc; cmd: $cmd; result: $res" + return $OCF_ERR_GENERIC + fi +- +- cmd="aliyuncli vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text" +- ocf_log debug "executing command: $cmd" +- $cmd +- if [ $? -ne 0 ]; then ++ request_delete_route_entry ++ rc=$? 
++ if [ $rc -ne 0 ]; then + ocf_log err "command failed, rc: $rc" + return $OCF_ERR_GENERIC + fi +@@ -90,26 +156,18 @@ + } + + wait_for_started() { +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_address | awk '{ print $3 }')" +- ++ request_describe_route_tables + while [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; do + sleep 3 +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ request_describe_route_tables + done + } + + wait_for_deleted() { +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- ++ request_describe_route_tables + while [ ! -z "$ROUTE_TO_INSTANCE" ]; do + sleep 1 +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ request_describe_route_tables + done + } + +@@ -124,38 +182,58 @@ + by changing an entry in an specific routing table + + Move IP within a VPC of the Aliyun ECS ++ + ++ ++ ++Path to command line tools for Aliyun ++ ++Path to Aliyun CLI tools ++ ++ ++ + + + VPC private IP address + + vpc ip +- ++ + ++ + + + Name of the routing table, where the route for the IP address should be changed, i.e. vtb-... + + routing table name +- ++ + ++ + + + Name of the network interface, i.e. eth0 + + network interface name +- ++ + +- ++ ++ + +-Valid Aliyun CLI profile name (see 'aliyuncli-ra configure'). ++An endpoint is the service entry of an Alibaba Cloud service, i.e. vpc.cn-beijing.aliyuncs.com ++ ++service endpoint ++ ++ + +-See https://www.alibabacloud.com/help/doc-detail/43039.htm?spm=a2c63.p38356.b99.16.38a914abRZtOU3 for more information about aliyuncli-ra. ++ ++ ++Valid Aliyun CLI profile name (see 'aliyun cli configure'). ++See https://www.alibabacloud.com/help/zh/product/29991.htm for more information about aliyun cli. + + profile name +- ++ + + ++ + + + +@@ -170,6 +248,11 @@ + ecs_ip_validate() { + ocf_log debug "function: validate" + ++ if [ -z "${OCF_RESKEY_aliyuncli}" ]; then ++ ocf_exit_reason "unable to detect aliyuncli binary" ++ exit $OCF_ERR_INSTALLED ++ fi ++ + # IP address + if [ -z "$OCF_RESKEY_address" ]; then + ocf_log err "IP address parameter not set $OCF_RESKEY_ADDRESS!" 
+@@ -250,10 +333,7 @@ + + ecs_ip_monitor() { + ocf_log debug "function: ecsip_monitor: check routing table" +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- ocf_log debug "executing command: $cmd" +- +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ request_describe_route_tables + + if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then + ocf_log debug "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" diff --git a/SOURCES/bz1845574-azure-events-1-handle-exceptions-in-urlopen.patch b/SOURCES/bz1845574-azure-events-1-handle-exceptions-in-urlopen.patch new file mode 100644 index 0000000..fa194c9 --- /dev/null +++ b/SOURCES/bz1845574-azure-events-1-handle-exceptions-in-urlopen.patch @@ -0,0 +1,70 @@ +From 194909ff08cfe75cd5da9f704d8ed4cc9ab40341 Mon Sep 17 00:00:00 2001 +From: Gustavo Figueira +Date: Tue, 19 May 2020 10:58:34 +0200 +Subject: [PATCH 1/2] azure-events: handle exceptions in urlopen The locking in + azure-events does not correctly handle some failures. + +If the metadata server is not recheable or has an error +handling the request, attr_globalPullState will never go +back to IDLE unless the administrator manually changes it. + +> azure-events: ERROR: [Errno 104] Connection reset by peer +> lrmd[2734]: notice: rsc_azure-events_monitor_10000:113088:stderr [ ocf-exit-reason:[Errno 104] Connection reset by peer ] +--- + heartbeat/azure-events.in | 16 +++++++++++++--- + 1 file changed, 13 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index 8709d97e3..bd812f4b2 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -82,9 +82,19 @@ class azHelper: + req = urllib2.Request(url, postData) + req.add_header("Metadata", "true") + req.add_header("User-Agent", USER_AGENT) +- resp = urllib2.urlopen(req) +- data = resp.read() +- ocf.logger.debug("_sendMetadataRequest: response = %s" % data) ++ try: ++ resp = urllib2.urlopen(req) ++ except URLError as e: ++ if hasattr(e, 'reason'): ++ print('We failed to reach a server. Reason: '), e.reason ++ clusterHelper.setAttr(attr_globalPullState, "IDLE") ++ elif hasattr(e, 'code'): ++ print('The server couldn\'t fulfill the request. Error code: '), e.code ++ clusterHelper.setAttr(attr_globalPullState, "IDLE") ++ else: ++ data = resp.read() ++ ocf.logger.debug("_sendMetadataRequest: response = %s" % data) ++ + if data: + data = json.loads(data) + + +From c4071ec4a82fcb831f170f341e0790633e4b904f Mon Sep 17 00:00:00 2001 +From: Gustavo Figueira +Date: Tue, 19 May 2020 12:53:22 +0200 +Subject: [PATCH 2/2] azure-events: use ocf.logger.warning instead of print + +--- + heartbeat/azure-events.in | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index bd812f4b2..a48a86309 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -86,10 +86,10 @@ class azHelper: + resp = urllib2.urlopen(req) + except URLError as e: + if hasattr(e, 'reason'): +- print('We failed to reach a server. Reason: '), e.reason ++ ocf.logger.warning("Failed to reach the server: %s" % e.reason) + clusterHelper.setAttr(attr_globalPullState, "IDLE") + elif hasattr(e, 'code'): +- print('The server couldn\'t fulfill the request. Error code: '), e.code ++ ocf.logger.warning("The server couldn\'t fulfill the request. 
Error code: %s" % e.code) + clusterHelper.setAttr(attr_globalPullState, "IDLE") + else: + data = resp.read() diff --git a/SOURCES/bz1845574-azure-events-2-import-urlerror-encode-postdata.patch b/SOURCES/bz1845574-azure-events-2-import-urlerror-encode-postdata.patch new file mode 100644 index 0000000..7795e78 --- /dev/null +++ b/SOURCES/bz1845574-azure-events-2-import-urlerror-encode-postdata.patch @@ -0,0 +1,68 @@ +From f2bf1d8a07ea810099b03469883cb7f485ab9ac1 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 27 Jul 2020 10:09:43 +0200 +Subject: [PATCH 1/2] azure-events: import URLError and encode postData when + necessary + +--- + heartbeat/azure-events.in | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index d4a166d9f..a7f359468 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -13,8 +13,10 @@ import subprocess + import json + try: + import urllib2 ++ from urllib2 import URLError + except ImportError: + import urllib.request as urllib2 ++ from urllib.error import URLError + import socket + from collections import defaultdict + +@@ -76,9 +78,13 @@ class azHelper: + Send a request to Azure's Azure Metadata Service API + """ + url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version) ++ data = "" + ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData)) + ocf.logger.debug("_sendMetadataRequest: url = %s" % url) + ++ if postData and type(postData) != bytes: ++ postData = postData.encode() ++ + req = urllib2.Request(url, postData) + req.add_header("Metadata", "true") + req.add_header("User-Agent", USER_AGENT) + +From 1ab5d71bff95eb271f1e1bbc401961dc313219d9 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 29 Jul 2020 21:25:43 +0200 +Subject: [PATCH 2/2] azure-events: report error if jsondata not received + +--- + heartbeat/azure-events.in | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index a7f359468..3a24d6358 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -117,8 +117,12 @@ class azHelper: + jsondata = azHelper._sendMetadataRequest(azHelper.instance_api) + ocf.logger.debug("getInstanceInfo: json = %s" % jsondata) + +- ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) +- return attrDict(jsondata["compute"]) ++ if jsondata: ++ ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) ++ return attrDict(jsondata["compute"]) ++ else: ++ ocf.ocf_exit_reason("getInstanceInfo: Unable to get instance info") ++ sys.exit(ocf.OCF_ERR_GENERIC) + + @staticmethod + def pullScheduledEvents(): diff --git a/SOURCES/bz1845581-nfsserver-dont-log-error-message-file-doesnt-exist.patch b/SOURCES/bz1845581-nfsserver-dont-log-error-message-file-doesnt-exist.patch new file mode 100644 index 0000000..06da3b4 --- /dev/null +++ b/SOURCES/bz1845581-nfsserver-dont-log-error-message-file-doesnt-exist.patch @@ -0,0 +1,31 @@ +From 314eadcd683551bd79b644de05cbf0e425c84f83 Mon Sep 17 00:00:00 2001 +From: Kazunori INOUE +Date: Tue, 9 Jun 2020 13:30:32 +0000 +Subject: [PATCH] nfsserver: prevent error messages when /etc/sysconfig/nfs + does not exist + +--- + heartbeat/nfsserver-redhat.sh | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/nfsserver-redhat.sh b/heartbeat/nfsserver-redhat.sh +index 
cef0862ee..73a70c186 100644 +--- a/heartbeat/nfsserver-redhat.sh ++++ b/heartbeat/nfsserver-redhat.sh +@@ -150,10 +150,12 @@ set_env_args() + + # override local nfs config. preserve previous local config though. + if [ -s $tmpconfig ]; then +- cat $NFS_SYSCONFIG | grep -q -e "$NFS_SYSCONFIG_AUTOGEN_TAG" > /dev/null 2>&1 +- if [ $? -ne 0 ]; then +- # backup local nfs config if it doesn't have our HA autogen tag in it. +- mv -f $NFS_SYSCONFIG $NFS_SYSCONFIG_LOCAL_BACKUP ++ if [ -f "$NFS_SYSCONFIG" ]; then ++ cat $NFS_SYSCONFIG | grep -q -e "$NFS_SYSCONFIG_AUTOGEN_TAG" > /dev/null 2>&1 ++ if [ $? -ne 0 ]; then ++ # backup local nfs config if it doesn't have our HA autogen tag in it. ++ mv -f $NFS_SYSCONFIG $NFS_SYSCONFIG_LOCAL_BACKUP ++ fi + fi + + cat $tmpconfig | grep -q -e "$NFS_SYSCONFIG_AUTOGEN_TAG" > /dev/null 2>&1 diff --git a/SOURCES/bz1845583-exportfs-1-describe-clientspec-format-in-metadata.patch b/SOURCES/bz1845583-exportfs-1-describe-clientspec-format-in-metadata.patch new file mode 100644 index 0000000..fd685b1 --- /dev/null +++ b/SOURCES/bz1845583-exportfs-1-describe-clientspec-format-in-metadata.patch @@ -0,0 +1,27 @@ +From f572186ec9bd26c791771a18d38804cfde602578 Mon Sep 17 00:00:00 2001 +From: zzhou1 <10611019+zzhou1@users.noreply.github.com> +Date: Tue, 3 Sep 2019 09:24:23 +0000 +Subject: [PATCH] exportfs: doc clarification for clientspec format + +Point out the standard of the format is aligned with `man exportfs`, and also point out the correct way to deal with the use case to export the same directory(-ies) to multiple subnets. +--- + heartbeat/exportfs | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/heartbeat/exportfs b/heartbeat/exportfs +index 80ed057f2..dc609a0b4 100755 +--- a/heartbeat/exportfs ++++ b/heartbeat/exportfs +@@ -47,6 +47,12 @@ Manages NFS exports + + The client specification allowing remote machines to mount the directory + (or directories) over NFS. ++ ++Note: it follows the format defined in "man exportfs". For example, in ++the use case to export the directory(-ies) for multiple subnets, please ++do config a dedicated primitive for each subnet CIDR ip address, ++and do not attempt to use multiple CIDR ip addresses in a space ++seperated list, like in /etc/exports. + + + Client ACL. diff --git a/SOURCES/bz1845583-exportfs-2-fix-typo.patch b/SOURCES/bz1845583-exportfs-2-fix-typo.patch new file mode 100644 index 0000000..0406f31 --- /dev/null +++ b/SOURCES/bz1845583-exportfs-2-fix-typo.patch @@ -0,0 +1,23 @@ +From 0f8189161ef5c09c6a6df96cb15937d430f75353 Mon Sep 17 00:00:00 2001 +From: Valentin Vidic +Date: Wed, 23 Oct 2019 22:11:14 +0200 +Subject: [PATCH] Low: exportfs: Fix spelling error + +Replace seperated -> separated. +--- + heartbeat/exportfs | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/exportfs b/heartbeat/exportfs +index dc609a0b4..d79aced88 100755 +--- a/heartbeat/exportfs ++++ b/heartbeat/exportfs +@@ -52,7 +52,7 @@ Note: it follows the format defined in "man exportfs". For example, in + the use case to export the directory(-ies) for multiple subnets, please + do config a dedicated primitive for each subnet CIDR ip address, + and do not attempt to use multiple CIDR ip addresses in a space +-seperated list, like in /etc/exports. ++separated list, like in /etc/exports. + + + Client ACL. 
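# The two exportfs patches above clarify that clientspec follows the
# "man exportfs" format, so exporting one directory to several subnets
# takes one primitive per CIDR rather than a space-separated list. A
# hedged pcs sketch of that layout (resource names, directory and CIDR
# values are illustrative, not taken from the patches):
pcs resource create nfs-export-net1 ocf:heartbeat:exportfs \
    clientspec=192.168.1.0/24 directory=/srv/nfs fsid=1 options=rw
pcs resource create nfs-export-net2 ocf:heartbeat:exportfs \
    clientspec=192.168.2.0/24 directory=/srv/nfs fsid=1 options=rw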
diff --git a/SOURCES/bz1846733-gcp-vpc-move-vip-1-support-multiple-alias-ips.patch b/SOURCES/bz1846733-gcp-vpc-move-vip-1-support-multiple-alias-ips.patch new file mode 100644 index 0000000..13401c2 --- /dev/null +++ b/SOURCES/bz1846733-gcp-vpc-move-vip-1-support-multiple-alias-ips.patch @@ -0,0 +1,317 @@ +--- a/heartbeat/gcp-vpc-move-vip.in 2020-08-17 10:33:22.132531259 +0200 ++++ b/heartbeat/gcp-vpc-move-vip.in 2020-08-17 10:34:54.050633259 +0200 +@@ -22,7 +22,8 @@ + import sys + import time + +-OCF_FUNCTIONS_DIR="%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" ++ % os.environ.get("OCF_ROOT")) + sys.path.append(OCF_FUNCTIONS_DIR) + + from ocf import * +@@ -43,6 +44,10 @@ + import urllib2 as urlrequest + + ++# Constants for alias add/remove modes ++ADD = 0 ++REMOVE = 1 ++ + CONN = None + THIS_VM = None + ALIAS = None +@@ -53,27 +58,27 @@ + + + 1.0 +- Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance +- Floating IP Address on Google Cloud Platform ++ Floating IP Address or Range on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP range to a running instance ++ Floating IP Address or Range on Google Cloud Platform + + +- IP Address to be added including CIDR. E.g 192.168.0.1/32 +- IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ IP range to be added including CIDR netmask (e.g., 192.168.0.1/32) ++ IP range to be added including CIDR netmask (e.g., 192.168.0.1/32) + + +- ++ + Subnet name for the Alias IP + Subnet name for the Alias IP + + +- +- List of hosts in the cluster ++ ++ List of hosts in the cluster, separated by spaces + Host list + + + +- If enabled (set to true), IP failover logs will be posted to stackdriver logging. Using stackdriver logging requires additional libraries (google-cloud-logging). +- Stackdriver-logging support. Requires additional libraries (google-cloud-logging). ++ If enabled (set to true), IP failover logs will be posted to stackdriver logging ++ Stackdriver-logging support + + + +@@ -107,7 +112,8 @@ + url = '%s?%s' % (metadata_url, params) + request = urlrequest.Request(url, headers=METADATA_HEADERS) + request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open(request, timeout=timeout * 1.1).read().decode("utf-8") ++ return request_opener.open( ++ request, timeout=timeout * 1.1).read().decode("utf-8") + + + def get_instance(project, zone, instance): +@@ -134,17 +140,21 @@ + time.sleep(1) + + +-def set_alias(project, zone, instance, alias, alias_range_name=None): +- fingerprint = get_network_ifaces(project, zone, instance)[0]['fingerprint'] ++def set_aliases(project, zone, instance, aliases, fingerprint): ++ """Sets the alias IP ranges for an instance. ++ ++ Args: ++ project: string, the project in which the instance resides. ++ zone: string, the zone in which the instance resides. ++ instance: string, the name of the instance. ++ aliases: list, the list of dictionaries containing alias IP ranges ++ to be added to or removed from the instance. ++ fingerprint: string, the fingerprint of the network interface. 
++ """ + body = { +- 'aliasIpRanges': [], +- 'fingerprint': fingerprint ++ 'aliasIpRanges': aliases, ++ 'fingerprint': fingerprint + } +- if alias: +- obj = {'ipCidrRange': alias} +- if alias_range_name: +- obj['subnetworkRangeName'] = alias_range_name +- body['aliasIpRanges'].append(obj) + + request = CONN.instances().updateNetworkInterface( + instance=instance, networkInterface='nic0', project=project, zone=zone, +@@ -153,21 +163,75 @@ + wait_for_operation(project, zone, operation) + + +-def get_alias(project, zone, instance): +- iface = get_network_ifaces(project, zone, instance) ++def add_rm_alias(mode, project, zone, instance, alias, alias_range_name=None): ++ """Adds or removes an alias IP range for a GCE instance. ++ ++ Args: ++ mode: int, a constant (ADD (0) or REMOVE (1)) indicating the ++ operation type. ++ project: string, the project in which the instance resides. ++ zone: string, the zone in which the instance resides. ++ instance: string, the name of the instance. ++ alias: string, the alias IP range to be added to or removed from ++ the instance. ++ alias_range_name: string, the subnet name for the alias IP range. ++ ++ Returns: ++ True if the existing list of alias IP ranges was modified, or False ++ otherwise. ++ """ ++ ifaces = get_network_ifaces(project, zone, instance) ++ fingerprint = ifaces[0]['fingerprint'] ++ ++ try: ++ old_aliases = ifaces[0]['aliasIpRanges'] ++ except KeyError: ++ old_aliases = [] ++ ++ new_aliases = [a for a in old_aliases if a['ipCidrRange'] != alias] ++ ++ if alias: ++ if mode == ADD: ++ obj = {'ipCidrRange': alias} ++ if alias_range_name: ++ obj['subnetworkRangeName'] = alias_range_name ++ new_aliases.append(obj) ++ elif mode == REMOVE: ++ pass # already removed during new_aliases build ++ else: ++ raise ValueError('Invalid value for mode: {}'.format(mode)) ++ ++ if (sorted(new_aliases) != sorted(old_aliases)): ++ set_aliases(project, zone, instance, new_aliases, fingerprint) ++ return True ++ else: ++ return False ++ ++ ++def add_alias(project, zone, instance, alias, alias_range_name=None): ++ return add_rm_alias(ADD, project, zone, instance, alias, alias_range_name) ++ ++ ++def remove_alias(project, zone, instance, alias): ++ return add_rm_alias(REMOVE, project, zone, instance, alias) ++ ++ ++def get_aliases(project, zone, instance): ++ ifaces = get_network_ifaces(project, zone, instance) + try: +- return iface[0]['aliasIpRanges'][0]['ipCidrRange'] ++ aliases = ifaces[0]['aliasIpRanges'] ++ return [a['ipCidrRange'] for a in aliases] + except KeyError: +- return '' ++ return [] + + +-def get_localhost_alias(): ++def get_localhost_aliases(): + net_iface = get_metadata('instance/network-interfaces', {'recursive': True}) + net_iface = json.loads(net_iface) + try: +- return net_iface[0]['ipAliases'][0] ++ return net_iface[0]['ipAliases'] + except (KeyError, IndexError): +- return '' ++ return [] + + + def get_zone(project, instance): +@@ -201,21 +265,17 @@ + + + def gcp_alias_start(alias): +- my_alias = get_localhost_alias() ++ my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') + +- # If I already have the IP, exit. If it has an alias IP that isn't the VIP, +- # then remove it +- if my_alias == alias: ++ if alias in my_aliases: ++ # TODO: Do we need to check alias_range_name? + logger.info( + '%s already has %s attached. 
No action required' % (THIS_VM, alias)) + sys.exit(OCF_SUCCESS) +- elif my_alias: +- logger.info('Removing %s from %s' % (my_alias, THIS_VM)) +- set_alias(project, my_zone, THIS_VM, '') + +- # Loops through all hosts & remove the alias IP from the host that has it ++ # If the alias is currently attached to another host, detach it. + hostlist = os.environ.get('OCF_RESKEY_hostlist', '') + if hostlist: + hostlist = hostlist.replace(THIS_VM, '').split() +@@ -223,47 +283,53 @@ + hostlist = get_instances_list(project, THIS_VM) + for host in hostlist: + host_zone = get_zone(project, host) +- host_alias = get_alias(project, host_zone, host) +- if alias == host_alias: ++ host_aliases = get_aliases(project, host_zone, host) ++ if alias in host_aliases: + logger.info( +- '%s is attached to %s - Removing all alias IP addresses from %s' % +- (alias, host, host)) +- set_alias(project, host_zone, host, '') ++ '%s is attached to %s - Removing %s from %s' % ++ (alias, host, alias, host)) ++ remove_alias(project, host_zone, host, alias) + break + +- # add alias IP to localhost +- set_alias( ++ # Add alias IP range to localhost ++ add_alias( + project, my_zone, THIS_VM, alias, + os.environ.get('OCF_RESKEY_alias_range_name')) + +- # Check the IP has been added +- my_alias = get_localhost_alias() +- if alias == my_alias: ++ # Verify that the IP range has been added ++ my_aliases = get_localhost_aliases() ++ if alias in my_aliases: + logger.info('Finished adding %s to %s' % (alias, THIS_VM)) +- elif my_alias: +- logger.error( +- 'Failed to add IP. %s has an IP attached but it isn\'t %s' % +- (THIS_VM, alias)) +- sys.exit(OCF_ERR_GENERIC) + else: +- logger.error('Failed to add IP address %s to %s' % (alias, THIS_VM)) ++ if my_aliases: ++ logger.error( ++ 'Failed to add alias IP range %s. %s has alias IP ranges attached but' ++ + ' they don\'t include %s' % (alias, THIS_VM, alias)) ++ else: ++ logger.error( ++ 'Failed to add IP range %s. %s has no alias IP ranges attached' ++ % (alias, THIS_VM)) + sys.exit(OCF_ERR_GENERIC) + + + def gcp_alias_stop(alias): +- my_alias = get_localhost_alias() ++ my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') + +- if my_alias == alias: +- logger.info('Removing %s from %s' % (my_alias, THIS_VM)) +- set_alias(project, my_zone, THIS_VM, '') ++ if alias in my_aliases: ++ logger.info('Removing %s from %s' % (alias, THIS_VM)) ++ remove_alias(project, my_zone, THIS_VM, alias) ++ else: ++ logger.info( ++ '%s is not attached to %s. 
No action required' ++ % (alias, THIS_VM)) + + + def gcp_alias_status(alias): +- my_alias = get_localhost_alias() +- if alias == my_alias: +- logger.info('%s has the correct IP address attached' % THIS_VM) ++ my_aliases = get_localhost_aliases() ++ if alias in my_aliases: ++ logger.info('%s has the correct IP range attached' % THIS_VM) + else: + sys.exit(OCF_NOT_RUNNING) + +@@ -275,7 +341,8 @@ + + # Populate global vars + try: +- CONN = googleapiclient.discovery.build('compute', 'v1') ++ CONN = googleapiclient.discovery.build('compute', 'v1', ++ cache_discovery=False) + except Exception as e: + logger.error('Couldn\'t connect with google api: ' + str(e)) + sys.exit(OCF_ERR_CONFIGURED) +@@ -283,7 +350,8 @@ + try: + THIS_VM = get_metadata('instance/name') + except Exception as e: +- logger.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) ++ logger.error('Couldn\'t get instance name, is this running inside GCE?: ' ++ + str(e)) + sys.exit(OCF_ERR_CONFIGURED) + + ALIAS = os.environ.get('OCF_RESKEY_alias_ip') +@@ -309,7 +377,8 @@ + formatter = logging.Formatter('gcp:alias "%(message)s"') + handler.setFormatter(formatter) + log.addHandler(handler) +- logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) ++ logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': ++ OCF_RESOURCE_INSTANCE}) + except ImportError: + logger.error('Couldn\'t import google.cloud.logging, ' + 'disabling Stackdriver-logging support') diff --git a/SOURCES/bz1846733-gcp-vpc-move-vip-2-fix-list-sort.patch b/SOURCES/bz1846733-gcp-vpc-move-vip-2-fix-list-sort.patch new file mode 100644 index 0000000..887fc99 --- /dev/null +++ b/SOURCES/bz1846733-gcp-vpc-move-vip-2-fix-list-sort.patch @@ -0,0 +1,32 @@ +From 2b22a14a128b87214bfb1ece221274aac78ba81b Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Tue, 18 Aug 2020 18:43:13 -0700 +Subject: [PATCH] gcp-vpc-move-vip: Fix sort for list of dicts in python3 + +python2 sorts a list of dicts of `{'ipCidrRange': }` correctly by +default. python3 fails with a TypeError: + +`TypeError: '<' not supported between instances of 'dict' and 'dict'` + +Fix this by using the key parameter of sorted(). python2 also supports +this. + +Signed-off-by: Reid Wahl +--- + heartbeat/gcp-vpc-move-vip.in | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index 85d59f6bb..01d91a59d 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -200,7 +200,8 @@ def add_rm_alias(mode, project, zone, instance, alias, alias_range_name=None): + else: + raise ValueError('Invalid value for mode: {}'.format(mode)) + +- if (sorted(new_aliases) != sorted(old_aliases)): ++ if (sorted(new_aliases, key=lambda item: item.get('ipCidrRange')) ++ != sorted(old_aliases, key=lambda item: item.get('ipCidrRange'))): + set_aliases(project, zone, instance, new_aliases, fingerprint) + return True + else: diff --git a/SOURCES/bz1850778-azure-lb-fix-redirect-issue.patch b/SOURCES/bz1850778-azure-lb-fix-redirect-issue.patch new file mode 100644 index 0000000..b171613 --- /dev/null +++ b/SOURCES/bz1850778-azure-lb-fix-redirect-issue.patch @@ -0,0 +1,54 @@ +From d22700fc5d5098c683b465ea0fede43803fd4d6b Mon Sep 17 00:00:00 2001 +From: Reid wahl +Date: Tue, 7 Jul 2020 02:18:09 -0700 +Subject: [PATCH] azure-lb: Don't redirect nc listener output to pidfile + +The `lb_start()` function spawns an `nc` listener background process +and echoes the resulting pid to `$pidfile`. 
Due to a bug in the +redirection, all future data received by the `nc` process is also +appended to `$pidfile`. + +If binary data is received later and appended to `$pidfile`, the +monitor operation fails when `grep` searches the now-binary file. + +``` +line 97: kill: Binary: arguments must be process or job IDs ] +line 97: kill: file: arguments must be process or job IDs ] +line 97: kill: /var/run/nc_PF2_02.pid: arguments must be process or job + IDs ] +line 97: kill: matches: arguments must be process or job IDs ] +``` + +Then the start operation fails during recovery. `lb_start()` spawns a +new `nc` process, but the old process is still running and using the +configured port. + +``` +nc_PF2_02_start_0:777:stderr [ Ncat: bind to :::62502: Address + already in use. QUITTING. ] +``` + +This patch fixes the issue by removing the `nc &` command from the +section whose output gets redirected to `$pidfile`. Now, only the `nc` +PID is echoed to `$pidfile`. + +Resolves: RHBZ#1850778 +Resolves: RHBZ#1850779 +--- + heartbeat/azure-lb | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb +index 05c134514..05755d778 100755 +--- a/heartbeat/azure-lb ++++ b/heartbeat/azure-lb +@@ -113,7 +113,8 @@ lb_start() { + if ! lb_monitor; then + ocf_log debug "Starting $process: $cmd" + # Execute the command as created above +- eval "$cmd & echo \$!" > $pidfile ++ $cmd & ++ echo $! > $pidfile + if lb_monitor; then + ocf_log debug "$process: $cmd started successfully, calling monitor" + lb_monitor diff --git a/SOURCES/bz1905587-aws-add-imdsv2-support.patch b/SOURCES/bz1905587-aws-add-imdsv2-support.patch new file mode 100644 index 0000000..09772cc --- /dev/null +++ b/SOURCES/bz1905587-aws-add-imdsv2-support.patch @@ -0,0 +1,97 @@ +From 8f10d0eb1e33d38ab6e89015a903620c54edd7c1 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 13 Nov 2020 16:36:20 +0100 +Subject: [PATCH] AWS agents: add support for IMDSv2 + +--- + heartbeat/aws-vpc-move-ip | 5 +++-- + heartbeat/aws-vpc-route53.in | 3 ++- + heartbeat/awseip | 9 +++++---- + heartbeat/awsvip | 7 ++++--- + 4 files changed, 14 insertions(+), 10 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 72a89ecb1..cbb629b00 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -215,7 +215,8 @@ ec2ip_validate() { + return $OCF_ERR_CONFIGURED + fi + +- EC2_INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)" ++ TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++ EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") + + if [ -z "${EC2_INSTANCE_ID}" ]; then + ocf_exit_reason "Instance ID not found. Is this a EC2 instance?" +@@ -329,7 +330,7 @@ ec2ip_get_instance_eni() { + fi + ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + +- cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id" ++ cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id -H \"X-aws-ec2-metadata-token: $TOKEN\"" + ocf_log debug "executing command: $cmd" + EC2_NETWORK_INTERFACE_ID="$(eval $cmd)" + rc=$? 
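# Every hunk in this patch applies the same IMDSv2 pattern: request a
# session token once, then send it as a header with each metadata call.
# Standalone sketch using the exact endpoint and headers shown above:
TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" \
    -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id \
    -H "X-aws-ec2-metadata-token: $TOKEN")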
+diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index b06b93726..4fb17019b 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -347,7 +347,8 @@ r53_monitor() { + _get_ip() { + case $OCF_RESKEY_ip in + local|public) +- IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4)";; ++ TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++ IPADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4 -H "X-aws-ec2-metadata-token: $TOKEN");; + *.*.*.*) + IPADDRESS="${OCF_RESKEY_ip}";; + esac +diff --git a/heartbeat/awseip b/heartbeat/awseip +index 445a03666..de1967774 100755 +--- a/heartbeat/awseip ++++ b/heartbeat/awseip +@@ -149,12 +149,12 @@ awseip_start() { + awseip_monitor && return $OCF_SUCCESS + + if [ -n "${PRIVATE_IP_ADDRESS}" ]; then +- NETWORK_INTERFACES_MACS="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/)" ++ NETWORK_INTERFACES_MACS=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/ -H "X-aws-ec2-metadata-token: $TOKEN") + for MAC in ${NETWORK_INTERFACES_MACS}; do +- curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/local-ipv4s | ++ curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/local-ipv4s -H "X-aws-ec2-metadata-token: $TOKEN" | + grep -q "^${PRIVATE_IP_ADDRESS}$" + if [ $? -eq 0 ]; then +- NETWORK_ID="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id)" ++ NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN") + fi + done + $AWSCLI --profile $OCF_RESKEY_profile ec2 associate-address \ +@@ -244,7 +244,8 @@ AWSCLI="${OCF_RESKEY_awscli}" + ELASTIC_IP="${OCF_RESKEY_elastic_ip}" + ALLOCATION_ID="${OCF_RESKEY_allocation_id}" + PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}" +-INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)" ++TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") + + case $__OCF_ACTION in + start) +diff --git a/heartbeat/awsvip b/heartbeat/awsvip +index 3eb31e6ae..8050107e8 100755 +--- a/heartbeat/awsvip ++++ b/heartbeat/awsvip +@@ -206,9 +206,10 @@ esac + + AWSCLI="${OCF_RESKEY_awscli}" + SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}" +-INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)" +-MAC_ADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/mac)" +-NETWORK_ID="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id)" ++TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") ++MAC_ADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/mac -H "X-aws-ec2-metadata-token: $TOKEN") ++NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN") + + case $__OCF_ACTION in + start) diff --git a/SOURCES/dont-use-ocf_attribute_target-for-metadata.patch b/SOURCES/dont-use-ocf_attribute_target-for-metadata.patch new file mode 
100644 index 0000000..d868593 --- /dev/null +++ b/SOURCES/dont-use-ocf_attribute_target-for-metadata.patch @@ -0,0 +1,76 @@ +From 2dbfbd8ee3c1547f941507ab4109aa04eec0ef5a Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Mon, 16 Jul 2018 20:24:04 +0200 +Subject: [PATCH] Do not call ocf_attribute_target in the meta-data function + +Starting with pacemaker-1.1.19 a "crm_node -n" call triggers +a CRM_OP_NODE_INFO cluster operation. If this is called +from a bundle with 1.1.19 code (or later) running on a 1.1.18 +cluster, during a meta-data call we will get the following error in the +cluster logs: +Jul 14 11:35:27 [20041] controller-0 crmd: error: handle_request: Unexpected request (node-info) sent to non-DC node +Jul 14 11:35:27 [20041] controller-0 crmd: error: handle_request: Unexpected + +By not calling ocf_attribute_target (which triggers a crm_node -n +call) when polling for meta-data we do not run into this issue. + +This can easily get triggered when creating a resource invoking +crm_node -n inside a 1.1.19 container with pcs, as that invokes +the 'meta-data' action explicitely. + +Co-Authored-By: Damien Ciabrini +Suggested-By: Ken Gaillot +--- + heartbeat/galera | 6 ++++-- + heartbeat/rabbitmq-cluster | 4 +++- + heartbeat/redis.in | 4 +++- + 3 files changed, 10 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/galera b/heartbeat/galera +index 270bdaf1b..4f341ceef 100755 +--- a/heartbeat/galera ++++ b/heartbeat/galera +@@ -66,9 +66,11 @@ + + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs +-. ${OCF_FUNCTIONS_DIR}/mysql-common.sh + +-NODENAME=$(ocf_attribute_target) ++if [ "$__OCF_ACTION" != "meta-data" ]; then ++ . ${OCF_FUNCTIONS_DIR}/mysql-common.sh ++ NODENAME=$(ocf_attribute_target) ++fi + + # It is common for some galera instances to store + # check user that can be used to query status +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 9ff49e075..54a16c941 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -37,7 +37,9 @@ RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia" + RMQ_PID_DIR="/var/run/rabbitmq" + RMQ_PID_FILE="/var/run/rabbitmq/rmq.pid" + RMQ_LOG_DIR="/var/log/rabbitmq" +-NODENAME=$(ocf_attribute_target) ++if [ "$__OCF_ACTION" != "meta-data" ]; then ++ NODENAME=$(ocf_attribute_target) ++fi + + # this attr represents the current active local rmq node name. 
+ # when rmq stops or the node is fenced, this attr disappears +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index d5eb8f664..ddc62d8a7 100644 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -664,7 +664,9 @@ redis_validate() { + fi + } + +-NODENAME=$(ocf_attribute_target) ++if [ "$__OCF_ACTION" != "meta-data" ]; then ++ NODENAME=$(ocf_attribute_target) ++fi + if [ -r "$REDIS_CONFIG" ]; then + clientpasswd="$(sed -n -e 's/^\s*requirepass\s*\(.*\)\s*$/\1/p' < $REDIS_CONFIG | tail -n 1)" + fi diff --git a/SOURCES/findif-only-match-lines-with-netmasks.patch b/SOURCES/findif-only-match-lines-with-netmasks.patch new file mode 100644 index 0000000..6afdd55 --- /dev/null +++ b/SOURCES/findif-only-match-lines-with-netmasks.patch @@ -0,0 +1,25 @@ +From 2437d3879270f8febc5353e09898dd7d0aee08af Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 1 Aug 2018 09:54:39 +0200 +Subject: [PATCH] findif: only match lines containing netmasks + +--- + heartbeat/findif.sh | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh +index fc84cf0ec..66bc6d56a 100644 +--- a/heartbeat/findif.sh ++++ b/heartbeat/findif.sh +@@ -215,9 +215,9 @@ findif() + fi + if [ -n "$nic" ] ; then + # NIC supports more than two. +- set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') ++ set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') + else +- set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') ++ set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') + fi + if [ $# = 0 ] ; then + case $OCF_RESKEY_ip in diff --git a/SOURCES/lvmlockd-add-cmirrord-support.patch b/SOURCES/lvmlockd-add-cmirrord-support.patch new file mode 100644 index 0000000..cfaf001 --- /dev/null +++ b/SOURCES/lvmlockd-add-cmirrord-support.patch @@ -0,0 +1,118 @@ +From d4c9de6264251e4dbc91b64aaf7f500919d08d60 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 17 Aug 2018 12:48:46 +0200 +Subject: [PATCH] lvmlockd: add cmirrord support + +--- + heartbeat/lvmlockd | 53 ++++++++++++++++++++++++++++++++++++++++++---- + 1 file changed, 49 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/lvmlockd b/heartbeat/lvmlockd +index 7fe73e364..57f7fdc76 100755 +--- a/heartbeat/lvmlockd ++++ b/heartbeat/lvmlockd +@@ -59,6 +59,14 @@ For more information, refer to manpage lvmlockd.8. + This agent manages the lvmlockd daemon + + ++ ++ ++Start with cmirrord (cluster mirror log daemon). ++ ++activate cmirrord ++ ++ ++ + + pid file + pid file +@@ -102,6 +110,7 @@ END + : ${OCF_RESKEY_pidfile:="/run/lvmlockd.pid"} + + LOCKD="lvmlockd" ++CMIRRORD="cmirrord" + # 0.5s sleep each count + TIMEOUT_COUNT=20 + +@@ -138,6 +147,21 @@ silent_status() + + if [ -n "$pid" ] ; then + daemon_is_running "$pid" ++ rc=$? ++ mirror_rc=$rc ++ ++ if ocf_is_true $OCF_RESKEY_with_cmirrord; then ++ pid=$(pgrep $CMIRRORD | head -n1) ++ daemon_is_running "$pid" ++ mirror_rc=$? 
++ fi ++ ++ # If these ever don't match, return error to force recovery ++ if [ $mirror_rc -ne $rc ]; then ++ return $OCF_ERR_GENERIC ++ fi ++ ++ return $rc + else + # No pid file + false +@@ -199,6 +223,16 @@ lvmlockd_start() { + return $OCF_SUCCESS + fi + ++ if ocf_is_true $OCF_RESKEY_with_cmirrord; then ++ ocf_log info "starting ${CMIRRORD}..." ++ $CMIRRORD ++ rc=$? ++ if [ $rc -ne $OCF_SUCCESS ] ; then ++ ocf_exit_reason "Failed to start ${CMIRRORD}, exit code: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ fi ++ + if [ ! -z "$OCF_RESKEY_socket_path" ] ; then + extras="$extras -s ${OCF_RESKEY_socket_path}" + fi +@@ -252,10 +286,11 @@ wait_lockspaces_close() + + kill_stop() + { +- local pid=$1 ++ local proc=$1 ++ local pid=$2 + local retries=0 + +- ocf_log info "Killing ${LOCKD} (pid=$pid)" ++ ocf_log info "Killing $proc (pid=$pid)" + while + daemon_is_running $pid && [ $retries -lt "$TIMEOUT_COUNT" ] + do +@@ -292,9 +327,15 @@ lvmlockd_stop() { + wait_lockspaces_close + + pid=$(get_pid) +- kill_stop $pid ++ kill_stop $LOCKD $pid ++ ++ if ocf_is_true $OCF_RESKEY_with_cmirrord; then ++ pid=$(pgrep $CMIRRORD) ++ kill_stop $CMIRRORD $pid ++ fi ++ + if silent_status ; then +- ocf_exit_reason "Failed to stop, ${LOCKD}[$pid] still running." ++ ocf_exit_reason "Failed to stop, ${LOCKD} or ${CMIRRORD} still running." + return $OCF_ERR_GENERIC + fi + +@@ -317,6 +358,10 @@ lvmlockd_validate() { + check_binary pgrep + check_binary lvmlockctl + ++ if ocf_is_true $OCF_RESKEY_with_cmirrord; then ++ check_binary $CMIRRORD ++ fi ++ + return $OCF_SUCCESS + } + diff --git a/SOURCES/metadata-add-missing-s-suffix.patch b/SOURCES/metadata-add-missing-s-suffix.patch new file mode 100644 index 0000000..62ab32b --- /dev/null +++ b/SOURCES/metadata-add-missing-s-suffix.patch @@ -0,0 +1,183 @@ +From 84083d83ff6049bcc99b959c00999496b3027317 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 21 Sep 2018 11:30:26 +0200 +Subject: [PATCH 1/4] IPv6Addr/nagios/sybaseASE: add missing "s"-suffix in + metadata + +--- + heartbeat/IPv6addr.c | 12 ++++++------ + heartbeat/nagios | 2 +- + heartbeat/sybaseASE.in | 4 ++-- + 3 files changed, 9 insertions(+), 9 deletions(-) + +diff --git a/heartbeat/IPv6addr.c b/heartbeat/IPv6addr.c +index 68447de2e..d8562559b 100644 +--- a/heartbeat/IPv6addr.c ++++ b/heartbeat/IPv6addr.c +@@ -863,12 +863,12 @@ meta_data_addr6(void) + " \n" + " \n" + " \n" +- " \n" +- " \n" +- " \n" +- " \n" +- " \n" +- " \n" ++ " \n" ++ " \n" ++ " \n" ++ " \n" ++ " \n" ++ " \n" + " \n" + "\n"; + printf("%s\n",meta_data); +diff --git a/heartbeat/nagios b/heartbeat/nagios +index 4cb462f6a..3d07b141c 100755 +--- a/heartbeat/nagios ++++ b/heartbeat/nagios +@@ -114,7 +114,7 @@ nagios_meta_data() { + + + +- ++ + + + +diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in +index b4809ea23..9ddd429be 100755 +--- a/heartbeat/sybaseASE.in ++++ b/heartbeat/sybaseASE.in +@@ -234,8 +234,8 @@ meta_data() + + + +- +- ++ ++ + + + + +From d4bba27b171cb87698359dd300313ba5a6600cca Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 21 Sep 2018 11:34:41 +0200 +Subject: [PATCH 2/4] CI: improvements + +- added "check"-command to skip build-process +- added check for "s"-suffix in agents +- added additional file-types to also check ocf-* and C agents +--- + ci/build.sh | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +diff --git a/ci/build.sh b/ci/build.sh +index c331e9ab4..22f4472d3 100755 +--- a/ci/build.sh ++++ b/ci/build.sh +@@ -51,7 +51,7 @@ find_prunes() { + } + + find_cmd() { 
+- echo "find heartbeat -type f -and \( -perm /111 -or -name '*.sh' \) $(find_prunes)" ++ echo "find heartbeat -type f -and \( -perm /111 -or -name '*.sh' -or -name '*.c' -or -name '*.in' \) $(find_prunes)" + } + + check_all_executables() { +@@ -59,6 +59,12 @@ check_all_executables() { + while read -r script; do + file --mime "$script" | grep 'charset=binary' >/dev/null 2>&1 && continue + file --mime "$script" | grep 'text/x-python' >/dev/null 2>&1 && continue ++ file --mime "$script" | grep 'text/x-makefile' >/dev/null 2>&1 && continue ++ ++ if grep -qE "\ +Date: Fri, 21 Sep 2018 11:41:40 +0200 +Subject: [PATCH 3/4] ocf-shellfuncs: fixes caught when improving CI + +--- + heartbeat/ocf-shellfuncs.in | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 8e44f09eb..043ab9bf2 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -457,7 +457,7 @@ ocf_pidfile_status() { + return 2 + fi + pid=`cat $pidfile` +- kill -0 $pid 2>&1 > /dev/null ++ kill -0 $pid > /dev/null 2>&1 + if [ $? = 0 ]; then + return 0 + fi +@@ -761,7 +761,7 @@ maketempfile() + { + if [ $# = 1 -a "$1" = "-d" ]; then + mktemp -d +- return -0 ++ return 0 + elif [ $# != 0 ]; then + return 1 + fi + +From d1579996d6f5aec57ece2bc31b106891d0bbb964 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 21 Sep 2018 11:50:08 +0200 +Subject: [PATCH 4/4] CI: fix upstream CI not detecting MIME-format correctly + for Makefiles + +--- + ci/build.sh | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/ci/build.sh b/ci/build.sh +index 22f4472d3..b900ddc05 100755 +--- a/ci/build.sh ++++ b/ci/build.sh +@@ -59,7 +59,8 @@ check_all_executables() { + while read -r script; do + file --mime "$script" | grep 'charset=binary' >/dev/null 2>&1 && continue + file --mime "$script" | grep 'text/x-python' >/dev/null 2>&1 && continue +- file --mime "$script" | grep 'text/x-makefile' >/dev/null 2>&1 && continue ++ # upstream CI doesnt detect MIME-format correctly for Makefiles ++ [[ "$script" =~ .*/Makefile.in ]] && continue + + if grep -qE "\ +Date: Fri, 20 May 2016 15:47:33 +0200 +Subject: [PATCH 1/2] nfsserver: mount based on rpcpipefs_dir variable + +--- + heartbeat/nfsserver | 24 ++++++++++++------------ + 1 file changed, 12 insertions(+), 12 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 3d036a98a..479082169 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -177,14 +177,8 @@ esac + fp="$OCF_RESKEY_nfs_shared_infodir" + : ${OCF_RESKEY_nfs_notify_cmd="$DEFAULT_NOTIFY_CMD"} + : ${OCF_RESKEY_nfs_notify_foreground="$DEFAULT_NOTIFY_FOREGROUND"} +- +-if [ -z ${OCF_RESKEY_rpcpipefs_dir} ]; then +- rpcpipefs_make_dir=$fp/rpc_pipefs +- rpcpipefs_umount_dir=${DEFAULT_RPCPIPEFS_DIR} +-else +- rpcpipefs_make_dir=${OCF_RESKEY_rpcpipefs_dir} +- rpcpipefs_umount_dir=${OCF_RESKEY_rpcpipefs_dir} +-fi ++: ${OCF_RESKEY_rpcpipefs_dir="$DEFAULT_RPCPIPEFS_DIR"} ++OCF_RESKEY_rpcpipefs_dir=${OCF_RESKEY_rpcpipefs_dir%/} + + # Use statd folder if it exists + if [ -d "/var/lib/nfs/statd" ]; then +@@ -409,7 +403,7 @@ prepare_directory () + fi + + [ -d "$fp" ] || mkdir -p $fp +- [ -d "$rpcpipefs_make_dir" ] || mkdir -p $rpcpipefs_make_dir ++ [ -d "$OCF_RESKEY_rpcpipefs_dir" ] || mkdir -p $OCF_RESKEY_rpcpipefs_dir + [ -d "$fp/v4recovery" ] || mkdir -p $fp/v4recovery + + [ -d "$fp/$STATD_DIR" ] || mkdir -p "$fp/$STATD_DIR" +@@ -453,9 +447,13 @@ bind_tree () + + unbind_tree () + { +- if `mount | grep -q 
" on $rpcpipefs_umount_dir"`; then +- umount -t rpc_pipefs $rpcpipefs_umount_dir +- fi ++ local i=1 ++ while `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir"` && [ "$i" -le 10 ]; do ++ ocf_log info "Stop: umount ($i/10 attempts)" ++ umount -t rpc_pipefs $OCF_RESKEY_rpcpipefs_dir ++ sleep 1 ++ i=$((i + 1)) ++ done + if is_bound /var/lib/nfs; then + umount /var/lib/nfs + fi +@@ -617,6 +615,8 @@ nfsserver_start () + prepare_directory + bind_tree + ++ mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir ++ + # remove the sm-notify pid so sm-notify will be allowed to run again without requiring a reboot. + rm -f /var/run/sm-notify.pid + # + +From c92e8c84b73dde3254f53665a0ef3603418538dc Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 27 Sep 2018 16:09:09 +0200 +Subject: [PATCH 2/2] nfsserver: only mount rpc_pipefs if it's not mounted + +also added space to avoid matching similarly named mounts +--- + heartbeat/nfsserver | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 479082169..5412f391b 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -448,7 +448,7 @@ bind_tree () + unbind_tree () + { + local i=1 +- while `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir"` && [ "$i" -le 10 ]; do ++ while `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "` && [ "$i" -le 10 ]; do + ocf_log info "Stop: umount ($i/10 attempts)" + umount -t rpc_pipefs $OCF_RESKEY_rpcpipefs_dir + sleep 1 +@@ -615,7 +615,9 @@ nfsserver_start () + prepare_directory + bind_tree + +- mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir ++ if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then ++ mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir ++ fi + + # remove the sm-notify pid so sm-notify will be allowed to run again without requiring a reboot. + rm -f /var/run/sm-notify.pid diff --git a/SOURCES/nova-compute-wait-NovaEvacuate.patch b/SOURCES/nova-compute-wait-NovaEvacuate.patch new file mode 100644 index 0000000..12b7ad5 --- /dev/null +++ b/SOURCES/nova-compute-wait-NovaEvacuate.patch @@ -0,0 +1,747 @@ +diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am +--- a/doc/man/Makefile.am 2018-06-27 13:22:31.576628598 +0200 ++++ b/doc/man/Makefile.am 2018-06-27 13:47:15.902753673 +0200 +@@ -75,6 +75,8 @@ + ocf_heartbeat_ManageRAID.7 \ + ocf_heartbeat_ManageVE.7 \ + ocf_heartbeat_NodeUtilization.7 \ ++ ocf_heartbeat_nova-compute-wait.7 \ ++ ocf_heartbeat_NovaEvacuate.7 \ + ocf_heartbeat_Pure-FTPd.7 \ + ocf_heartbeat_Raid1.7 \ + ocf_heartbeat_Route.7 \ +diff -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am +--- a/heartbeat/Makefile.am 2018-06-27 13:22:31.574628625 +0200 ++++ b/heartbeat/Makefile.am 2018-06-27 13:46:23.621453631 +0200 +@@ -29,6 +29,8 @@ + + ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat + ++ospdir = $(OCF_RA_DIR_PREFIX)/openstack ++ + dtddir = $(datadir)/$(PACKAGE_NAME) + dtd_DATA = ra-api-1.dtd metadata.rng + +@@ -50,6 +52,9 @@ + IPv6addr_LDADD = -lplumb $(LIBNETLIBS) + send_ua_LDADD = $(LIBNETLIBS) + ++osp_SCRIPTS = nova-compute-wait \ ++ NovaEvacuate ++ + ocf_SCRIPTS = AoEtarget \ + AudibleAlarm \ + ClusterMon \ +diff -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait +--- a/heartbeat/nova-compute-wait 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/nova-compute-wait 2018-06-27 13:27:15.166830889 +0200 +@@ -0,0 +1,317 @@ ++#!/bin/sh ++# Copyright 2015 Red Hat, Inc. 
++# ++# Description: Manages compute daemons ++# ++# Authors: Andrew Beekhof ++# ++# Support: openstack@lists.openstack.org ++# License: Apache Software License (ASL) 2.0 ++# ++ ++ ++####################################################################### ++# Initialization: ++ ++### ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++### ++ ++: ${__OCF_ACTION=$1} ++ ++####################################################################### ++ ++meta_data() { ++ cat < ++ ++ ++1.0 ++ ++ ++OpenStack Nova Compute Server. ++ ++OpenStack Nova Compute Server ++ ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++ ++ ++DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN ++ ++DNS domain ++ ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++ ++ ++How long to wait for nova to finish evacuating instances elsewhere ++before starting nova-compute. Only used when the agent detects ++evacuations might be in progress. ++ ++You may need to increase the start timeout when increasing this value. ++ ++Delay to allow evacuations time to complete ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++ ++# don't exit on TERM, to test that lrmd makes sure that we do exit ++trap sigterm_handler TERM ++sigterm_handler() { ++ ocf_log info "They use TERM to bring us down. No such luck." ++ return ++} ++ ++nova_usage() { ++ cat </run/systemd/system/openstack-nova-compute.service.d/unfence-20.conf ++[Service] ++ExecStartPost=/sbin/fence_compute ${fence_options} -o on -n ${NOVA_HOST} ++EOF ++} ++ ++nova_validate() { ++ rc=$OCF_SUCCESS ++ ++ check_binary crudini ++ check_binary nova-compute ++ check_binary fence_compute ++ ++ if [ ! -f /etc/nova/nova.conf ]; then ++ ocf_exit_reason "/etc/nova/nova.conf not found" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ # Is the state directory writable? ++ state_dir=$(dirname $statefile) ++ touch "$state_dir/$$" ++ if [ $? != 0 ]; then ++ ocf_exit_reason "Invalid state directory: $state_dir" ++ return $OCF_ERR_ARGS ++ fi ++ rm -f "$state_dir/$$" ++ ++ NOVA_HOST=$(crudini --get /etc/nova/nova.conf DEFAULT host 2>/dev/null) ++ if [ $? = 1 ]; then ++ short_host=$(uname -n | awk -F. '{print $1}') ++ if [ "x${OCF_RESKEY_domain}" != x ]; then ++ NOVA_HOST=${short_host}.${OCF_RESKEY_domain} ++ else ++ NOVA_HOST=$(uname -n) ++ fi ++ fi ++ ++ if [ $rc != $OCF_SUCCESS ]; then ++ exit $rc ++ fi ++ return $rc ++} ++ ++statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active" ++ ++: ${OCF_RESKEY_evacuation_delay=120} ++case $__OCF_ACTION in ++meta-data) meta_data ++ exit $OCF_SUCCESS ++ ;; ++usage|help) nova_usage ++ exit $OCF_SUCCESS ++ ;; ++esac ++ ++case $__OCF_ACTION in ++start) nova_validate; nova_start;; ++stop) nova_stop;; ++monitor) nova_validate; nova_monitor;; ++notify) nova_notify;; ++validate-all) exit $OCF_SUCCESS;; ++*) nova_usage ++ exit $OCF_ERR_UNIMPLEMENTED ++ ;; ++esac ++rc=$? 
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" ++exit $rc ++ +diff -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate +--- a/heartbeat/NovaEvacuate 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/NovaEvacuate 2018-06-27 13:27:18.835781756 +0200 +@@ -0,0 +1,388 @@ ++#!/bin/sh ++# ++# Copyright 2015 Red Hat, Inc. ++# ++# Description: Manages evacuation of nodes running nova-compute ++# ++# Authors: Andrew Beekhof ++# ++# Support: openstack@lists.openstack.org ++# License: Apache Software License (ASL) 2.0 ++# ++ ++ ++####################################################################### ++# Initialization: ++ ++### ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++### ++ ++: ${__OCF_ACTION=$1} ++ ++####################################################################### ++ ++meta_data() { ++ cat < ++ ++ ++1.0 ++ ++ ++Facility for tacking a list of compute nodes and reliably evacuating the ones that fence_evacuate has flagged. ++ ++Evacuator for OpenStack Nova Compute Server ++ ++ ++ ++ ++ ++Authorization URL for connecting to keystone in admin context ++ ++Authorization URL ++ ++ ++ ++ ++ ++Username for connecting to keystone in admin context ++ ++Username ++ ++ ++ ++ ++Password for connecting to keystone in admin context ++ ++Password ++ ++ ++ ++ ++ ++Tenant name for connecting to keystone in admin context. ++Note that with Keystone V3 tenant names are only unique within a domain. ++ ++Keystone v2 Tenant or v3 Project Name ++ ++ ++ ++ ++ ++User's domain name. Used when authenticating to Keystone. ++ ++Keystone v3 User Domain ++ ++ ++ ++ ++ ++Domain name containing project. Used when authenticating to Keystone. ++ ++Keystone v3 Project Domain ++ ++ ++ ++ ++ ++Nova API location (internal, public or admin URL) ++ ++Nova API location (internal, public or admin URL) ++ ++ ++ ++ ++ ++Region name for connecting to nova. ++ ++Region name ++ ++ ++ ++ ++ ++Explicitly allow client to perform "insecure" TLS (https) requests. ++The server's certificate will not be verified against any certificate authorities. ++This option should be used with caution. ++ ++Allow insecure TLS requests ++ ++ ++ ++ ++ ++Disable shared storage recovery for instances. Use at your own risk! ++ ++Disable shared storage recovery for instances ++ ++ ++ ++ ++ ++Enable extra logging from the evacuation process ++ ++Enable debug logging ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++ ++# don't exit on TERM, to test that lrmd makes sure that we do exit ++trap sigterm_handler TERM ++sigterm_handler() { ++ ocf_log info "They use TERM to bring us down. No such luck." ++ return ++} ++ ++evacuate_usage() { ++ cat < 0: + filename = keyValues['--filename'][0] + else: +- print "A profile is needed! please use \'--filename\' and add the profile name." ++ print("A profile is needed! please use \'--filename\' and add the profile name.") + return filename + + def getInstanceCount(self,keyValues): +@@ -25,7 +25,7 @@ + if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0: + count = keyValues['--instancecount'][0] + else: +- print "InstanceCount should be a positive number! The default value(1) will be used!" ++ print("InstanceCount should be a positive number! 
The default value(1) will be used!") + return int(count) + + def getSubOperations(self,cmd,operation): +@@ -65,8 +65,8 @@ + _newkeyValues["RegionId"] = newkeyValues["RegionId"] + self._handExtraOperation(cmd,extraOperation,_newkeyValues,version,secureRequest) + else: +- print "InstanceId is need!" +- except Exception,e: ++ print("InstanceId is need!") ++ except Exception as e: + print(e) + + def _handExtraOperation(self,cmd,extraOperation,keyValues,version , secureRequest = False): +@@ -81,7 +81,7 @@ + response.display_response("error", result, "json") + else: + response.display_response(extraOperation, result, "json") +- except Exception,e: ++ except Exception as e: + print(e) + + +@@ -127,7 +127,7 @@ + ''' + if data.has_key('InstanceId') and len(data['InstanceId']) > 0: + instanceId = data['InstanceId'] +- except Exception,e: ++ except Exception as e: + pass + finally: + return instanceId +@@ -156,5 +156,5 @@ + if __name__ == "__main__": + handler = EcsImportHandler() + handler.getKVFromJson('ttt') +- print handler.getKVFromJson('ttt') ++ print(handler.getKVFromJson('ttt')) + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-10-08 12:37:08.373091088 +0200 +@@ -77,8 +77,8 @@ + if not filename == None: + self.exportInstanceToFile(result,filename) + else: +- print 'Filename is needed' +- except Exception,e: ++ print('Filename is needed') ++ except Exception as e: + print(e) + + def exportInstanceToFile(self, result, filename): +@@ -96,9 +96,9 @@ + fp = open(fileName,'w') + try : + fp.write(json.dumps(result,indent=4)) +- print "success" ++ print("success") + except IOError: +- print "Error: can\'t find file or read data" ++ print("Error: can\'t find file or read data") + finally: + fp.close() + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-10-08 12:36:20.997966509 +0200 +@@ -26,7 +26,7 @@ + count = keyValues[import_count][0] + else: + pass +- # print "InstanceCount should be a positive number! The default value(1) will be used!" ++ # print("InstanceCount should be a positive number! The default value(1) will be used!") + return int(count), "InstanceCount is "+str(count)+" created." 
+ + def getSubOperations(self,cmd,operation): +@@ -46,7 +46,7 @@ + if self.apiHandler.needSetDefaultRegion(cmdInstance, newkeyValues): + newkeyValues["RegionId"] = [self.extensionHandler.getUserRegion()] + newkeyValues["ClientToken"] = [self.random_str()] +- # print newkeyValues.keys() ++ # print(newkeyValues.keys()) + # return + # self._setAttr(cmdInstance, newkeyValues) # set all key values in instance + # self.apiHandler.changeEndPoint(cmdInstance, newkeyValues) +@@ -58,7 +58,7 @@ + response.display_response("error", result, "json") + else: + response.display_response(item, result, "json") +- except Exception,e: ++ except Exception as e: + print(e) + + def getKVFromJson(self,filename): +@@ -77,7 +77,7 @@ + fp = open(fileName,'r') + data=json.loads(fp.read()) + keys = data.keys() +- # print keys, type(data['Items']['DBInstanceAttribute'][0]) ++ # print(keys, type(data['Items']['DBInstanceAttribute'][0])) + # instanceAttribute = data['Items']['DBInstanceAttribute'][0] + items = data['Items']['DBInstanceAttribute'][0] + keys = items.keys() +@@ -130,7 +130,7 @@ + if __name__ == "__main__": + handler = RdsImportDBInstanceHandler() + # handler.getKVFromJson('ttt') +- # print handler.getKVFromJson('ttt') +- print handler.random_str() ++ # print(handler.getKVFromJson('ttt')) ++ print(handler.random_str()) + + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-10-08 12:11:19.743703469 +0200 +@@ -24,9 +24,9 @@ + _value = keyValues[ProfileCmd.name][0] # use the first value + self.extensionCliHandler.setUserProfile(_value) + else: +- print "Do your forget profile name? please use \'--name\' and add the profile name." ++ print("Do your forget profile name? please use \'--name\' and add the profile name.") + else: +- print "[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?" ++ print("[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?") + + def addProfileCmd(self, cmd, keyValues): + userKey = '' +@@ -52,12 +52,12 @@ + finally: + f.close() + else: +- print "[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?" 
++ print("[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?") + + + if __name__ == "__main__": + handler = ProfileHandler() + handler.handleProfileCmd("useprofile", {'--name':["profile444"]}) +- print handler.extensionCliHandler.getUserProfile() ++ print(handler.extensionCliHandler.getUserProfile()) + handler.addProfileCmd("addProfile", {}) +- handler.addProfileCmd("addProfile", {'--name':["profile2222"]}) +\ No newline at end of file ++ handler.addProfileCmd("addProfile", {'--name':["profile2222"]}) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-10-08 12:12:25.602486634 +0200 +@@ -24,14 +24,14 @@ + self.openApiDataHandler = aliyunOpenApiData.aliyunOpenApiDataHandler() + + def showUsage(self): +- print "usage: aliyuncli [options and parameters]" ++ print("usage: aliyuncli [options and parameters]") + + def showExample(self): +- print "show example" ++ print("show example") + + def showCmdError(self, cmd): + self.showUsage() +- print " the valid command as follows:\n" ++ print(" the valid command as follows:\n") + cmds = self.openApiDataHandler.getApiCmds() + self.printAsFormat(cmds) + +@@ -44,7 +44,7 @@ + error.printInFormat("Wrong version", "The sdk version is not exit.") + return None + self.showUsage() +- print "["+cmd+"]","valid operations as follows:\n" ++ print("["+cmd+"]","valid operations as follows:\n") + operations = self.openApiDataHandler.getApiOperations(cmd, version) + extensions = self.openApiDataHandler.getExtensionOperationsFromCmd(cmd) + operations.update(extensions) +@@ -56,8 +56,8 @@ + self.printAsFormat(operations) + + def showParameterError(self, cmd, operation, parameterlist): +- print 'usage: aliyuncli [options and parameters]' +- print '['+cmd+"."+operation+']: current operation can uses parameters as follow :\n' ++ print('usage: aliyuncli [options and parameters]') ++ print('['+cmd+"."+operation+']: current operation can uses parameters as follow :\n') + self.printAsFormat(parameterlist) + pass + +@@ -72,7 +72,7 @@ + tmpList.append(item) + count = count+1 + if len(tmpList) == 2: +- print '{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10') ++ print('{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10')) + tmpList = list() + if len(tmpList) == 1 and count == len(mlist): +- print tmpList[0] +\ No newline at end of file ++ print(tmpList[0]) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-10-08 12:12:42.799168903 +0200 +@@ -91,7 +91,7 @@ + keyValues["RegionId"] = [self.extensionHandler.getUserRegion()] + #check necessaryArgs as:accesskeyid accesskeysecret regionId + if not self.handler.hasNecessaryArgs(keyValues): +- print 'accesskeyid/accesskeysecret/regionId is absence' ++ print('accesskeyid/accesskeysecret/regionId is absence') + return + result = self.handler.getResponse(cmd,operation,className,cmdInstance,keyValues,secureRequest) + if result is None: +@@ -102,7 +102,7 @@ + else: + response.display_response(operation, result, outPutFormat,keyValues) + else: +- print 'aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com' ++ print('aliyuncli internal error, please 
contact: haowei.yao@alibaba-inc.com') + elif self.handler.isAvailableExtensionOperation(cmd, operation): + if self.args.__len__() >= 3 and self.args[2] == 'help': + import commandConfigure +@@ -125,7 +125,7 @@ + def showInstanceAttribute(self, cmd, operation, classname): + if self.args.__len__() >= 3 and self.args[2] == "help": + self.helper.showParameterError(cmd, operation, self.completer._help_to_show_instance_attribute(classname)) +- #print self.completer._help_to_show_instance_attribute(cmdInstance) ++ #print(self.completer._help_to_show_instance_attribute(cmdInstance)) + return True + return False + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-10-08 12:12:54.764947819 +0200 +@@ -141,7 +141,7 @@ + _key = keyValues[keystr][0] + if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0: + _secret = keyValues[secretstr][0] +- #print "accesskeyid: ", _key , "accesskeysecret: ",_secret ++ #print("accesskeyid: ", _key , "accesskeysecret: ",_secret) + return _key, _secret + + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-10-08 12:13:23.672413710 +0200 +@@ -161,12 +161,12 @@ + + if __name__ == "__main__": + upgradeHandler = aliyunCliUpgradeHandler() +- # print upgradeHandler.getLatestTimeFromServer() ++ # print(upgradeHandler.getLatestTimeFromServer()) + # flag, url = upgradeHandler.isNewVersionReady() + # if flag: +- # print url ++ # print(url) + # else: +- # print "current version is latest one" +- # print "final test:" +- print upgradeHandler.checkForUpgrade() +- print upgradeHandler.handleUserChoice("N") ++ # print("current version is latest one") ++ # print("final test:") ++ print(upgradeHandler.checkForUpgrade()) ++ print(upgradeHandler.handleUserChoice("N")) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-10-08 12:14:46.830877248 +0200 +@@ -127,35 +127,35 @@ + + # this api will show help page when user input aliyuncli help(-h or --help) + def showAliyunCliHelp(self): +- print color.bold+"ALIYUNCLI()"+color.end +- print color.bold+"\nNAME"+color.end +- print "\taliyuncli -" +- print color.bold+"\nDESCRIPTION"+color.end +- print "\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. " +- print color.bold+"\nSYNOPSIS"+color.end +- print "\taliyuncli [options and parameters]" +- print "\n\taliyuncli has supported command completion now. The detail you can check our site." +- print color.bold+"OPTIONS"+color.end +- print color.bold+"\tconfigure"+color.end +- print "\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)" +- print color.bold+"\n\t--output"+color.end+" (string)" +- print "\n\tThe formatting style for command output." 
+- print "\n\to json" +- print "\n\to text" +- print "\n\to table" ++ print(color.bold+"ALIYUNCLI()"+color.end) ++ print(color.bold+"\nNAME"+color.end) ++ print("\taliyuncli -") ++ print(color.bold+"\nDESCRIPTION"+color.end) ++ print("\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. ") ++ print(color.bold+"\nSYNOPSIS"+color.end) ++ print("\taliyuncli [options and parameters]") ++ print("\n\taliyuncli has supported command completion now. The detail you can check our site.") ++ print(color.bold+"OPTIONS"+color.end) ++ print(color.bold+"\tconfigure"+color.end) ++ print("\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)") ++ print(color.bold+"\n\t--output"+color.end+" (string)") ++ print("\n\tThe formatting style for command output.") ++ print("\n\to json") ++ print("\n\to text") ++ print("\n\to table") + +- print color.bold+"\n\t--secure"+color.end +- print "\n\tMaking secure requests(HTTPS) to service" ++ print(color.bold+"\n\t--secure"+color.end) ++ print("\n\tMaking secure requests(HTTPS) to service") + +- print color.bold+"\nAVAILABLE SERVICES"+color.end +- print "\n\to ecs" +- print "\n\to ess" +- print "\n\to mts" +- print "\n\to rds" +- print "\n\to slb" ++ print(color.bold+"\nAVAILABLE SERVICES"+color.end) ++ print("\n\to ecs") ++ print("\n\to ess") ++ print("\n\to mts") ++ print("\n\to rds") ++ print("\n\to slb") + + def showCurrentVersion(self): +- print self._version ++ print(self._version) + + def findConfigureFilePath(self): + homePath = "" +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2018-10-08 12:16:00.008525187 +0200 +@@ -39,9 +39,9 @@ + + + def oss_notice(): +- print "OSS operation in aliyuncli is not supported." +- print "Please use 'ossutil' command line tool for Alibaba Cloud OSS operation." +- print "You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n" ++ print("OSS operation in aliyuncli is not supported.") ++ print("Please use 'ossutil' command line tool for Alibaba Cloud OSS operation.") ++ print("You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n") + + + try: +@@ -391,22 +391,22 @@ + return jsonobj + + except ImportError as e: +- print module, 'is not exist!' 
++ print(module, 'is not exist!') + sys.exit(1) + + except ServerException as e: + error = cliError.error() + error.printInFormat(e.get_error_code(), e.get_error_msg()) +- print "Detail of Server Exception:\n" +- print str(e) ++ print("Detail of Server Exception:\n") ++ print(str(e)) + sys.exit(1) + + except ClientException as e: +- # print e.get_error_msg() ++ # print(e.get_error_msg()) + error = cliError.error() + error.printInFormat(e.get_error_code(), e.get_error_msg()) +- print "Detail of Client Exception:\n" +- print str(e) ++ print("Detail of Client Exception:\n") ++ print(str(e)) + sys.exit(1) + + def getSetFuncs(self,classname): +@@ -549,6 +549,6 @@ + + if __name__ == '__main__': + handler = aliyunOpenApiDataHandler() +- print "###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance') +- print "###############",handler.isAvailableOperation('ecs', 'DescribeInstances') +- print "###############",handler.getExtensionOperationsFromCmd('ecs') ++ print("###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance')) ++ print("###############",handler.isAvailableOperation('ecs', 'DescribeInstances')) ++ print("###############",handler.getExtensionOperationsFromCmd('ecs')) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-10-08 12:16:14.865250686 +0200 +@@ -44,7 +44,7 @@ + filename=self.fileName + self.writeCmdVersionToFile(cmd,version,filename) + else: +- print "A argument is needed! please use \'--version\' and add the sdk version." ++ print("A argument is needed! 
please use \'--version\' and add the sdk version.") + return + def showVersions(self,cmd,operation,stream=None): + configureVersion='(not configure)' +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-10-08 12:17:34.763774477 +0200 +@@ -55,7 +55,7 @@ + # _mlist = self.rds.extensionOptions[self.rds.exportDBInstance] + self.appendList(parameterList, self.rds.extensionOptions[self.rds.exportDBInstance]) + if operation.lower() == self.rds.importDBInstance.lower(): +- # print "haha", (self.rds.extensionOptions[self.rds.importDBInstance]) ++ # print("haha", (self.rds.extensionOptions[self.rds.importDBInstance])) + # parameterList.append(self.rds.extensionOptions[self.rds.importDBInstance]) + self.appendList(parameterList, self.rds.extensionOptions[self.rds.importDBInstance]) + +@@ -89,8 +89,8 @@ + importInstance:['count','filename']} + + if __name__ == '__main__': +- # print type(rds.extensionOperations) +- # print type(rds.extensionOptions) +- # print rds.extensionOptions['ll'] ++ # print(type(rds.extensionOperations)) ++ # print(type(rds.extensionOptions)) ++ # print(rds.extensionOptions['ll']) + configure = commandConfigure() +- print configure.showExtensionOperationHelp("ecs", "ExportInstance") ++ print(configure.showExtensionOperationHelp("ecs", "ExportInstance")) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2018-10-08 12:17:59.282322043 +0200 +@@ -577,7 +577,7 @@ + operation = operations[i].strip() + self._getKeyFromSection(profilename,operation) + else: +- print 'The correct usage:aliyuncli configure get key --profile profilename' ++ print('The correct usage:aliyuncli configure get key --profile profilename') + return + + def _getKeyFromSection(self,profilename,key): +@@ -591,7 +591,7 @@ + elif key in _WRITE_TO_CONFIG_FILE : + self._getKeyFromFile(config_filename,sectionName,key) + else: +- print key,'=','None' ++ print(key,'=','None') + def _getKeyFromFile(self,filename,section,key): + if os.path.isfile(filename): + with open(filename, 'r') as f: +@@ -600,9 +600,9 @@ + start = self._configWriter.hasSectionName(section,contents)[1] + end = self._configWriter._getSectionEnd(start,contents) + value = self._configWriter._getValueInSlice(start,end,key,contents) +- print key,'=',value ++ print(key,'=',value) + else: +- print key,'=None' ++ print(key,'=None') + + + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py 2018-10-08 12:18:25.178844179 +0200 +@@ -2,7 +2,7 @@ + + def handleEndPoint(cmd,operation,keyValues): + if not hasNecessaryArgs(keyValues): +- print 'RegionId/EndPoint is absence' ++ print('RegionId/EndPoint is absence') + return + if cmd is not None: + cmd = cmd.capitalize() +@@ -25,7 +25,7 @@ + from aliyunsdkcore.profile.region_provider import modify_point + modify_point(cmd,regionId,endPoint) + except Exception as e: +- print e ++ print(e) + pass + + +diff -uNr 
a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py 2018-10-08 12:18:45.458469966 +0200 +@@ -111,14 +111,14 @@ + if os.path.isfile(cfgfile): + ans = raw_input('File existed. Do you wish to overwrite it?(y/n)') + if ans.lower() != 'y': +- print 'Answer is No. Quit now' ++ print('Answer is No. Quit now') + return + with open(cfgfile, 'w+') as f: + config.write(f) +- print 'Your configuration is saved to %s.' % cfgfile ++ print('Your configuration is saved to %s.' % cfgfile) + + def cmd_help(args): +- print HELP ++ print(HELP) + + def add_config(parser): + parser.add_argument('--host', type=str, help='service host') +@@ -161,7 +161,7 @@ + return CMD_LIST.keys() + def handleOas(pars=None): + if pars is None: +- print HELP ++ print(HELP) + sys.exit(0) + parser = ArgumentParser(prog="aliyuncli oas",formatter_class=ArgumentDefaultsHelpFormatter) + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2018-10-08 12:18:59.713206928 +0200 +@@ -61,7 +61,7 @@ + data = f.read() + return data + except (OSError, IOError) as e: +- print e ++ print(e) + def _getParamFromUrl(prefix,value,mode): + + req = urllib2.Request(value) +@@ -74,7 +74,7 @@ + errorMsg='Get the wrong content' + errorClass.printInFormat(response.getcode(), errorMsg) + except Exception as e: +- print e ++ print(e) + + PrefixMap = {'file://': _getParamFromFile, + 'fileb://': _getParamFromFile +@@ -86,4 +86,4 @@ + 'fileb://': {'mode': 'rb'}, + #'http://': {}, + #'https://': {} +- } +\ No newline at end of file ++ } +diff -uNr a/bundled/aliyun/colorama/demos/demo07.py b/bundled/aliyun/colorama/demos/demo07.py +--- a/bundled/aliyun/colorama/demos/demo07.py 2015-01-06 11:41:47.000000000 +0100 ++++ b/bundled/aliyun/colorama/demos/demo07.py 2018-10-08 12:20:25.598622106 +0200 +@@ -16,10 +16,10 @@ + 3a4 + """ + colorama.init() +- print "aaa" +- print "aaa" +- print "aaa" +- print forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4" ++ print("aaa") ++ print("aaa") ++ print("aaa") ++ print(forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4") + + + if __name__ == '__main__': +diff -uNr a/bundled/aliyun/pycryptodome/Doc/conf.py b/bundled/aliyun/pycryptodome/Doc/conf.py +--- a/bundled/aliyun/pycryptodome/Doc/conf.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/Doc/conf.py 2018-10-08 12:08:11.122188094 +0200 +@@ -15,7 +15,7 @@ + + # Modules to document with autodoc are in another directory + sys.path.insert(0, os.path.abspath('../lib')) +-print sys.path ++print(sys.path) + + # Mock existance of native modules + from Crypto.Util import _raw_api +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py b/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py 2018-10-08 12:08:11.123188075 +0200 +@@ -302,7 +302,7 @@ + randfunc = kwargs.pop("randfunc", None) + prime_filter = kwargs.pop("prime_filter", 
lambda x: True) + if kwargs: +- print "Unknown parameters:", kwargs.keys() ++ print("Unknown parameters:", kwargs.keys()) + + if exact_bits is None: + raise ValueError("Missing exact_bits parameter") +@@ -341,7 +341,7 @@ + exact_bits = kwargs.pop("exact_bits", None) + randfunc = kwargs.pop("randfunc", None) + if kwargs: +- print "Unknown parameters:", kwargs.keys() ++ print("Unknown parameters:", kwargs.keys()) + + if randfunc is None: + randfunc = Random.new().read +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py b/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py 2018-10-08 12:08:11.124188057 +0200 +@@ -912,4 +912,4 @@ + count = 30 + for x in xrange(count): + _ = point * d +- print (time.time() - start) / count * 1000, "ms" ++ print((time.time() - start) / count * 1000, "ms") +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py 2018-10-08 12:08:11.124188057 +0200 +@@ -1276,7 +1276,7 @@ + tests += make_block_tests(AES, "AESNI", test_data, {'use_aesni': True}) + tests += [ TestMultipleBlocks(True) ] + else: +- print "Skipping AESNI tests" ++ print("Skipping AESNI tests") + return tests + + if __name__ == '__main__': +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py 2018-10-08 12:08:11.125188038 +0200 +@@ -894,7 +894,7 @@ + if config.get('slow_tests'): + tests += list_test_cases(NISTTestVectorsGCM_no_clmul) + else: +- print "Skipping test of PCLMULDQD in AES GCM" ++ print("Skipping test of PCLMULDQD in AES GCM") + + return tests + +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py 2018-10-08 12:08:11.125188038 +0200 +@@ -39,7 +39,7 @@ + """Convert a text string with bytes in hex form to a byte string""" + clean = b(rws(t)) + if len(clean)%2 == 1: +- print clean ++ print(clean) + raise ValueError("Even number of characters expected") + return a2b_hex(clean) + +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py 2018-10-08 12:08:11.126188020 +0200 +@@ -25,11 +25,11 @@ + + slow_tests = not "--skip-slow-tests" in sys.argv + if not slow_tests: +- print "Skipping slow tests" ++ print("Skipping slow tests") + + wycheproof_warnings = "--wycheproof-warnings" in sys.argv + if wycheproof_warnings: +- print "Printing Wycheproof warnings" ++ print("Printing Wycheproof warnings") + + config = {'slow_tests' : slow_tests, 
'wycheproof_warnings' : wycheproof_warnings } + SelfTest.run(stream=sys.stdout, verbosity=1, config=config) +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py b/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py 2018-10-08 12:08:11.126188020 +0200 +@@ -369,13 +369,13 @@ + ] + + for key, words in data: +- print 'Trying key', key ++ print('Trying key', key) + key=binascii.a2b_hex(key) + w2=key_to_english(key) + if w2!=words: +- print 'key_to_english fails on key', repr(key), ', producing', str(w2) ++ print('key_to_english fails on key', repr(key), ', producing', str(w2)) + k2=english_to_key(words) + if k2!=key: +- print 'english_to_key fails on key', repr(key), ', producing', repr(k2) ++ print('english_to_key fails on key', repr(key), ', producing', repr(k2)) diff --git a/SOURCES/timeout-interval-add-s-suffix.patch b/SOURCES/timeout-interval-add-s-suffix.patch new file mode 100644 index 0000000..74f584d --- /dev/null +++ b/SOURCES/timeout-interval-add-s-suffix.patch @@ -0,0 +1,161 @@ +From 1c23bbf9700eda44d0d64f34bcb538d7b9e4f6f6 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 4 Sep 2018 09:19:59 +0200 +Subject: [PATCH] timeout/interval: add "s" suffix where it's missing + +--- + .gitignore | 1 + + heartbeat/SAPInstance | 2 +- + heartbeat/aliyun-vpc-move-ip | 10 +++++----- + heartbeat/gcp-vpc-move-vip.in | 10 +++++----- + heartbeat/mariadb.in | 22 +++++++++++----------- + heartbeat/sybaseASE.in | 32 ++++++++++++++++---------------- + 6 files changed, 39 insertions(+), 38 deletions(-) + +diff --git a/.gitignore b/.gitignore +index bbff032c3..3a9be36e5 100644 +--- a/.gitignore ++++ b/.gitignore +@@ -44,6 +44,7 @@ heartbeat/ocf-directories + heartbeat/ocf-shellfuncs + heartbeat/send_ua + heartbeat/shellfuncs ++heartbeat/*.pyc + include/agent_config.h + include/config.h + include/config.h.in +diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +index e27952adb..ed446c9c1 100755 +--- a/heartbeat/aliyun-vpc-move-ip ++++ b/heartbeat/aliyun-vpc-move-ip +@@ -155,11 +155,11 @@ Valid Aliyun CLI profile name + + + +- +- +- +- +- ++ ++ ++ ++ ++ + + + END +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index ba61193b6..31d84643a 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -77,11 +77,11 @@ METADATA = \ + + + +- +- +- +- +- ++ ++ ++ ++ ++ + + ''' + +diff --git a/heartbeat/mariadb.in b/heartbeat/mariadb.in +index 860fea7fd..c1969d70e 100644 +--- a/heartbeat/mariadb.in ++++ b/heartbeat/mariadb.in +@@ -250,17 +250,17 @@ The port on which the Master MariaDB instance is listening. + + + +- +- +- +- +- +- +- +- +- +- +- ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + END +diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in +index a4a0b7a0c..b4809ea23 100755 +--- a/heartbeat/sybaseASE.in ++++ b/heartbeat/sybaseASE.in +@@ -26,19 +26,19 @@ + # /$sybase_home/$sybase_ase/install/RUN_$server_name + # + # (2) You can customize the interval value in the meta-data section if needed: +-# +-# ++# ++# + # + # +-# +-# ++# ++# + # + # +-# +-# ++# ++# + # +-# +-# ++# ++# + # The timeout value is not supported by Redhat in RHCS5.0. 
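A note on the Python 3 conversion hunks above (aliyuncli, colorama, pycryptodome): the edit looks purely cosmetic, but it is only safe because the bundled code is being switched to Python 3 outright. On Python 2, a parenthesized multi-argument call such as print(module, 'is not exist!') prints a tuple rather than two space-separated values. A minimal standalone sketch of the pitfall (not part of any patch here):

# Python 2: print is a statement, so print("a", "b") outputs the tuple ('a', 'b').
# Python 3: print is a builtin function and outputs: a b
# The future import below makes Python 2 behave like Python 3:
from __future__ import print_function

print("a", "b")         # -> a b   (identical on Python 2 and 3 with the import)
print("single string")  # single-argument calls parse the same either way

Single-argument calls like print("show example") are valid under both interpreters, which is why only the multi-argument and string-concatenation cases in these hunks are behavior-changing.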
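The surrounding timeout-interval patch applies one mechanical change across aliyun-vpc-move-ip, gcp-vpc-move-vip.in, mariadb.in and sybaseASE.in (whose hunks continue below): bare numeric timeout and interval values in the agents' action metadata gain an explicit "s" (seconds) suffix. The hunks were edited by hand, but the transformation is equivalent to a substitution of this shape (an illustrative sketch only; add_seconds_suffix is not part of the patch):

import re

def add_seconds_suffix(metadata_xml):
    # timeout="180" -> timeout="180s", interval="30" -> interval="30s";
    # values that already carry a unit are left alone, because \d+" only
    # matches bare digits immediately followed by the closing quote.
    return re.sub(r'\b(timeout|interval)="(\d+)"', r'\1="\2s"', metadata_xml)

print(add_seconds_suffix('<action name="monitor" timeout="30" interval="30" depth="0"/>'))
# -> <action name="monitor" timeout="30s" interval="30s" depth="0"/>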
+ #
+
+@@ -226,19 +226,19 @@ meta_data()
+
+
+
+-
+-
++
++
+
+
+-
+-
++
++
+
+
+-
+-
++
++
+
+-
+-
++
++
+
+
+ EOT
diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec
new file mode 100644
index 0000000..126d567
--- /dev/null
+++ b/SPECS/resource-agents.spec
@@ -0,0 +1,1881 @@
+#
+# All modifications and additions to the file contributed by third parties
+# remain the property of their copyright owners, unless otherwise agreed
+# upon. The license for this file, and modifications and additions to the
+# file, is the same license as for the pristine package itself (unless the
+# license for the pristine package is not an Open Source License, in which
+# case the license is the MIT License). An "Open Source License" is a
+# license that conforms to the Open Source Definition (Version 1.9)
+# published by the Open Source Initiative.
+#
+
+# Below is the script used to generate a new source file
+# from the resource-agent upstream git repo.
+#
+# TAG=$(git log --pretty="format:%h" -n 1)
+# distdir="ClusterLabs-resource-agents-${TAG}"
+# TARFILE="${distdir}.tar.gz"
+# rm -rf $TARFILE $distdir
+# git archive --prefix=$distdir/ HEAD | gzip > $TARFILE
+#
+
+%global upstream_prefix ClusterLabs-resource-agents
+%global upstream_version e711383f
+
+# bundles
+%global bundled_lib_dir bundled
+## google cloud
+# google-cloud-sdk bundle
+%global googlecloudsdk google-cloud-sdk
+%global googlecloudsdk_version 241.0.0
+%global googlecloudsdk_dir %{bundled_lib_dir}/gcp/%{googlecloudsdk}
+# python-httplib2 bundle
+%global httplib2 httplib2
+%global httplib2_version 0.18.1
+%global httplib2_dir %{bundled_lib_dir}/gcp/%{httplib2}
+# python-pyroute2 bundle
+%global pyroute2 pyroute2
+%global pyroute2_version 0.4.13
+%global pyroute2_dir %{bundled_lib_dir}/gcp/%{pyroute2}
+## alibaba cloud
+# python-colorama bundle
+%global colorama colorama
+%global colorama_version 0.3.3
+%global colorama_dir %{bundled_lib_dir}/aliyun/%{colorama}
+# python-pycryptodome bundle
+%global pycryptodome pycryptodome
+%global pycryptodome_version 3.6.4
+%global pycryptodome_dir %{bundled_lib_dir}/aliyun/%{pycryptodome}
+# python-aliyun-sdk-core bundle
+%global aliyunsdkcore aliyun-python-sdk-core
+%global aliyunsdkcore_version 2.13.1
+%global aliyunsdkcore_dir %{bundled_lib_dir}/aliyun/%{aliyunsdkcore}
+# python-aliyun-sdk-ecs bundle
+%global aliyunsdkecs aliyun-python-sdk-ecs
+%global aliyunsdkecs_version 4.9.3
+%global aliyunsdkecs_dir %{bundled_lib_dir}/aliyun/%{aliyunsdkecs}
+# python-aliyun-sdk-vpc bundle
+%global aliyunsdkvpc aliyun-python-sdk-vpc
+%global aliyunsdkvpc_version 3.0.2
+%global aliyunsdkvpc_dir %{bundled_lib_dir}/aliyun/%{aliyunsdkvpc}
+# aliyuncli bundle
+%global aliyuncli aliyun-cli
+%global aliyuncli_version 2.1.10
+%global aliyuncli_dir %{bundled_lib_dir}/aliyun/%{aliyuncli}
+
+# determine the ras-set to process based on configure invocation
+%bcond_with rgmanager
+%bcond_without linuxha
+
+Name: resource-agents
+Summary: Open Source HA Reusable Cluster Resource Scripts
+Version: 4.1.1
+Release: 68%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}.1
+License: GPLv2+ and LGPLv2+
+URL: https://github.com/ClusterLabs/resource-agents
+%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+Group: System Environment/Base
+%else
+Group: Productivity/Clustering/HA
+%endif
+Source0: %{upstream_prefix}-%{upstream_version}.tar.gz
+Source1: %{googlecloudsdk}-%{googlecloudsdk_version}-linux-x86_64.tar.gz
+Source2: %{httplib2}-%{httplib2_version}.tar.gz
+Source3: 
%{pyroute2}-%{pyroute2_version}.tar.gz +Source4: %{colorama}-%{colorama_version}.tar.gz +Source5: %{pycryptodome}-%{pycryptodome_version}.tar.gz +Source6: %{aliyunsdkcore}-%{aliyunsdkcore_version}.tar.gz +Source7: %{aliyunsdkecs}-%{aliyunsdkecs_version}.tar.gz +Source8: %{aliyunsdkvpc}-%{aliyunsdkvpc_version}.tar.gz +Source9: %{aliyuncli}-%{aliyuncli_version}.tar.gz +Patch0: nova-compute-wait-NovaEvacuate.patch +Patch1: LVM-volume_group_check_only.patch +Patch2: bz1552330-vdo-vol.patch +Patch3: IPaddr2-monitor_retries.patch +Patch4: VirtualDomain-stateless-support.patch +Patch5: 1-configure-add-python-path-detection.patch +Patch6: 2-ci-skip-python-agents-in-shellcheck.patch +Patch7: 3-gcp-vpc-move-vip.patch +Patch8: 4-gcp-vpc-move-route.patch +Patch9: 5-python-library.patch +Patch10: dont-use-ocf_attribute_target-for-metadata.patch +Patch11: LVM-activate-fix-issue-with-dashes.patch +Patch12: 6-gcp-move-vip-filter-aggregatedlist.patch +Patch13: aliyun-vpc-move-ip-1.patch +Patch14: aliyun-vpc-move-ip-2-fixes.patch +Patch15: aliyun-vpc-move-ip-3-fix-manpage.patch +Patch16: build-add-missing-manpages.patch +Patch17: findif-only-match-lines-with-netmasks.patch +Patch18: 7-gcp-stackdriver-logging-note.patch +Patch19: LVM-fix-missing-dash.patch +Patch20: lvmlockd-add-cmirrord-support.patch +Patch21: LVM-activate-1-warn-vg_access_mode.patch +Patch22: bz1607607-podman.patch +Patch23: aliyun-vpc-move-ip-5-improve-metadata-manpage.patch +Patch24: aws-vpc-move-ip-1-avoid-false-positive-monitor.patch +Patch25: aws-vpc-move-ip-2-avoid-false-positive-monitor.patch +Patch26: LVM-activate-2-parameters-access-mode-fixes.patch +Patch27: timeout-interval-add-s-suffix.patch +Patch28: metadata-add-missing-s-suffix.patch +Patch29: bz1631291-systemd-tmpfiles-configurable-path.patch +Patch30: nfsserver-mount-rpc_pipefs.patch +Patch31: bz1635785-redis-pidof-basename.patch +Patch32: bz1642027-nfsserver-var-lib-nfs-fix.patch +Patch33: bz1662466-vdo-vol-fix-monitor-action.patch +Patch34: bz1643307-LVM-activate-dont-fail-initial-probe.patch +Patch35: bz1658664-LVM-activate-dont-require-locking_type.patch +Patch36: bz1689184-Squid-1-fix-pidfile-issue.patch +Patch37: bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch +Patch38: bz1667414-2-LVM-activate-only-count-volumes.patch +Patch39: bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch +Patch40: bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch +Patch41: bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch +Patch42: bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch +Patch43: bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch +Patch44: bz1669140-Route-make-family-parameter-optional.patch +Patch45: bz1683548-redis-mute-password-warning.patch +Patch46: bz1692413-1-iSCSITarget-create-iqn-when-it-doesnt-exist.patch +Patch47: bz1689184-Squid-2-dont-run-pgrep-without-PID.patch +Patch48: bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch +Patch49: bz1707969-2-ocf_is_true-add-True-to-regexp.patch +Patch50: bz1717759-Filesystem-remove-notify-action-from-metadata.patch +Patch51: bz1719684-dhcpd-keep-SELinux-context-chroot.patch +Patch52: bz1718219-podman-1-avoid-double-inspect-call.patch +Patch53: bz1718219-podman-2-improve-monitor-action.patch +Patch54: bz1718219-podman-3-remove-docker-remnant.patch +Patch55: bz1718219-podman-4-use-exec-to-avoid-performance-issues.patch +Patch56: bz1730455-LVM-activate-fix-monitor-hang.patch +Patch57: bz1732867-CTDB-1-explicitly-use-bash-shell.patch +Patch58: 
bz1732867-CTDB-2-add-ctdb_max_open_files-parameter.patch +Patch59: bz1732867-CTDB-3-fixes.patch +Patch60: bz1732867-CTDB-4-add-v4.9-support.patch +Patch61: bz1692413-2-iSCSILogicalUnit-create-acls-fix.patch +Patch62: bz1736746-podman-drop-in-support.patch +Patch63: bz1692960-mysql-galera-runuser-su-to-avoid-dac_override.patch +Patch64: bz1745713-rabbitmq-cluster-1-monitor-mnesia-status.patch +Patch65: bz1745713-rabbitmq-cluster-2-fail-when-in-minority-partition.patch +Patch66: bz1745713-rabbitmq-cluster-3-fix-stop-regression.patch +Patch67: bz1745713-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch +Patch68: bz1745713-rabbitmq-cluster-5-ensure-node-attribures-removed.patch +Patch69: bz1745713-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch +Patch70: bz1745713-rabbitmq-cluster-7-suppress-additional-output.patch +Patch71: bz1695039-LVM-activate-return-NOT_RUNNING-rejoin-cluster.patch +Patch72: bz1738428-LVM-activate-detect-volume-without-reboot.patch +Patch73: bz1744103-Filesystem-1-monitor-symlink-support.patch +Patch74: bz1744103-Filesystem-2-add-symlink-support.patch +Patch75: bz1744103-Filesystem-3-fix-umount-disk-failure.patch +Patch76: bz1744103-Filesystem-4-fix-readlink-issue.patch +Patch77: bz1744140-Filesystem-1-avoid-corrupt-mount-list.patch +Patch78: bz1744140-Filesystem-2-prevent-killing-bind-mount.patch +Patch79: bz1744140-Filesystem-3-improved-bind-mount-check.patch +Patch80: bz1757837-IPsrcaddr-fix-regression-without-NetworkManager.patch +Patch81: bz1744224-IPsrcaddr-1-add-destination-and-table-parameters.patch +Patch82: bz1748768-docker-fix-stop-issues.patch +Patch83: bz1750261-Route-1-dont-fence-when-parameters-not-set.patch +Patch84: bz1750352-rabbitmq-cluster-restore-users-single-node-mode.patch +Patch85: bz1751700-IPaddr2-1-sanitize-IPv6-IPs.patch +Patch86: bz1751700-IPaddr2-2-return-empty-when-sanitation-fails.patch +Patch87: bz1751962-nfsserver-1-systemd-perf-improvements.patch +Patch88: bz1751962-nfsserver-2-systemd-use-no-legend.patch +Patch89: bz1755760-NovaEvacuate-evacuate_delay.patch +Patch90: bz1750261-Route-2-validate-start-validate-all.patch +Patch91: bz1741843-LVM-activate-partial-activation.patch +Patch92: bz1764888-exportfs-allow-same-fsid.patch +Patch93: bz1765128-mysql-galera-fix-incorrect-rc.patch +Patch94: bz1741042-IPaddr2-add-noprefixroute-parameter.patch +Patch95: bz1744224-IPsrcaddr-2-local-rule-destination-fixes.patch +Patch96: bz1788889-podman-improve-image-exist-check.patch +Patch97: bz1744224-IPsrcaddr-3-fix-probe-issues.patch +Patch98: bz1767916-IPaddr2-clusterip-not-supported.patch +Patch99: bz1777381-Filesystem-1-refresh-UUID.patch +Patch100: bz1777381-Filesystem-2-udev-settle.patch +Patch101: bz1744224-IPsrcaddr-4-fix-hardcoded-device.patch +Patch102: bz1792196-rabbitmq-cluster-delete-nodename-when-stop-fails.patch +Patch103: bz1808468-1-lvmlockd-fix-conditionals.patch +Patch104: bz1808468-2-remove-locking_type.patch +Patch105: bz1759115-aws-vpc-route53-1-update.patch +Patch106: bz1804658-azure-lb-1-remove-status-metadata.patch +Patch107: bz1804658-azure-lb-2-add-socat-support.patch +Patch108: bz1810466-aws-vpc-move-ip-1-add-routing_table_role.patch +Patch109: bz1810466-aws-vpc-move-ip-2-update-metadata.patch +Patch110: bz1792237-redis-1-fix-validate-all.patch +Patch111: bz1792237-redis-2-run-validate-during-start.patch +Patch112: bz1817432-use-safe-temp-file-location.patch +Patch113: bz1817598-ocf_is_clone-1-fix-clone-max-can-be-0.patch +Patch114: bz1817598-ocf_is_clone-2-update-comment.patch +Patch115: 
bz1819021-aws-vpc-move-ip-delete-remaining-route-entries.patch +Patch116: bz1759115-aws-vpc-route53-2-add-public-and-secondary-ip-support.patch +Patch117: bz1633251-gcp-pd-move-1.patch +Patch118: bz1633251-gcp-pd-move-2-use-OCF_FUNCTIONS_DIR.patch +Patch119: bz1633251-gcp-pd-move-3-add-stackdriver_logging-to-metadata.patch +Patch120: bz1819965-1-ocf.py-update.patch +Patch121: bz1819965-2-azure-events.patch +Patch122: bz1759115-aws-vpc-route53-3-awscli-property.patch +Patch123: bz1744190-pgsql-1-set-primary-standby-initial-score.patch +Patch124: bz1744190-pgsql-2-improve-start-checks.patch +Patch125: bz1820523-exportfs-1-add-symlink-support.patch +Patch126: bz1832321-rabbitmq-cluster-increase-wait-timeout.patch +Patch127: bz1818997-nfsserver-1-fix-nfsv4-only-support.patch +Patch128: bz1830716-NovaEvacuate-suppress-expected-error.patch +Patch129: bz1836945-db2-hadr-promote-standby-node.patch +Patch130: bz1633251-gcp-pd-move-4-fixes-and-improvements.patch +Patch131: bz1633251-gcp-pd-move-5-bundle.patch +Patch132: bz1839721-podman-force-rm-container-if-rm-fails.patch +Patch133: bz1820523-exportfs-2-fix-monitor-action.patch +Patch134: bz1843999-aliyun-vpc-move-ip-log-output-when-failing.patch +Patch135: bz1845574-azure-events-1-handle-exceptions-in-urlopen.patch +Patch136: bz1845581-nfsserver-dont-log-error-message-file-doesnt-exist.patch +Patch137: bz1845583-exportfs-1-describe-clientspec-format-in-metadata.patch +Patch138: bz1845583-exportfs-2-fix-typo.patch +Patch139: bz1814896-Filesystem-fast_stop-default-to-no-for-GFS2.patch +Patch140: bz1836186-pgsql-support-Pacemaker-v2.03-output.patch +Patch141: bz1819965-3-azure-events-decode-when-type-not-str.patch +Patch142: bz1818997-nfsserver-2-stop-nfsdcld-if-present.patch +Patch143: bz1818997-3-nfsserver-nfsnotify-fix-selinux-label-issue.patch +Patch144: bz1845574-azure-events-2-import-urlerror-encode-postdata.patch +Patch145: bz1846733-gcp-vpc-move-vip-1-support-multiple-alias-ips.patch +Patch146: bz1846733-gcp-vpc-move-vip-2-fix-list-sort.patch +Patch147: bz1850778-azure-lb-fix-redirect-issue.patch +Patch148: bz1905587-aws-add-imdsv2-support.patch + +# bundle patches +Patch1000: 7-gcp-bundled.patch +Patch1001: 8-google-cloud-sdk-fixes.patch +Patch1002: 9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch +Patch1003: 10-gcloud-support-info.patch +Patch1004: bz1691456-gcloud-dont-detect-python2.patch +Patch1005: aliyun-vpc-move-ip-4-bundled.patch +Patch1006: python3-syntax-fixes.patch +Patch1007: aliyuncli-python3-fixes.patch + +Obsoletes: heartbeat-resources <= %{version} +Provides: heartbeat-resources = %{version} + +# Build dependencies +BuildRequires: automake autoconf gcc +BuildRequires: perl-interpreter python3-devel +BuildRequires: libxslt glib2-devel +BuildRequires: systemd +BuildRequires: which + +%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} +#BuildRequires: cluster-glue-libs-devel +BuildRequires: docbook-style-xsl docbook-dtds +%if 0%{?rhel} == 0 +BuildRequires: libnet-devel +%endif +%endif + +## Runtime deps +# system tools shared by several agents +Requires: /bin/bash /usr/bin/grep /bin/sed /bin/gawk +Requires: /bin/ps /usr/bin/pkill /usr/bin/hostname /usr/bin/netstat +Requires: /usr/sbin/fuser /bin/mount + +# Filesystem / fs.sh / netfs.sh +Requires: /sbin/fsck +Requires: /usr/sbin/fsck.ext2 /usr/sbin/fsck.ext3 /usr/sbin/fsck.ext4 +Requires: /usr/sbin/fsck.xfs +Requires: /sbin/mount.nfs /sbin/mount.nfs4 /usr/sbin/mount.cifs + +# IPaddr2 +Requires: /sbin/ip + +# LVM / lvm.sh +Requires: /usr/sbin/lvm + +# nfsserver / 
netfs.sh
+Requires: /usr/sbin/rpc.nfsd /sbin/rpc.statd /usr/sbin/rpc.mountd
+
+# ocf.py
+Requires: python3
+
+# rgmanager
+%if %{with rgmanager}
+# ip.sh
+Requires: /usr/sbin/ethtool
+Requires: /sbin/rdisc /usr/sbin/arping /bin/ping /bin/ping6
+
+# nfsexport.sh
+Requires: /sbin/findfs
+Requires: /sbin/quotaon /sbin/quotacheck
+%endif
+
+%description
+A set of scripts to interface with several services to operate in a
+High Availability environment for both Pacemaker and rgmanager
+service managers.
+
+%ifarch x86_64
+%package aliyun
+License: GPLv2+ and LGPLv2+ and ASL 2.0 and BSD and MIT
+Summary: Alibaba Cloud (Aliyun) resource agents
+%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+Group: System Environment/Base
+%else
+Group: Productivity/Clustering/HA
+%endif
+Requires: %{name} = %{version}-%{release}
+Requires: python3-jmespath >= 0.9.0
+Requires: python3-urllib3
+# python-colorama bundle
+Provides: bundled(python-%{colorama}) = %{colorama_version}
+# python-pycryptodome bundle
+Provides: bundled(python-%{pycryptodome}) = %{pycryptodome_version}
+# python-aliyun-sdk-core bundle
+Provides: bundled(python-aliyun-sdk-core) = %{aliyunsdkcore_version}
+# python-aliyun-sdk-ecs bundle
+Provides: bundled(python-aliyun-sdk-ecs) = %{aliyunsdkecs_version}
+# python-aliyun-sdk-vpc bundle
+Provides: bundled(python-aliyun-sdk-vpc) = %{aliyunsdkvpc_version}
+# aliyuncli bundle
+Provides: bundled(aliyuncli) = %{aliyuncli_version}
+
+%description aliyun
+Alibaba Cloud (Aliyun) resource agents allow Alibaba Cloud
+(Aliyun) instances to be managed in a cluster environment.
+%endif
+
+%ifarch x86_64
+%package gcp
+License: GPLv2+ and LGPLv2+ and BSD and ASL 2.0 and MIT and Python
+Summary: Google Cloud Platform resource agents
+%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+Group: System Environment/Base
+%else
+Group: Productivity/Clustering/HA
+%endif
+Requires: %{name} = %{version}-%{release}
+Requires: python3-google-api-client
+# google-cloud-sdk bundle
+Requires: python3-cryptography >= 1.7.2
+Requires: python3-dateutil >= 2.6.0
+Provides: bundled(%{googlecloudsdk}) = %{googlecloudsdk_version}
+Provides: bundled(python-antlr3) = 3.1.1
+Provides: bundled(python-appdirs) = 1.4.0
+Provides: bundled(python-argparse) = 1.2.1
+Provides: bundled(python-chardet) = 2.3.0
+Provides: bundled(python-dulwich) = 0.10.2
+Provides: bundled(python-ipaddress) = 1.0.16
+Provides: bundled(python-ipaddr) = 2.1.11
+Provides: bundled(python-mako) = 1.0.7
+Provides: bundled(python-oauth2client) = 3.0.0
+Provides: bundled(python-prompt_toolkit) = 1.0.13
+Provides: bundled(python-pyasn1) = 0.4.2
+Provides: bundled(python-pyasn1_modules) = 0.2.1
+Provides: bundled(python-pygments) = 2.2.0
+Provides: bundled(python-pyparsing) = 2.1.10
+Provides: bundled(python-requests) = 2.10.0
+Provides: bundled(python-six) = 1.11.0
+Provides: bundled(python-uritemplate) = 3.0.0
+Provides: bundled(python-urllib3) = 1.15.1
+Provides: bundled(python-websocket) = 0.47.0
+Provides: bundled(python-yaml) = 3.12
+# python-pyroute2 bundle
+Provides: bundled(%{pyroute2}) = %{pyroute2_version}
+
+%description gcp
+The Google Cloud Platform resource agents allow Google Cloud
+Platform instances to be managed in a cluster environment.
+%endif
+
+%prep
+%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos_version} == 0 && 0%{?rhel} == 0
+%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. 
Please install the correct build packages or define the required macros manually.} +exit 1 +%endif +%setup -q -n %{upstream_prefix}-%{upstream_version} +%patch0 -p1 +%patch1 -p1 +%patch2 -p1 +%patch3 -p1 +%patch4 -p1 +%patch5 -p1 +%patch6 -p1 +%patch7 -p1 +%patch8 -p1 +%patch9 -p1 +%patch10 -p1 +%patch11 -p1 +%patch12 -p1 +%patch13 -p1 +%patch14 -p1 +%patch15 -p1 +%patch16 -p1 +%patch17 -p1 +%patch18 -p1 +%patch19 -p1 +%patch20 -p1 +%patch21 -p1 +%patch22 -p1 +%patch23 -p1 +%patch24 -p1 +%patch25 -p1 +%patch26 -p1 +%patch27 -p1 +%patch28 -p1 +%patch29 -p1 +%patch30 -p1 +%patch31 -p1 +%patch32 -p1 +%patch33 -p1 +%patch34 -p1 +%patch35 -p1 +%patch36 -p1 +%patch37 -p1 +%patch38 -p1 +%patch39 -p1 +%patch40 -p1 -F2 +%patch41 -p1 +%patch42 -p1 +%patch43 -p1 +%patch44 -p1 +%patch45 -p1 +%patch46 -p1 +%patch47 -p1 +%patch48 -p1 +%patch49 -p1 +%patch50 -p1 +%patch51 -p1 +%patch52 -p1 +%patch53 -p1 +%patch54 -p1 +%patch55 -p1 +%patch56 -p1 +%patch57 -p1 +%patch58 -p1 +%patch59 -p1 +%patch60 -p1 -F1 +%patch61 -p1 +%patch62 -p1 -F2 +%patch63 -p1 +%patch64 -p1 +%patch65 -p1 +%patch66 -p1 +%patch67 -p1 +%patch68 -p1 +%patch69 -p1 +%patch70 -p1 +%patch71 -p1 +%patch72 -p1 +%patch73 -p1 +%patch74 -p1 +%patch75 -p1 +%patch76 -p1 +%patch77 -p1 +%patch78 -p1 +%patch79 -p1 +%patch80 -p1 +%patch81 -p1 +%patch82 -p1 +%patch83 -p1 +%patch84 -p1 +%patch85 -p1 +%patch86 -p1 +%patch87 -p1 +%patch88 -p1 +%patch89 -p1 +%patch90 -p1 +%patch91 -p1 +%patch92 -p1 +%patch93 -p1 +%patch94 -p1 -F2 +%patch95 -p1 +%patch96 -p1 +%patch97 -p1 +%patch98 -p1 +%patch99 -p1 +%patch100 -p1 +%patch101 -p1 +%patch102 -p1 +%patch103 -p1 +%patch104 -p1 +%patch105 -p1 +%patch106 -p1 +%patch107 -p1 +%patch108 -p1 +%patch109 -p1 +%patch110 -p1 +%patch111 -p1 +%patch112 -p1 +%patch113 -p1 +%patch114 -p1 +%patch115 -p1 +%patch116 -p1 +%patch117 -p1 +%patch118 -p1 +%patch119 -p1 +%patch120 -p1 +%patch121 -p1 +%patch122 -p1 +%patch123 -p1 +%patch124 -p1 +%patch125 -p1 +%patch126 -p1 +%patch127 -p1 +%patch128 -p1 -F2 +%patch129 -p1 +%patch130 -p1 +%patch131 -p1 +%patch132 -p1 +%patch133 -p1 +%patch134 -p1 +%patch135 -p1 +%patch136 -p1 +%patch137 -p1 +%patch138 -p1 +%patch139 -p1 +%patch140 -p1 +%patch141 -p1 +%patch142 -p1 +%patch143 -p1 +%patch144 -p1 +%patch145 -p1 +%patch146 -p1 +%patch147 -p1 +%patch148 -p1 + +chmod 755 heartbeat/nova-compute-wait +chmod 755 heartbeat/NovaEvacuate + +# bundles +mkdir -p %{bundled_lib_dir}/gcp +mkdir -p %{bundled_lib_dir}/aliyun + +# google-cloud-sdk bundle +%ifarch x86_64 +tar -xzf %SOURCE1 -C %{bundled_lib_dir}/gcp +## upgrade httplib2 to fix CVE-2020-11078 +pushd %{googlecloudsdk_dir} +rm -rf lib/third_party/httplib2 +popd + +# python-httplib2 bundle +tar -xzf %SOURCE2 -C %{bundled_lib_dir} +mv %{bundled_lib_dir}/%{httplib2}-%{httplib2_version} %{httplib2_dir} + +# gcp*: append bundled-directory to search path, gcloud-ra +%patch1000 -p1 +# google-cloud-sdk fixes +%patch1001 -p1 +# replace python-rsa with python-cryptography +%patch1002 -p1 +# gcloud support info +%patch1003 -p1 +# gcloud remove python 2 detection +%patch1004 -p1 +# rename gcloud +mv %{googlecloudsdk_dir}/bin/gcloud %{googlecloudsdk_dir}/bin/gcloud-ra +# keep googleapiclient +mv %{googlecloudsdk_dir}/platform/bq/third_party/googleapiclient %{googlecloudsdk_dir}/lib/third_party +# only keep gcloud +rm -rf %{googlecloudsdk_dir}/bin/{bootstrapping,bq,dev_appserver.py,docker-credential-gcloud,endpointscfg.py,git-credential-gcloud.sh,gsutil,java_dev_appserver.sh} 
%{googlecloudsdk_dir}/{completion.*,deb,install.*,path.*,platform,properties,RELEASE_NOTES,rpm,VERSION} +# remove Python 2 code +rm -rf %{googlecloudsdk_dir}/lib/third_party/*/python2 +# remove python-rsa +rm -rf %{googlecloudsdk_dir}/lib/third_party/rsa +# remove grpc +rm -rf %{googlecloudsdk_dir}/lib/third_party/grpc +# remove dateutil +rm -rf %{googlecloudsdk_dir}/lib/third_party/dateutil +# docs/licenses +cp %{googlecloudsdk_dir}/README %{googlecloudsdk}_README +cp %{googlecloudsdk_dir}/lib/third_party/argparse/README.txt %{googlecloudsdk}_argparse_README.txt +cp %{googlecloudsdk_dir}/LICENSE %{googlecloudsdk}_LICENSE +cp %{httplib2_dir}/LICENSE %{googlecloudsdk}_httplib2_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/contextlib2/LICENSE %{googlecloudsdk}_contextlib2_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/concurrent/LICENSE %{googlecloudsdk}_concurrent_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/yaml/LICENSE %{googlecloudsdk}_yaml_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/pyu2f/LICENSE %{googlecloudsdk}_pyu2f_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/ml_sdk/LICENSE %{googlecloudsdk}_ml_sdk_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/ml_sdk/pkg/LICENSE %{googlecloudsdk}_pkg_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/ipaddr/LICENSE %{googlecloudsdk}_ipaddr_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/urllib3/LICENSE %{googlecloudsdk}_urllib3_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/ipaddress/LICENSE %{googlecloudsdk}_ipaddress_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/requests/LICENSE %{googlecloudsdk}_requests_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/docker/LICENSE %{googlecloudsdk}_docker_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/monotonic/LICENSE %{googlecloudsdk}_monotonic_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/websocket/LICENSE %{googlecloudsdk}_websocket_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/fasteners/LICENSE %{googlecloudsdk}_fasteners_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/wcwidth/LICENSE %{googlecloudsdk}_wcwidth_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/pygments/LICENSE %{googlecloudsdk}_pygments_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/oauth2client/LICENSE %{googlecloudsdk}_oauth2client_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/uritemplate/LICENSE %{googlecloudsdk}_uritemplate_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/dulwich/LICENSE %{googlecloudsdk}_dulwich_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/mako/LICENSE %{googlecloudsdk}_mako_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/packaging/LICENSE %{googlecloudsdk}_packaging_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/socks/LICENSE %{googlecloudsdk}_socks_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/antlr3/LICENSE %{googlecloudsdk}_antlr3_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/argparse/LICENSE.txt %{googlecloudsdk}_argparse_LICENSE.txt +cp %{googlecloudsdk_dir}/lib/third_party/chardet/LICENSE %{googlecloudsdk}_chardet_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/ruamel/LICENSE %{googlecloudsdk}_ruamel_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/appdirs/LICENSE %{googlecloudsdk}_appdirs_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/argcomplete/LICENSE %{googlecloudsdk}_argcomplete_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/pyasn1_modules/LICENSE %{googlecloudsdk}_pyasn1_modules_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/setuptools/LICENSE %{googlecloudsdk}_setuptools_LICENSE +cp 
%{googlecloudsdk_dir}/lib/third_party/google/LICENSE %{googlecloudsdk}_google_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/google/protobuf/LICENSE %{googlecloudsdk}_protobuf_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/six/LICENSE %{googlecloudsdk}_six_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/dns/LICENSE %{googlecloudsdk}_dns_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/enum/LICENSE %{googlecloudsdk}_enum_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/gae_ext_runtime/LICENSE %{googlecloudsdk}_gae_ext_runtime_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/fancy_urllib/LICENSE %{googlecloudsdk}_fancy_urllib_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/pyasn1/LICENSE %{googlecloudsdk}_pyasn1_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/apitools/LICENSE %{googlecloudsdk}_apitools_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/containerregistry/LICENSE %{googlecloudsdk}_containerregistry_LICENSE + +# python-pyroute2 bundle +tar -xzf %SOURCE3 -C %{bundled_lib_dir}/gcp +mv %{bundled_lib_dir}/gcp/%{pyroute2}-%{pyroute2_version} %{pyroute2_dir} +cp %{pyroute2_dir}/README.md %{pyroute2}_README.md +cp %{pyroute2_dir}/README.license.md %{pyroute2}_README.license.md +cp %{pyroute2_dir}/LICENSE.Apache.v2 %{pyroute2}_LICENSE.Apache.v2 +cp %{pyroute2_dir}/LICENSE.GPL.v2 %{pyroute2}_LICENSE.GPL.v2 + +# python-colorama bundle +tar -xzf %SOURCE4 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{colorama}-%{colorama_version} %{colorama_dir} +cp %{colorama_dir}/LICENSE.txt %{colorama}_LICENSE.txt +cp %{colorama_dir}/README.rst %{colorama}_README.rst + +pushd %{colorama_dir} +# remove bundled egg-info +rm -rf *.egg-info +popd + +# python-pycryptodome bundle +tar -xzf %SOURCE5 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{pycryptodome}-%{pycryptodome_version} %{pycryptodome_dir} +cp %{pycryptodome_dir}/README.rst %{pycryptodome}_README.rst +cp %{pycryptodome_dir}/LICENSE.rst %{pycryptodome}_LICENSE.rst + +# python-aliyun-sdk-core bundle +tar -xzf %SOURCE6 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{aliyunsdkcore}-%{aliyunsdkcore_version} %{aliyunsdkcore_dir} +cp %{aliyunsdkcore_dir}/README.rst %{aliyunsdkcore}_README.rst + +# python-aliyun-sdk-ecs bundle +tar -xzf %SOURCE7 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{aliyunsdkecs}-%{aliyunsdkecs_version} %{aliyunsdkecs_dir} +cp %{aliyunsdkecs_dir}/README.rst %{aliyunsdkecs}_README.rst + +# python-aliyun-sdk-vpc bundle +tar -xzf %SOURCE8 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{aliyunsdkvpc}-%{aliyunsdkvpc_version} %{aliyunsdkvpc_dir} +cp %{aliyunsdkvpc_dir}/README.rst %{aliyunsdkvpc}_README.rst + +# aliyuncli bundle +tar -xzf %SOURCE9 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{aliyuncli}-%{aliyuncli_version} %{aliyuncli_dir} +cp %{aliyuncli_dir}/README.rst %{aliyuncli}_README.rst +cp %{aliyuncli_dir}/LICENSE %{aliyuncli}_LICENSE +# aliyun*: use bundled libraries +%patch1005 -p1 + +# aliyun Python 3 fixes +%patch1006 -p1 +%patch1007 -p1 +%endif + +%build +if [ ! 
-f configure ]; then + ./autogen.sh +fi + +%if 0%{?fedora} >= 11 || 0%{?centos_version} > 5 || 0%{?rhel} > 5 +CFLAGS="$(echo '%{optflags}')" +%global conf_opt_fatal "--enable-fatal-warnings=no" +%else +CFLAGS="${CFLAGS} ${RPM_OPT_FLAGS}" +%global conf_opt_fatal "--enable-fatal-warnings=yes" +%endif + +%if %{with rgmanager} +%global rasset rgmanager +%endif +%if %{with linuxha} +%global rasset linux-ha +%endif +%if %{with rgmanager} && %{with linuxha} +%global rasset all +%endif + +export CFLAGS + +%configure BASH_SHELL="/bin/bash" \ + PYTHON="%{__python3}" \ + %{conf_opt_fatal} \ +%if %{defined _unitdir} + --with-systemdsystemunitdir=%{_unitdir} \ +%endif +%if %{defined _tmpfilesdir} + --with-systemdtmpfilesdir=%{_tmpfilesdir} \ + --with-rsctmpdir=/run/resource-agents \ +%endif + --with-pkg-name=%{name} \ + --with-ras-set=%{rasset} + +%if %{defined jobs} +JFLAGS="$(echo '-j%{jobs}')" +%else +JFLAGS="$(echo '%{_smp_mflags}')" +%endif + +make $JFLAGS + +# python-httplib2 bundle +%ifarch x86_64 +pushd %{httplib2_dir} +%{__python3} setup.py build +popd + +# python-pyroute2 bundle +pushd %{pyroute2_dir} +%{__python3} setup.py build +popd + +# python-colorama bundle +pushd %{colorama_dir} +%{__python3} setup.py build +popd + +# python-pycryptodome bundle +pushd %{pycryptodome_dir} +%{__python3} setup.py build +popd + +# python-aliyun-sdk-core bundle +pushd %{aliyunsdkcore_dir} +%{__python3} setup.py build +popd + +# python-aliyun-sdk-ecs bundle +pushd %{aliyunsdkecs_dir} +%{__python3} setup.py build +popd + +# python-aliyun-sdk-vpc bundle +pushd %{aliyunsdkvpc_dir} +%{__python3} setup.py build +popd + +# aliyuncli bundle +pushd %{aliyuncli_dir} +%{__python3} setup.py build +popd +%endif + +%install +rm -rf %{buildroot} +make install DESTDIR=%{buildroot} + +# byte compile ocf.py +%py_byte_compile %{__python3} %{buildroot}%{_usr}/lib/ocf/lib/heartbeat + +# google-cloud-sdk bundle +%ifarch x86_64 +pushd %{googlecloudsdk_dir} +mkdir -p %{buildroot}/usr/lib/%{name}/%{googlecloudsdk_dir} +cp -a bin data lib %{buildroot}/usr/lib/%{name}/%{googlecloudsdk_dir} +mkdir %{buildroot}/%{_bindir} +ln -s /usr/lib/%{name}/%{googlecloudsdk_dir}/bin/gcloud-ra %{buildroot}/%{_bindir} +popd + +# python-httplib2 bundle +pushd %{httplib2_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{googlecloudsdk_dir}/lib/third_party +popd + +# python-pyroute2 bundle +pushd %{pyroute2_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/gcp +popd + +# python-colorama bundle +pushd %{colorama_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +popd + +# python-pycryptodome bundle +pushd %{pycryptodome_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +popd + +# python-aliyun-sdk-core bundle +pushd %{aliyunsdkcore_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +popd + +# python-aliyun-sdk-ecs bundle +pushd %{aliyunsdkecs_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +popd + +# python-aliyun-sdk-vpc bundle +pushd %{aliyunsdkvpc_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +popd + +# aliyuncli bundle +pushd 
%{aliyuncli_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +sed -i -e "/^import sys/asys.path.insert(0, '/usr/lib/%{name}/%{bundled_lib_dir}/aliyun')\nsys.path.insert(1, '/usr/lib/%{name}/%{bundled_lib_dir}/aliyun/aliyuncli')" %{buildroot}/%{_bindir}/aliyuncli +mv %{buildroot}/%{_bindir}/aliyuncli %{buildroot}/%{_bindir}/aliyuncli-ra +# aliyun_completer / aliyun_zsh_complete.sh +rm %{buildroot}/%{_bindir}/aliyun_* +popd +%endif + +## tree fixup +# remove docs (there is only one and they should come from doc sections in files) +rm -rf %{buildroot}/usr/share/doc/resource-agents + +## +# Create symbolic link between IPAddr and IPAddr2 +## +rm -f %{buildroot}/usr/lib/ocf/resource.d/heartbeat/IPaddr +ln -s /usr/lib/ocf/resource.d/heartbeat/IPaddr2 %{buildroot}/usr/lib/ocf/resource.d/heartbeat/IPaddr + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root) +%doc AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog +%if %{with linuxha} +%doc heartbeat/README.galera +%doc doc/README.webapps +%doc %{_datadir}/%{name}/ra-api-1.dtd +%doc %{_datadir}/%{name}/metadata.rng +%endif + +%if %{with rgmanager} +%{_datadir}/cluster +%{_sbindir}/rhev-check.sh +%endif + +%if %{with linuxha} +%dir %{_usr}/lib/ocf +%dir %{_usr}/lib/ocf/resource.d +%dir %{_usr}/lib/ocf/lib + +%{_usr}/lib/ocf/lib/heartbeat + +%{_usr}/lib/ocf/resource.d/heartbeat +%{_usr}/lib/ocf/resource.d/openstack +%if %{with rgmanager} +%{_usr}/lib/ocf/resource.d/redhat +%endif + +%if %{defined _unitdir} +%{_unitdir}/resource-agents-deps.target +%endif +%if %{defined _tmpfilesdir} +%{_tmpfilesdir}/%{name}.conf +%endif + +%dir %{_datadir}/%{name} +%dir %{_datadir}/%{name}/ocft +%{_datadir}/%{name}/ocft/configs +%{_datadir}/%{name}/ocft/caselib +%{_datadir}/%{name}/ocft/README +%{_datadir}/%{name}/ocft/README.zh_CN +%{_datadir}/%{name}/ocft/helpers.sh +%exclude %{_datadir}/%{name}/ocft/runocft +%exclude %{_datadir}/%{name}/ocft/runocft.prereq + +%{_sbindir}/ocft + +%{_includedir}/heartbeat + +%if %{defined _tmpfilesdir} +%dir %attr (1755, root, root) /run/resource-agents +%else +%dir %attr (1755, root, root) %{_var}/run/resource-agents +%endif + +%{_mandir}/man7/*.7* + +### +# Supported, but in another sub package +### +%exclude /usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip* +%exclude %{_mandir}/man7/*aliyun-vpc-move-ip* +%exclude /usr/lib/ocf/resource.d/heartbeat/gcp* +%exclude %{_mandir}/man7/*gcp* +%exclude /usr/lib/%{name}/%{bundled_lib_dir} + +### +# Moved to separate packages +### +%exclude /usr/lib/ocf/resource.d/heartbeat/SAP* +%exclude /usr/lib/ocf/lib/heartbeat/sap* +%exclude %{_mandir}/man7/*SAP* + +### +# Unsupported +### +%exclude /usr/lib/ocf/resource.d/heartbeat/clvm +%exclude /usr/lib/ocf/resource.d/heartbeat/LVM +%exclude /usr/lib/ocf/resource.d/heartbeat/AoEtarget +%exclude /usr/lib/ocf/resource.d/heartbeat/AudibleAlarm +%exclude /usr/lib/ocf/resource.d/heartbeat/ClusterMon +%exclude /usr/lib/ocf/resource.d/heartbeat/EvmsSCC +%exclude /usr/lib/ocf/resource.d/heartbeat/Evmsd +%exclude /usr/lib/ocf/resource.d/heartbeat/ICP +%exclude /usr/lib/ocf/resource.d/heartbeat/LinuxSCSI +%exclude /usr/lib/ocf/resource.d/heartbeat/ManageRAID +%exclude /usr/lib/ocf/resource.d/heartbeat/ManageVE +%exclude /usr/lib/ocf/resource.d/heartbeat/Pure-FTPd +%exclude /usr/lib/ocf/resource.d/heartbeat/Raid1 +%exclude /usr/lib/ocf/resource.d/heartbeat/ServeRAID +%exclude /usr/lib/ocf/resource.d/heartbeat/SphinxSearchDaemon +%exclude 
/usr/lib/ocf/resource.d/heartbeat/Stateful +%exclude /usr/lib/ocf/resource.d/heartbeat/SysInfo +%exclude /usr/lib/ocf/resource.d/heartbeat/VIPArip +%exclude /usr/lib/ocf/resource.d/heartbeat/WAS +%exclude /usr/lib/ocf/resource.d/heartbeat/WAS6 +%exclude /usr/lib/ocf/resource.d/heartbeat/WinPopup +%exclude /usr/lib/ocf/resource.d/heartbeat/Xen +%exclude /usr/lib/ocf/resource.d/heartbeat/anything +%exclude /usr/lib/ocf/resource.d/heartbeat/asterisk +%exclude /usr/lib/ocf/resource.d/heartbeat/dnsupdate +%exclude /usr/lib/ocf/resource.d/heartbeat/eDir88 +%exclude /usr/lib/ocf/resource.d/heartbeat/fio +%exclude /usr/lib/ocf/resource.d/heartbeat/ids +%exclude /usr/lib/ocf/resource.d/heartbeat/iface-bridge +%exclude /usr/lib/ocf/resource.d/heartbeat/ipsec +%exclude /usr/lib/ocf/resource.d/heartbeat/jira +%exclude /usr/lib/ocf/resource.d/heartbeat/kamailio +%exclude /usr/lib/ocf/resource.d/heartbeat/lxd-info +%exclude /usr/lib/ocf/resource.d/heartbeat/machine-info +%exclude /usr/lib/ocf/resource.d/heartbeat/mariadb +%exclude /usr/lib/ocf/resource.d/heartbeat/minio +%exclude /usr/lib/ocf/resource.d/heartbeat/mpathpersist +%exclude /usr/lib/ocf/resource.d/heartbeat/iscsi +%exclude /usr/lib/ocf/resource.d/heartbeat/jboss +%exclude /usr/lib/ocf/resource.d/heartbeat/ldirectord +%exclude /usr/lib/ocf/resource.d/heartbeat/lxc +%exclude /usr/lib/ocf/resource.d/heartbeat/openstack-cinder-volume +%exclude /usr/lib/ocf/resource.d/heartbeat/openstack-floating-ip +%exclude /usr/lib/ocf/resource.d/heartbeat/openstack-info +%exclude /usr/lib/ocf/resource.d/heartbeat/ovsmonitor +%exclude /usr/lib/ocf/resource.d/heartbeat/pgagent +%exclude /usr/lib/ocf/resource.d/heartbeat/pingd +%exclude /usr/lib/ocf/resource.d/heartbeat/pound +%exclude /usr/lib/ocf/resource.d/heartbeat/proftpd +%exclude /usr/lib/ocf/resource.d/heartbeat/rkt +%exclude /usr/lib/ocf/resource.d/heartbeat/scsi2reservation +%exclude /usr/lib/ocf/resource.d/heartbeat/sfex +%exclude /usr/lib/ocf/resource.d/heartbeat/sg_persist +%exclude /usr/lib/ocf/resource.d/heartbeat/syslog-ng +%exclude /usr/lib/ocf/resource.d/heartbeat/varnish +%exclude /usr/lib/ocf/resource.d/heartbeat/vmware +%exclude /usr/lib/ocf/resource.d/heartbeat/zabbixserver +%exclude /usr/lib/ocf/resource.d/heartbeat/mysql-proxy +%exclude /usr/lib/ocf/resource.d/heartbeat/rsyslog +%exclude /usr/lib/ocf/resource.d/heartbeat/vsftpd +%exclude /usr/lib/ocf/resource.d/heartbeat/ZFS +%exclude %{_mandir}/man7/ocf_heartbeat_clvm.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_LVM.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_AoEtarget.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_AudibleAlarm.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ClusterMon.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_EvmsSCC.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Evmsd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ICP.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_IPaddr.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_LinuxSCSI.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ManageRAID.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ManageVE.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Pure-FTPd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Raid1.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ServeRAID.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_SphinxSearchDaemon.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Stateful.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_SysInfo.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_VIPArip.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_WAS.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_WAS6.7.gz +%exclude 
%{_mandir}/man7/ocf_heartbeat_WinPopup.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Xen.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_anything.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_asterisk.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_dnsupdate.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_eDir88.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_fio.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ids.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_iface-bridge.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ipsec.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_iscsi.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_jboss.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_jira.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_kamailio.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_lxc.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_lxd-info.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_machine-info.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_mariadb.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_minio.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_mpathpersist.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_openstack-cinder-volume.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_openstack-floating-ip.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_openstack-info.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ovsmonitor.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_pgagent.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_pingd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_pound.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_proftpd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_rkt.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_scsi2reservation.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_sfex.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_sg_persist.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_syslog-ng.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_varnish.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_vmware.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_vsftpd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_zabbixserver.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_mysql-proxy.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_rsyslog.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ZFS.7.gz + +### +# Other excluded files. +### +# This tool has to be updated for the new pacemaker lrmd. 
+%exclude %{_sbindir}/ocf-tester +%exclude %{_mandir}/man8/ocf-tester.8* +# ldirectord is not supported +%exclude /etc/ha.d/resource.d/ldirectord +%exclude /etc/init.d/ldirectord +%exclude %{_unitdir}/ldirectord.service +%exclude /etc/logrotate.d/ldirectord +%exclude /usr/sbin/ldirectord +%exclude %{_mandir}/man8/ldirectord.8.gz + +# For compatibility with pre-existing agents +%dir %{_sysconfdir}/ha.d +%{_sysconfdir}/ha.d/shellfuncs + +%{_libexecdir}/heartbeat +%endif + +%if %{with rgmanager} +%post -n resource-agents +# regenerate the rgmanager schema cache; the trailing '||:' ensures this scriptlet never fails the transaction +ccs_update_schema > /dev/null 2>&1 ||: +%endif + +%ifarch x86_64 +%files aliyun +%doc aliyun*_README* %{colorama}_README.rst %{pycryptodome}_README.rst +%license %{aliyuncli}_LICENSE %{colorama}_LICENSE.txt %{pycryptodome}_LICENSE.rst +%defattr(-,root,root) +/usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip* +%{_mandir}/man7/*aliyun-vpc-move-ip* +# bundle +%{_bindir}/aliyuncli-ra +%dir /usr/lib/%{name} +/usr/lib/%{name}/%{bundled_lib_dir}/aliyun +%endif + +%ifarch x86_64 +%files gcp +%doc %{googlecloudsdk}_*README* +%license %{googlecloudsdk}_*LICENSE* +%doc %{pyroute2}_README* +%license %{pyroute2}_LICENSE* +%defattr(-,root,root) +/usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-vip* +%{_mandir}/man7/*gcp-vpc-move-vip* +/usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-route* +%{_mandir}/man7/*gcp-vpc-move-route* +/usr/lib/ocf/resource.d/heartbeat/gcp-pd-move* +%{_mandir}/man7/*gcp-pd-move* +# bundle +%{_bindir}/gcloud-ra +%dir /usr/lib/%{name} +/usr/lib/%{name}/%{bundled_lib_dir}/gcp +%endif + +%changelog +* Wed Dec 9 2020 Oyvind Albrigtsen - 4.1.1-68.1 +- AWS agents: add support for IMDSv2 + + Resolves: rhbz#1905587 + +* Thu Aug 20 2020 Oyvind Albrigtsen - 4.1.1-68 +- azure-lb: fix redirect issue + + Resolves: rhbz#1850778 + +* Wed Aug 19 2020 Oyvind Albrigtsen - 4.1.1-67 +- gcp-vpc-move-vip: add support for multiple alias IPs + + Resolves: rhbz#1846733 + +* Thu Jul 30 2020 Oyvind Albrigtsen - 4.1.1-65 +- azure-events: handle exceptions in urlopen + + Resolves: rhbz#1845574 + +* Mon Jul 27 2020 Oyvind Albrigtsen - 4.1.1-64 +- nfsserver: fix NFSv4-only support +- azure-events: new resource agent for Azure + + Resolves: rhbz#1818997 + Resolves: rhbz#1819965 + +* Thu Jun 25 2020 Oyvind Albrigtsen - 4.1.1-60 +- Upgrade bundled python-httplib2 to fix CVE-2020-11078 + + Resolves: rhbz#1850990 + +* Wed Jun 17 2020 Oyvind Albrigtsen - 4.1.1-59 +- pgsql: support Pacemaker v2.03+ output + + Resolves: rhbz#1836186 + +* Thu Jun 11 2020 Oyvind Albrigtsen - 4.1.1-56 +- Filesystem: set "fast_stop" default to "no" for GFS2 filesystems + + Resolves: rhbz#1814896 + +* Wed Jun 10 2020 Oyvind Albrigtsen - 4.1.1-55 +- nfsserver: don't log error message when /etc/sysconfig/nfs does not exist +- exportfs: describe clientspec format in metadata + + Resolves: rhbz#1845581 + Resolves: rhbz#1845583 + +* Tue Jun 9 2020 Oyvind Albrigtsen - 4.1.1-54 +- exportfs: add symlink support +- aliyun-vpc-move-ip: log output when failing + + Resolves: rhbz#1820523 + Resolves: rhbz#1843999 + +* Tue Jun 2 2020 Oyvind Albrigtsen - 4.1.1-53 +- podman: force remove container if remove fails + + Resolves: rhbz#1839721 + +* Thu May 28 2020 Oyvind Albrigtsen - 4.1.1-52 +- gcp-pd-move: new resource agent for Google Cloud + + Resolves: rhbz#1633251 + +* Wed May 27 2020 Oyvind Albrigtsen - 4.1.1-51 +- NovaEvacuate: suppress expected initial error message +- db2 (HADR): promote standby node when master node disappears + + Resolves: rhbz#1830716 + Resolves: rhbz#1836945 + +* Thu May 7 2020 Oyvind Albrigtsen - 4.1.1-50 +- 
rabbitmq-cluster: increase rabbitmqctl wait timeout during start + + Resolves: rhbz#1832321 + +* Tue Apr 28 2020 Oyvind Albrigtsen - 4.1.1-49 +- aws-vpc-route53: new resource agent for AWS +- pgsql: improve checks to prevent incorrect status, and set initial + score for primary and hot standby + + Resolves: rhbz#1759115 + Resolves: rhbz#1744190 + +* Mon Apr 6 2020 Oyvind Albrigtsen - 4.1.1-47 +- aws-vpc-move-ip: delete remaining route entries + + Resolves: rhbz#1819021 + +* Fri Mar 27 2020 Oyvind Albrigtsen - 4.1.1-46 +- use safe temp file location +- ocf-shellfuncs: ocf_is_clone(): fix to return true when clone-max + is set to 0 + + Resolves: rhbz#1817432 + Resolves: rhbz#1817598 + +* Wed Mar 18 2020 Oyvind Albrigtsen - 4.1.1-45 +- azure-lb: support using socat instead of nc +- aws-vpc-move-ip: add "routing_table_role" parameter +- redis: fix validate-all action and run it during start + + Resolves: rhbz#1804658 + Resolves: rhbz#1810466 + Resolves: rhbz#1792237 + +* Fri Mar 6 2020 Oyvind Albrigtsen - 4.1.1-44 +- lvmlockd: automatically remove locking_type from lvm.conf for LVM + v2.03+ + + Resolves: rhbz#1808468 + +* Tue Jan 28 2020 Oyvind Albrigtsen - 4.1.1-43 +- rabbitmq-cluster: delete nodename when stop fails + + Resolves: rhbz#1792196 + +* Thu Jan 23 2020 Oyvind Albrigtsen - 4.1.1-42 +- IPsrcaddr: add destination and table parameters + + Resolves: rhbz#1744224 + +* Mon Jan 13 2020 Oyvind Albrigtsen - 4.1.1-40 +- podman: improve image exist check +- IPaddr2: add CLUSTERIP not supported info to metadata/manpage +- Filesystem: refresh UUID if block device doesn't exist + + Resolves: rhbz#1788889 + Resolves: rhbz#1767916 + Resolves: rhbz#1777381 + +* Wed Nov 27 2019 Oyvind Albrigtsen - 4.1.1-38 +- IPaddr2: add noprefixroute parameter + + Resolves: rhbz#1741042 + +* Wed Nov 13 2019 Oyvind Albrigtsen - 4.1.1-36 +- exportfs: allow multiple exports with same fsid +- mysql/galera: fix incorrect rc + + Resolves: rhbz#1764888 + Resolves: rhbz#1765128 + +* Mon Oct 14 2019 Oyvind Albrigtsen - 4.1.1-35 +- Route: don't fence when parameters not set +- LVM-activate: add partial-activation support + + Resolves: rhbz#1750261 + Resolves: rhbz#1741843 + +* Wed Oct 2 2019 Oyvind Albrigtsen - 4.1.1-34 +- LVM/clvm: remove manpages for excluded agents +- LVM-activate: return NOT_RUNNING when node rejoins cluster +- LVM-activate: detect systemid volume without reboot +- Filesystem: add symlink support +- Filesystem: avoid corrupt mount-list and don't kill incorrect processes + for bind-mounts +- IPsrcaddr: make proto optional to fix regression when used without + NetworkManager +- docker: fix stop issues +- rabbitmq-cluster: also restore users in single node mode +- IPaddr2: sanitize compressed IPv6 IPs +- nfsserver: systemd performance improvements +- NovaEvacuate: add "evacuate_delay" parameter + + Resolves: rhbz#1694392 + Resolves: rhbz#1695039 + Resolves: rhbz#1738428 + Resolves: rhbz#1744103 + Resolves: rhbz#1744140 + Resolves: rhbz#1757837 + Resolves: rhbz#1748768 + Resolves: rhbz#1750352 + Resolves: rhbz#1751700 + Resolves: rhbz#1751962 + Resolves: rhbz#1755760 + +* Tue Aug 27 2019 Oyvind Albrigtsen - 4.1.1-33 +- rabbitmq-cluster: fail monitor when node is in minority partition, + fix stop regression, retry start when cluster join fails, ensure + node attributes are removed + + Resolves: rhbz#1745713 + +* Mon Aug 12 2019 Oyvind Albrigtsen - 4.1.1-32 +- mysql/galera: use runuser/su to avoid using DAC_OVERRIDE + + Resolves: rhbz#1692960 + +* Wed Aug 7 2019 Oyvind Albrigtsen - 4.1.1-31 +- podman: add 
drop-in dependency support + + Resolves: rhbz#1736746 + +* Wed Jul 31 2019 Oyvind Albrigtsen - 4.1.1-30 +- iSCSITarget/iSCSILogicalUnit: only create iqn/acls when it doesn't + exist + + Resolves: rhbz#1692413 + +* Tue Jul 30 2019 Oyvind Albrigtsen - 4.1.1-29 +- CTDB: add support for v4.9+ + + Resolves: rhbz#1732867 + +* Tue Jul 23 2019 Oyvind Albrigtsen - 4.1.1-28 +- podman: fixes to avoid bundle resources restarting when probing + takes too long +- LVM-activate: fix monitor to avoid hang caused by validate-all call + + Resolves: rhbz#1718219 + Resolves: rhbz#1730455 + +* Wed Jun 19 2019 Oyvind Albrigtsen - 4.1.1-27 +- ocf_log: do not log debug messages when HA_debug unset +- Filesystem: remove notify-action from metadata +- dhcpd: keep SELinux context in chroot + + Resolves: rhbz#1707969 + Resolves: rhbz#1717759 + Resolves: rhbz#1719684 + +* Tue Jun 11 2019 Oyvind Albrigtsen - 4.1.1-26 +- sap/sap-hana: split subpackages into separate packages + + Resolves: rhbz#1705767 + +* Wed May 29 2019 Oyvind Albrigtsen - 4.1.1-24 +- Squid: fix PID file issue + + Resolves: rhbz#1689184 + +* Tue May 28 2019 Oyvind Albrigtsen - 4.1.1-23 +- Route: make family parameter optional +- redis: mute password warning + + Resolves: rhbz#1669140 + Resolves: rhbz#1683548 + +* Thu May 23 2019 Oyvind Albrigtsen - 4.1.1-22 +- aws-vpc-move-ip: add multi route-table support and fix issue + with multiple NICs + + Resolves: rhbz#1697559 + +* Fri Apr 5 2019 Oyvind Albrigtsen - 4.1.1-21 +- gcp-vpc-move-route/gcp-vpc-move-vip: fix Python 3 encoding issue + + Resolves: rhbz#1695656 + +* Mon Apr 1 2019 Oyvind Albrigtsen - 4.1.1-20 +- aws-vpc-move-ip: use "--query" to avoid a possible race condition +- gcloud-ra: fix Python 3 issue and remove Python 2 detection + + Resolves: rhbz#1693662 + Resolves: rhbz#1691456 + +* Thu Mar 21 2019 Oyvind Albrigtsen - 4.1.1-19 +- Add CI gating tests +- LVM-activate: support LVs from same VG +- tomcat: use systemd when catalina.sh is unavailable +- Fixed python-devel/perl build dependencies + + Resolves: rhbz#1682136 + Resolves: rhbz#1667414 + Resolves: rhbz#1666691 + Resolves: rhbz#1595854 + +* Thu Mar 7 2019 Oyvind Albrigtsen - 4.1.1-18 +- aliyun-vpc-move-ip: exclude from main package +- aliyuncli-ra: upgrade bundled python-aliyun-sdk-core and fix Python 3 issues +- ocf.py: byte compile + + Resolves: rhbz#1677204 + Resolves: rhbz#1677981 + Resolves: rhbz#1678874 + +* Tue Feb 5 2019 Oyvind Albrigtsen - 4.1.1-17 +- LVM-activate: don't require locking_type + + Resolves: rhbz#1658664 + +* Fri Jan 11 2019 Oyvind Albrigtsen - 4.1.1-16 +- vdo-vol: fix monitor-action +- LVM-activate: don't fail initial probe + + Resolves: rhbz#1662466 + Resolves: rhbz#1643307 + +* Tue Oct 23 2018 Oyvind Albrigtsen - 4.1.1-15 +- nfsserver: fix start-issues when nfs_shared_infodir parameter is + changed + + Resolves: rhbz#1642027 + +* Mon Oct 8 2018 Oyvind Albrigtsen - 4.1.1-14 +- redis: use basename in pidof to avoid issues in containers + + Resolves: rhbz#1635785 + +* Wed Sep 26 2018 Oyvind Albrigtsen - 4.1.1-11 +- Remove grpc from bundle + + Resolves: rhbz#1630627 + +* Fri Sep 21 2018 Oyvind Albrigtsen - 4.1.1-10 +- systemd-tmpfiles: change path to /run/resource-agents + + Resolves: rhbz#1631291 + +* Fri Aug 24 2018 Oyvind Albrigtsen - 4.1.1-9 +- podman: new resource agent + + Resolves: rhbz#1607607 + +* Wed Aug 22 2018 Oyvind Albrigtsen - 4.1.1-8 +- LVM: fix missing dash in activate_options +- LVM-activate: warn about incorrect vg_access_mode +- lvmlockd: add cmirrord support + +* Wed Aug 1 2018 Oyvind Albrigtsen - 
4.1.1-7 +- findif: only match lines containing netmasks + +* Mon Jul 30 2018 Florian Weimer - 4.1.1-6 +- Rebuild with fixed binutils + +* Fri Jul 27 2018 Oyvind Albrigtsen - 4.1.1-5 +- vdo-vol: new resource agent + + Resolves: rhbz#1552330 + +* Wed Jul 4 2018 Oyvind Albrigtsen - 4.1.1-4 +- VirtualDomain: add stateless support +- Exclude unsupported agents + +* Thu Jun 28 2018 Oyvind Albrigtsen - 4.1.1-3 +- Added SAPHana and OpenStack agents + +* Fri May 25 2018 Oyvind Albrigtsen - 4.1.1-2 +- Remove unsupported clvm and LVM agents + +* Tue Mar 13 2018 Oyvind Albrigtsen - 4.1.1-1 +- Rebase to resource-agents 4.1.1 upstream release. + +* Mon Feb 19 2018 Oyvind Albrigtsen - 4.1.0-2 +- Add gcc to BuildRequires + +* Fri Feb 09 2018 Igor Gnatenko - 4.1.0-1.1 +- Escape macros in %%changelog + +* Wed Jan 10 2018 Oyvind Albrigtsen - 4.1.0-1 +- Rebase to resource-agents 4.1.0 upstream release. + +* Thu Aug 03 2017 Fedora Release Engineering - 4.0.1-1.3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild + +* Thu Jul 27 2017 Fedora Release Engineering - 4.0.1-1.2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild + +* Sat Feb 11 2017 Fedora Release Engineering - 4.0.1-1.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild + +* Thu Feb 2 2017 Oyvind Albrigtsen - 4.0.1-1 +- Rebase to resource-agents 4.0.1 upstream release. + +* Wed Feb 1 2017 Oyvind Albrigtsen - 4.0.0-2 +- galera: remove "long SST monitoring" support due to corner-case issues + +* Tue Jan 31 2017 Oyvind Albrigtsen - 4.0.0-1 +- Rebase to resource-agents 4.0.0 upstream release. + +* Thu Dec 15 2016 Oyvind Albrigtsen - 3.9.7-6 +- Add netstat dependency + +* Tue Feb 9 2016 Oyvind Albrigtsen - 3.9.7-4 +- Rebase to resource-agents 3.9.7 upstream release. + +* Thu Feb 04 2016 Fedora Release Engineering - 3.9.6-2.2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild + +* Thu Jun 18 2015 Fedora Release Engineering - 3.9.6-2.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild + +* Mon Apr 20 2015 David Vossel - 3.9.6-2 +- Rebase to latest upstream code in order to pull in rabbitmq-cluster agent + +* Fri Feb 13 2015 David Vossel - 3.9.6-1 +- Rebase to resource-agents 3.9.6 upstream release. + +* Sun Aug 17 2014 Fedora Release Engineering - 3.9.5-12.2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild + +* Sun Jun 08 2014 Fedora Release Engineering - 3.9.5-12.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild + +* Wed Apr 30 2014 David Vossel - 3.9.5-12 +- Sync with latest upstream. + +* Thu Jan 2 2014 David Vossel - 3.9.5-11 +- Sync with latest upstream. + +* Sun Oct 20 2013 David Vossel - 3.9.5-10 +- Fix build system for rawhide. + +* Wed Oct 16 2013 David Vossel - 3.9.5-9 +- Remove rgmanager agents from build. + +* Sun Aug 04 2013 Fedora Release Engineering - 3.9.5-8 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild + +* Wed Jul 17 2013 Petr Pisar - 3.9.5-7 +- Perl 5.18 rebuild + +* Tue Jun 18 2013 David Vossel - 3.9.5-6 +- Restores rsctmp directory to upstream default. + +* Tue Jun 18 2013 David Vossel - 3.9.5-5 +- Merges redhat provider into heartbeat provider. Remove + rgmanager's redhat provider. + + Resolves: rhbz#917681 + Resolves: rhbz#928890 + Resolves: rhbz#952716 + Resolves: rhbz#960555 + +* Tue Mar 12 2013 David Vossel - 3.9.5-3 +- Fixes build system error with conditional logic involving + IPv6addr and updates spec file to build against rhel 7 as + well as fedora 19. 
+ +* Mon Mar 11 2013 David Vossel - 3.9.5-2 +- Resolves: rhbz#915050 + +* Mon Mar 11 2013 David Vossel - 3.9.5-1 +- New upstream release. + +* Fri Jan 25 2013 Kevin Fenzi - 3.9.2-5 +- Fix cifs mount requires + +* Mon Nov 12 2012 Chris Feist - 3.9.2-4 +- Removed version number after dist + +* Mon Oct 29 2012 Chris Feist - 3.9.2-3.8 +- Remove cluster-glue-libs-devel +- Disable IPv6addr & sfex to fix deps on libplumgpl & libplum (due to + disappearance of cluster-glue in F18) + +* Sat Jul 21 2012 Fedora Release Engineering - 3.9.2-3.5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild + +* Thu Jul 05 2012 Chris Feist - 3.9.2-3.4 +- Fix location of lvm (change from /sbin to /usr/sbin) + +* Wed Apr 04 2012 Jon Ciesla - 3.9.2-3.3 +- Rebuilt to fix rawhide dependency issues (caused by move of fsck from + /sbin to /usr/sbin). + +* Fri Mar 30 2012 Jon Ciesla - 3.9.2-3.1 +- libnet rebuild. + +* Sat Jan 14 2012 Fedora Release Engineering - 3.9.2-2.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild + +* Fri Jul 8 2011 Fabio M. Di Nitto - 3.9.2-2 +- add post call to resource-agents to integrate with cluster 3.1.4 + +* Thu Jun 30 2011 Fabio M. Di Nitto - 3.9.2-1 +- new upstream release +- fix 2 regressions from 3.9.1 + +* Mon Jun 20 2011 Fabio M. Di Nitto - 3.9.1-1 +- new upstream release +- import spec file from upstream + +* Tue Mar 1 2011 Fabio M. Di Nitto - 3.1.1-1 +- new upstream release 3.1.1 and 1.0.4 + +* Wed Feb 09 2011 Fedora Release Engineering - 3.1.0-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild + +* Thu Dec 2 2010 Fabio M. Di Nitto - 3.1.0-1 +- new upstream release +- spec file update: + Update upstream URL + Update source URL + use standard configure macro + use standard make invocation + +* Thu Oct 7 2010 Fabio M. Di Nitto - 3.0.17-1 +- new upstream release + Resolves: rhbz#632595, rhbz#633856, rhbz#632385, rhbz#628013 + Resolves: rhbz#621313, rhbz#595383, rhbz#580492, rhbz#605733 + Resolves: rhbz#636243, rhbz#591003, rhbz#637913, rhbz#634718 + Resolves: rhbz#617247, rhbz#617234, rhbz#631943 + Resolves: rhbz#639018 + +* Thu Oct 7 2010 Andrew Beekhof - 3.0.16-2 +- new upstream release of the Pacemaker agents: 71b1377f907c + +* Thu Sep 2 2010 Fabio M. Di Nitto - 3.0.16-1 +- new upstream release + Resolves: rhbz#619096, rhbz#614046, rhbz#620679, rhbz#619680 + Resolves: rhbz#621562, rhbz#621694, rhbz#608887, rhbz#622844 + Resolves: rhbz#623810, rhbz#617306, rhbz#623816, rhbz#624691 + Resolves: rhbz#622576 + +* Thu Jul 29 2010 Fabio M. Di Nitto - 3.0.14-1 +- new upstream release + Resolves: rhbz#553383, rhbz#557563, rhbz#578625, rhbz#591003 + Resolves: rhbz#593721, rhbz#593726, rhbz#595455, rhbz#595547 + Resolves: rhbz#596918, rhbz#601315, rhbz#604298, rhbz#606368 + Resolves: rhbz#606470, rhbz#606480, rhbz#606754, rhbz#606989 + Resolves: rhbz#607321, rhbz#608154, rhbz#608887, rhbz#609181 + Resolves: rhbz#609866, rhbz#609978, rhbz#612097, rhbz#612110 + Resolves: rhbz#612165, rhbz#612941, rhbz#614127, rhbz#614356 + Resolves: rhbz#614421, rhbz#614457, rhbz#614961, rhbz#615202 + Resolves: rhbz#615203, rhbz#615255, rhbz#617163, rhbz#617566 + Resolves: rhbz#618534, rhbz#618703, rhbz#618806, rhbz#618814 + +* Mon Jun 7 2010 Fabio M. 
Di Nitto - 3.0.13-1 +- new upstream release + Resolves: rhbz#592103, rhbz#593108, rhbz#578617, rhbz#594626 + Resolves: rhbz#594511, rhbz#596046, rhbz#594111, rhbz#597002 + Resolves: rhbz#599643 + +* Tue May 18 2010 Andrew Beekhof - 3.0.12-2 +- libnet is not available on RHEL +- Do not package ldirectord on RHEL + Resolves: rhbz#577264 + +* Mon May 10 2010 Fabio M. Di Nitto - 3.0.12-1 +- new upstream release + Resolves: rhbz#585217, rhbz#586100, rhbz#581533, rhbz#582753 + Resolves: rhbz#582754, rhbz#585083, rhbz#587079, rhbz#588890 + Resolves: rhbz#588925, rhbz#583789, rhbz#589131, rhbz#588010 + Resolves: rhbz#576871, rhbz#590000, rhbz#589823 + +* Mon May 10 2010 Andrew Beekhof - 3.0.12-1 +- New pacemaker agents upstream release: a7c0f35916bf + + High: pgsql: properly implement pghost parameter + + High: RA: mysql: fix syntax error + + High: SAPInstance RA: do not rely on op target rc when monitoring clones (lf#2371) + + High: set the HA_RSCTMP directory to /var/run/resource-agents (lf#2378) + + Medium: IPaddr/IPaddr2: add a description of the assumption in meta-data + + Medium: IPaddr: return the correct code if interface delete failed + + Medium: nfsserver: rpc.statd as the notify cmd does not work with -v (thanks to Carl Lewis) + + Medium: oracle: reduce output from sqlplus to the last line for queries (bnc#567815) + + Medium: pgsql: implement "config" parameter + + Medium: RA: iSCSITarget: follow changed IET access policy + +* Wed Apr 21 2010 Fabio M. Di Nitto - 3.0.11-1 +- new upstream release + Resolves: rhbz#583945, rhbz#581047, rhbz#576330, rhbz#583017 + Resolves: rhbz#583019, rhbz#583948, rhbz#584003, rhbz#582017 + Resolves: rhbz#555901, rhbz#582754, rhbz#582573, rhbz#581533 +- Switch to file-based Requires. + Also address several other problems related to missing runtime + components in different agents. + With the current Requires: set, we guarantee all basic functionalities + out of the box for lvm/fs/clusterfs/netfs/networking. 
+ Resolves: rhbz#570008 + +* Sat Apr 17 2010 Andrew Beekhof - 3.0.10-2 +- New pacemaker agents upstream release + + High: RA: vmware: fix set_environment() invocation (LF 2342) + + High: RA: vmware: update to version 0.2 + + Medium: Filesystem: prefer /proc/mounts to /etc/mtab for non-bind mounts (lf#2388) + + Medium: IPaddr2: don't bring the interface down on stop (thanks to Lars Ellenberg) + + Medium: IPsrcaddr: modify the interface route (lf#2367) + + Medium: ldirectord: Allow multiple email addresses (LF 2168) + + Medium: ldirectord: fix setting defaults for configfile and ldirectord (lf#2328) + + Medium: meta-data: improve timeouts in most resource agents + + Medium: nfsserver: use default values (lf#2321) + + Medium: ocf-shellfuncs: don't log but print to stderr if connected to a terminal + + Medium: ocf-shellfuncs: don't output to stderr if using syslog + + Medium: oracle/oralsnr: improve exit codes if the environment isn't valid + + Medium: RA: iSCSILogicalUnit: fix monitor for STGT + + Medium: RA: make sure that OCF_RESKEY_CRM_meta_interval is always defined (LF 2284) + + Medium: RA: ManageRAID: require bash + + Medium: RA: VirtualDomain: bail out early if config file can't be read during probe (Novell 593988) + + Medium: RA: VirtualDomain: fix incorrect use of __OCF_ACTION + + Medium: RA: VirtualDomain: improve error messages + + Medium: RA: VirtualDomain: spin on define until we definitely have a domain name + + Medium: Route: add route table parameter (lf#2335) + + Medium: sfex: don't use pid file (lf#2363,bnc#585416) + + Medium: sfex: exit with success on stop if sfex has never been started (bnc#585416) + +* Fri Apr 9 2010 Fabio M. Di Nitto - 3.0.10-1 +- New rgmanager resource agents upstream release + Resolves: rhbz#519491, rhbz#570525, rhbz#571806, rhbz#574027 + Resolves: rhbz#574215, rhbz#574886, rhbz#576322, rhbz#576335 + Resolves: rhbz#575103, rhbz#577856, rhbz#577874, rhbz#578249 + Resolves: rhbz#578625, rhbz#578626, rhbz#578628 + Resolves: rhbz#579621, rhbz#579623, rhbz#579625, rhbz#579626 + Resolves: rhbz#579059 + +* Wed Mar 24 2010 Andrew Beekhof - 3.0.9-2 +- Resolves: rhbz#572993 - Patched build process to correctly generate ldirectord man page +- Resolves: rhbz#574732 - Add libnet-devel as a dependency to ensure IPv6addr is built + +* Mon Mar 1 2010 Fabio M. Di Nitto - 3.0.9-1 +- New rgmanager resource agents upstream release + Resolves: rhbz#455300, rhbz#568446, rhbz#561862, rhbz#536902 + Resolves: rhbz#512171, rhbz#519491 + +* Mon Feb 22 2010 Fabio M. Di Nitto - 3.0.8-1 +- New rgmanager resource agents upstream release + Resolves: rhbz#548133, rhbz#565907, rhbz#545602, rhbz#555901 + Resolves: rhbz#564471, rhbz#515717, rhbz#557128, rhbz#536157 + Resolves: rhbz#455300, rhbz#561416, rhbz#562237, rhbz#537201 + Resolves: rhbz#536962, rhbz#553383, rhbz#556961, rhbz#555363 + Resolves: rhbz#557128, rhbz#455300, rhbz#557167, rhbz#459630 + Resolves: rhbz#532808, rhbz#556603, rhbz#554968, rhbz#555047 +- spec file update: + * update spec file copyright date + * use bz2 tarball + +* Fri Jan 15 2010 Fabio M. Di Nitto - 3.0.7-2 +- Add python as BuildRequires + +* Mon Jan 11 2010 Fabio M. 
Di Nitto - 3.0.7-1 +- New rgmanager resource agents upstream release + Resolves: rhbz#526286, rhbz#533461 + +* Mon Jan 11 2010 Andrew Beekhof - 3.0.6-2 +- Update Pacemaker agents to upstream version: c76b4a6eb576 + + High: RA: VirtualDomain: fix forceful stop (LF 2283) + + High: apache: monitor operation of depth 10 for web applications (LF 2234) + + Medium: IPaddr2: CLUSTERIP/iptables rule not always inserted on failed monitor (LF 2281) + + Medium: RA: Route: improve validate (LF 2232) + + Medium: mark obsolete RAs as deprecated (LF 2244) + + Medium: mysql: escalate stop to KILL if regular shutdown doesn't work + +* Mon Dec 7 2009 Fabio M. Di Nitto - 3.0.6-1 +- New rgmanager resource agents upstream release +- spec file update: + * use global instead of define + * use new Source0 url + * use %%name macro more aggressively + +* Mon Dec 7 2009 Andrew Beekhof - 3.0.5-2 +- Update Pacemaker agents to upstream version: bc00c0b065d9 + + High: RA: introduce OCF_FUNCTIONS_DIR, allow it to be overridden (LF2239) + + High: doc: add man pages for all RAs (LF2237) + + High: syslog-ng: new RA + + High: vmware: make meta-data work and several cleanups (LF 2212) + + Medium: .ocf-shellfuncs: add ocf_is_probe function + + Medium: Dev: make RAs executable (LF2239) + + Medium: IPv6addr: ifdef out the ip offset hack for libnet v1.1.4 (LF 2034) + + Medium: add mercurial repository version information to .ocf-shellfuncs + + Medium: build: add perl-MailTools runtime dependency to ldirectord package (LF 1469) + + Medium: iSCSITarget, iSCSILogicalUnit: support LIO + + Medium: nfsserver: use check_binary properly in validate (LF 2211) + + Medium: nfsserver: validate should not check if nfs_shared_infodir exists (thanks to eelco@procolix.com) (LF 2219) + + Medium: oracle/oralsnr: export variables properly + + Medium: pgsql: remove the previous backup_label if it exists + + Medium: postfix: fix double stop (thanks to Dinh N. Quoc) + + RA: LVM: Make monitor operation quiet in logs (bnc#546353) + + RA: Xen: Remove instance_attribute "allow_migrate" (bnc#539968) + + ldirectord: OCF agent: overhaul + +* Fri Nov 20 2009 Fabio M. Di Nitto - 3.0.5-1 +- New rgmanager resource agents upstream release +- Allow pacemaker to use rgmanager resource agents + +* Wed Oct 28 2009 Andrew Beekhof - 3.0.4-2 +- Update Pacemaker agents to upstream version: e2338892f59f + + High: send_arp - turn on unsolicited mode for compatibility with the libnet version's exit codes + + High: Trap sigterm for compatibility with the libnet version of send_arp + + Medium: Bug - lf#2147: IPaddr2: behave if the interface is down + + Medium: IPv6addr: recognize network masks properly + + Medium: RA: VirtualDomain: avoid needlessly invoking "virsh define" + +* Wed Oct 21 2009 Fabio M. Di Nitto - 3.0.4-1 +- New rgmanager resource agents upstream release + +* Mon Oct 12 2009 Andrew Beekhof - 3.0.3-3 +- Update Pacemaker agents to upstream version: 099c0e5d80db + + Add the ha_parameter function back into .ocf-shellfuncs. 
+ + Bug bnc#534803 - Provide a default for MAILCMD + + Fix use of undefined macro @HA_NOARCHDATAHBDIR@ + + High (LF 2138): IPsrcaddr: replace 0/0 with proper ip prefix (thanks to Michael Ricordeau and Michael Schwartzkopff) + + Import shellfuncs from heartbeat as badly written RAs use it + + Medium (LF 2173): nfsserver: exit properly in nfsserver_validate + + Medium: RA: Filesystem: implement monitor operation + + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable + + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable (addendum) + + Medium: RA: iSCSILogicalUnit: use a 16-byte default SCSI ID + + Medium: RA: iSCSITarget: be more persistent deleting targets on stop + + Medium: RA: portblock: add per-IP filtering capability + + Medium: mysql-proxy: log_level and keepalive parameters + + Medium: oracle: drop spurious output from sqlplus + + RA: Filesystem: allow configuring smbfs mounts as clones + +* Wed Sep 23 2009 Fabio M. Di Nitto - 3.0.3-1 +- New rgmanager resource agents upstream release + +* Thu Aug 20 2009 Fabio M. Di Nitto - 3.0.1-1 +- New rgmanager resource agents upstream release + +* Tue Aug 18 2009 Andrew Beekhof - 3.0.0-16 +- Create an ldirectord package +- Update Pacemaker agents to upstream version: 2198dc90bec4 + + Build: Import ldirectord. + + Ensure HA_VARRUNDIR has a value to substitute + + High: Add findif tool (mandatory for IPaddr/IPaddr2) + + High: IPv6addr: new nic and cidr_netmask parameters + + High: postfix: new resource agent + + Include license information + + Low (LF 2159): Squid: make the regexp match the output of netstat more precisely + + Low: configure: Fix package name. + + Low: ldirectord: add dependency on $remote_fs. + + Low: ldirectord: add mandatory required header to init script. + + Medium (LF 2165): IPaddr2: remove all colons from the mac address before passing it to send_arp + + Medium: VirtualDomain: destroy domain shortly before timeout expiry + + Medium: shellfuncs: Make the mktemp wrappers work. + + Remove references to Echo function + + Remove references to heartbeat shellfuncs. + + Remove useless path lookups + + findif: actually include the right header. Simplify configure. + + ldirectord: Remove superfluous configure artifact. + + ocf-tester: Fix package reference and path to DTD. + +* Tue Aug 11 2009 Ville Skyttä - 3.0.0-15 +- Use bzipped upstream hg tarball. + +* Wed Jul 29 2009 Fabio M. Di Nitto - 3.0.0-14 +- Merge Pacemaker cluster resource agents: + * Add Source1. + * Drop noarch. We have real binaries now. + * Update BuildRequires. + * Update all relevant prep/build/install/files/description sections. + +* Sun Jul 26 2009 Fedora Release Engineering - 3.0.0-13 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild + +* Wed Jul 8 2009 Fabio M. Di Nitto - 3.0.0-12 +- spec file updates: + * Update copyright header + * final release.. undefine alphatag + +* Thu Jul 2 2009 Fabio M. Di Nitto - 3.0.0-11.rc4 +- New upstream release. + +* Sat Jun 20 2009 Fabio M. Di Nitto - 3.0.0-10.rc3 +- New upstream release. + +* Wed Jun 10 2009 Fabio M. Di Nitto - 3.0.0-9.rc2 +- New upstream release + git94df30ca63e49afb1e8aeede65df8a3e5bcd0970 + +* Tue Mar 24 2009 Fabio M. Di Nitto - 3.0.0-8.rc1 +- New upstream release. +- Update BuildRoot usage to preferred versions/names + +* Mon Mar 9 2009 Fabio M. Di Nitto - 3.0.0-7.beta1 +- New upstream release. + +* Fri Mar 6 2009 Fabio M. Di Nitto - 3.0.0-6.alpha7 +- New upstream release. + +* Tue Mar 3 2009 Fabio M. Di Nitto - 3.0.0-5.alpha6 +- New upstream release. 
+ +* Tue Feb 24 2009 Fabio M. Di Nitto - 3.0.0-4.alpha5 +- Drop Conflicts with rgmanager. + +* Mon Feb 23 2009 Fabio M. Di Nitto - 3.0.0-3.alpha5 +- New upstream release. + +* Thu Feb 19 2009 Fabio M. Di Nitto - 3.0.0-2.alpha4 +- Add comments on how to build this package. + +* Thu Feb 5 2009 Fabio M. Di Nitto - 3.0.0-1.alpha4 +- New upstream release. +- Fix datadir/cluster directory ownership. + +* Tue Jan 27 2009 Fabio M. Di Nitto - 3.0.0-1.alpha3 +- Initial packaging
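A minimal sketch of how an import like this is typically rebuilt locally. The SPECS/ layout and the resource-agents.spec file name are assumptions (standard dist-git convention), not stated by this import; note that the large source tarballs are kept in a lookaside cache rather than in git and must be fetched into SOURCES/ before building:

  # hypothetical paths following dist-git convention
  cd resource-agents                                        # checkout of this repository
  # ...fetch the SOURCES/ tarballs from the lookaside cache first...
  rpmbuild --define "_topdir $PWD" -ba SPECS/resource-agents.spec
  # binary packages (including the aliyun and gcp subpackages) land under RPMS/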