From d8a7480e9a401dacbf199b013897c54bf00423ad Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Fri, 14 May 2021 04:17:31 +0000 Subject: [PATCH] import resource-agents-4.1.1-94.el8 --- .gitignore | 10 + .resource-agents.metadata | 10 + ...-configure-add-python-path-detection.patch | 29 + SOURCES/10-gcloud-support-info.patch | 25 + ...-ci-skip-python-agents-in-shellcheck.patch | 24 + SOURCES/3-gcp-vpc-move-vip.patch | 646 ++++ SOURCES/4-gcp-vpc-move-route.patch | 632 ++++ SOURCES/5-python-library.patch | 600 +++ ...6-gcp-move-vip-filter-aggregatedlist.patch | 25 + SOURCES/7-gcp-bundled.patch | 35 + SOURCES/7-gcp-stackdriver-logging-note.patch | 28 + SOURCES/8-google-cloud-sdk-fixes.patch | 12 + ...th2client-python-rsa-to-cryptography.patch | 129 + SOURCES/IPaddr2-monitor_retries.patch | 66 + .../LVM-activate-1-warn-vg_access_mode.patch | 42 + ...ivate-2-parameters-access-mode-fixes.patch | 137 + .../LVM-activate-fix-issue-with-dashes.patch | 54 + SOURCES/LVM-fix-missing-dash.patch | 22 + SOURCES/LVM-volume_group_check_only.patch | 72 + SOURCES/VirtualDomain-stateless-support.patch | 126 + SOURCES/aliyun-vpc-move-ip-1.patch | 275 ++ SOURCES/aliyun-vpc-move-ip-2-fixes.patch | 451 +++ .../aliyun-vpc-move-ip-3-fix-manpage.patch | 22 + SOURCES/aliyun-vpc-move-ip-4-bundled.patch | 15 + ...c-move-ip-5-improve-metadata-manpage.patch | 49 + SOURCES/aliyuncli-python3-fixes.patch | 398 ++ ...ve-ip-1-avoid-false-positive-monitor.patch | 39 + ...ve-ip-2-avoid-false-positive-monitor.patch | 31 + SOURCES/build-add-missing-manpages.patch | 43 + SOURCES/bz1471182-crypt-1-new-ra.patch | 415 ++ SOURCES/bz1471182-crypt-2-fix-bashism.patch | 22 + .../bz1471182-crypt-3-fix-missing-and.patch | 22 + SOURCES/bz1552330-vdo-vol.patch | 285 ++ SOURCES/bz1607607-podman.patch | 538 +++ ...1-systemd-tmpfiles-configurable-path.patch | 48 + SOURCES/bz1633251-gcp-pd-move-1.patch | 425 +++ ...-gcp-pd-move-2-use-OCF_FUNCTIONS_DIR.patch | 18 + ...-add-stackdriver_logging-to-metadata.patch | 48 + ...gcp-pd-move-4-fixes-and-improvements.patch | 176 + SOURCES/bz1633251-gcp-pd-move-5-bundle.patch | 10 + SOURCES/bz1635785-redis-pidof-basename.patch | 61 + ...0587-pgsql-ignore-masters-re-promote.patch | 40 + .../bz1642027-nfsserver-var-lib-nfs-fix.patch | 43 + ...LVM-activate-dont-fail-initial-probe.patch | 24 + ...M-activate-dont-require-locking_type.patch | 27 + ...bz1662466-vdo-vol-fix-monitor-action.patch | 12 + ...systemd-when-catalina.sh-unavailable.patch | 59 + ...VM-activate-support-LVs-from-same-VG.patch | 23 + ...14-2-LVM-activate-only-count-volumes.patch | 29 + ...Route-make-family-parameter-optional.patch | 31 + ...z1683548-redis-mute-password-warning.patch | 62 + .../bz1689184-Squid-1-fix-pidfile-issue.patch | 70 + ...4-Squid-2-dont-run-pgrep-without-PID.patch | 24 + ...bz1691456-gcloud-dont-detect-python2.patch | 29 + ...rget-create-iqn-when-it-doesnt-exist.patch | 31 + ...3-2-iSCSILogicalUnit-create-acls-fix.patch | 24 + ...era-runuser-su-to-avoid-dac_override.patch | 93 + ...ove-ip-avoid-possible-race-condition.patch | 104 + ...te-return-NOT_RUNNING-rejoin-cluster.patch | 82 + ...-move-route-vip-fix-python3-encoding.patch | 46 + ...-move-ip-1-multi-route-table-support.patch | 122 + ...ove-ip-2-fix-route-update-multi-NICs.patch | 221 ++ ...do-not-log-debug-when-HA_debug-unset.patch | 32 + ...969-2-ocf_is_true-add-True-to-regexp.patch | 22 + ...m-remove-notify-action-from-metadata.patch | 21 + ...9-podman-1-avoid-double-inspect-call.patch | 46 + ...8219-podman-2-improve-monitor-action.patch | 63 + 
...18219-podman-3-remove-docker-remnant.patch | 34 + ...use-exec-to-avoid-performance-issues.patch | 161 + ...84-dhcpd-keep-SELinux-context-chroot.patch | 28 + ...730455-LVM-activate-fix-monitor-hang.patch | 22 + ...867-CTDB-1-explicitly-use-bash-shell.patch | 39 + ...-2-add-ctdb_max_open_files-parameter.patch | 40 + SOURCES/bz1732867-CTDB-3-fixes.patch | 131 + .../bz1732867-CTDB-4-add-v4.9-support.patch | 452 +++ .../bz1736746-podman-drop-in-support.patch | 193 + ...ctivate-detect-volume-without-reboot.patch | 48 + ...-IPaddr2-add-noprefixroute-parameter.patch | 66 + ...1843-LVM-activate-partial-activation.patch | 69 + ...Filesystem-1-monitor-symlink-support.patch | 39 + ...103-Filesystem-2-add-symlink-support.patch | 43 + ...Filesystem-3-fix-umount-disk-failure.patch | 53 + ...4103-Filesystem-4-fix-readlink-issue.patch | 32 + ...ilesystem-1-avoid-corrupt-mount-list.patch | 46 + ...esystem-2-prevent-killing-bind-mount.patch | 52 + ...lesystem-3-improved-bind-mount-check.patch | 42 + ...-1-set-primary-standby-initial-score.patch | 34 + ...1744190-pgsql-2-improve-start-checks.patch | 34 + ...add-destination-and-table-parameters.patch | 202 + ...caddr-2-local-rule-destination-fixes.patch | 42 + ...1744224-IPsrcaddr-3-fix-probe-issues.patch | 45 + ...224-IPsrcaddr-4-fix-hardcoded-device.patch | 23 + ...itmq-cluster-1-monitor-mnesia-status.patch | 57 + ...er-2-fail-when-in-minority-partition.patch | 96 + ...bbitmq-cluster-3-fix-stop-regression.patch | 63 + ...ter-4-retry-start-cluster-join-fails.patch | 83 + ...ter-5-ensure-node-attribures-removed.patch | 42 + ...uster-6-debug-log-mnesia-query-fails.patch | 32 + ...cluster-7-suppress-additional-output.patch | 87 + .../bz1748768-docker-fix-stop-issues.patch | 88 + ...1-dont-fence-when-parameters-not-set.patch | 35 + ...-Route-2-validate-start-validate-all.patch | 40 + ...uster-restore-users-single-node-mode.patch | 148 + ...z1751700-IPaddr2-1-sanitize-IPv6-IPs.patch | 47 + ...2-return-empty-when-sanitation-fails.patch | 22 + ...fsserver-1-systemd-perf-improvements.patch | 77 + ...62-nfsserver-2-systemd-use-no-legend.patch | 38 + ...z1755760-NovaEvacuate-evacuate_delay.patch | 50 + ...ix-regression-without-NetworkManager.patch | 75 + .../bz1759115-aws-vpc-route53-1-update.patch | 273 ++ ...-add-public-and-secondary-ip-support.patch | 220 ++ ...15-aws-vpc-route53-3-awscli-property.patch | 302 ++ .../bz1763249-manpages-fix-pcs-syntax.patch | 53 + .../bz1764888-exportfs-allow-same-fsid.patch | 22 + ...765128-mysql-galera-fix-incorrect-rc.patch | 46 + ...7916-IPaddr2-clusterip-not-supported.patch | 104 + .../bz1777381-Filesystem-1-refresh-UUID.patch | 33 + .../bz1777381-Filesystem-2-udev-settle.patch | 124 + ...889-podman-improve-image-exist-check.patch | 61 + ...ster-delete-nodename-when-stop-fails.patch | 38 + .../bz1792237-redis-1-fix-validate-all.patch | 32 + ...37-redis-2-run-validate-during-start.patch | 24 + ...35-pgsql-1-add-postgresql-12-support.patch | 105 + ...sql-2-fix-uppercase-hostname-support.patch | 48 + ...58-azure-lb-1-remove-status-metadata.patch | 21 + ...1804658-azure-lb-2-add-socat-support.patch | 38 + ...z1808468-1-lvmlockd-fix-conditionals.patch | 28 + SOURCES/bz1808468-2-remove-locking_type.patch | 52 + ...vpc-move-ip-1-add-routing_table_role.patch | 78 + ...66-aws-vpc-move-ip-2-update-metadata.patch | 30 + ...tem-fast_stop-default-to-no-for-GFS2.patch | 46 + ...-password-correctly-based-on-version.patch | 169 + ...z1817432-use-safe-temp-file-location.patch | 44 + ...cf_is_clone-1-fix-clone-max-can-be-0.patch | 23 + 
...817598-ocf_is_clone-2-update-comment.patch | 24 + ...er-nfsnotify-fix-selinux-label-issue.patch | 48 + ...7-nfsserver-1-fix-nfsv4-only-support.patch | 43 + ...-nfsserver-2-stop-nfsdcld-if-present.patch | 34 + ...ve-ip-delete-remaining-route-entries.patch | 24 + SOURCES/bz1819965-1-ocf.py-update.patch | 357 ++ SOURCES/bz1819965-2-azure-events.patch | 1060 ++++++ ...zure-events-decode-when-type-not-str.patch | 59 + ...20523-exportfs-1-add-symlink-support.patch | 51 + ...820523-exportfs-2-fix-monitor-action.patch | 35 + ...NovaEvacuate-suppress-expected-error.patch | 37 + ...bbitmq-cluster-increase-wait-timeout.patch | 60 + ...pgsql-support-Pacemaker-v2.03-output.patch | 52 + ...836945-db2-hadr-promote-standby-node.patch | 36 + ...odman-force-rm-container-if-rm-fails.patch | 53 + ...-vpc-move-ip-log-output-when-failing.patch | 265 ++ ...vents-1-handle-exceptions-in-urlopen.patch | 70 + ...ts-2-import-urlerror-encode-postdata.patch | 68 + ...-log-error-message-file-doesnt-exist.patch | 31 + ...scribe-clientspec-format-in-metadata.patch | 27 + SOURCES/bz1845583-exportfs-2-fix-typo.patch | 23 + ...ove-vip-1-support-multiple-alias-ips.patch | 317 ++ ...733-gcp-vpc-move-vip-2-fix-list-sort.patch | 32 + ...ASE-run-verify-for-start-action-only.patch | 41 + ...z1850778-azure-lb-fix-redirect-issue.patch | 54 + ...system-support-whitespace-device-dir.patch | 566 +++ ...1001-sybaseASE-add-logfile-parameter.patch | 53 + SOURCES/bz1872754-pgsqlms-new-ra.patch | 3338 +++++++++++++++++ ...aws-vpc-move-ip-add-region-parameter.patch | 81 + ...recover-joining-non-existing-cluster.patch | 51 + ...62-podman-recover-from-killed-conmon.patch | 63 + ...move-fix-partially-matched-disk_name.patch | 58 + ...t-bootstrap-attribute-before-promote.patch | 36 + ...891855-galera-recover-2-node-cluster.patch | 80 + SOURCES/bz1891883-ethmonitor-vlan-fix.patch | 25 + ...-ip-dont-warn-for-expected-scenarios.patch | 84 + .../bz1897570-aws-add-imdsv2-support.patch | 97 + ...-make-key_file-crypt_type_not-unique.patch | 31 + ...ovaEvacuate-fix-delay_evacuate-unset.patch | 33 + ...man-recover-from-storage-out-of-sync.patch | 64 + ...1357-crypt-1-support-symlink-devices.patch | 23 + ...ypt-2-dont-sanity-check-during-probe.patch | 44 + ...bz1902045-iface-vlan-vlan-not-unique.patch | 40 + ...activate-stop-before-storage-service.patch | 60 + ...uncs-fix-traceback-redirection-bash5.patch | 45 + ...1-gcp-vpc-move-add-project-parameter.patch | 86 + ...bz1913932-2-gcp-vpc-move-route-fixes.patch | 106 + ...move-route-make-vpc_network-optional.patch | 22 + ...0698-podman-return-not-running-probe.patch | 42 + ...924363-nfsserver-error-check-unmount.patch | 57 + ...1932863-VirtualDomain-fix-pid-status.patch | 31 + ...ARY-REMOTE_CATCHUP_PENDING-CONNECTED.patch | 23 + ...2-python-pygments-fix-CVE-2021-20270.patch | 52 + ...direct-to-avoid-nc-dying-EPIPE-error.patch | 118 + ...39281-aws-vpc-move-ip-add-ENI-lookup.patch | 141 + ...9992-awsvip-dont-partially-match-IPs.patch | 23 + .../bz1940094-aws-agents-dont-spam-logs.patch | 64 + ...1940363-1-galera-redis-use-output-as.patch | 100 + ...40363-2-bundle-disable-validate-with.patch | 176 + ...4-python-pygments-fix-CVE-2021-27291.patch | 138 + .../bz1957765-gcp-vpc-move-vip-retry.patch | 102 + ...se-ocf_attribute_target-for-metadata.patch | 76 + ...indif-only-match-lines-with-netmasks.patch | 25 + SOURCES/lvmlockd-add-cmirrord-support.patch | 118 + SOURCES/metadata-add-missing-s-suffix.patch | 183 + SOURCES/nfsserver-mount-rpc_pipefs.patch | 100 + SOURCES/nova-compute-wait-NovaEvacuate.patch | 747 
++++ SOURCES/python3-syntax-fixes.patch | 705 ++++ SOURCES/timeout-interval-add-s-suffix.patch | 161 + SPECS/resource-agents.spec | 2123 +++++++++++ 204 files changed, 25851 insertions(+) create mode 100644 .gitignore create mode 100644 .resource-agents.metadata create mode 100644 SOURCES/1-configure-add-python-path-detection.patch create mode 100644 SOURCES/10-gcloud-support-info.patch create mode 100644 SOURCES/2-ci-skip-python-agents-in-shellcheck.patch create mode 100644 SOURCES/3-gcp-vpc-move-vip.patch create mode 100644 SOURCES/4-gcp-vpc-move-route.patch create mode 100644 SOURCES/5-python-library.patch create mode 100644 SOURCES/6-gcp-move-vip-filter-aggregatedlist.patch create mode 100644 SOURCES/7-gcp-bundled.patch create mode 100644 SOURCES/7-gcp-stackdriver-logging-note.patch create mode 100644 SOURCES/8-google-cloud-sdk-fixes.patch create mode 100644 SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch create mode 100644 SOURCES/IPaddr2-monitor_retries.patch create mode 100644 SOURCES/LVM-activate-1-warn-vg_access_mode.patch create mode 100644 SOURCES/LVM-activate-2-parameters-access-mode-fixes.patch create mode 100644 SOURCES/LVM-activate-fix-issue-with-dashes.patch create mode 100644 SOURCES/LVM-fix-missing-dash.patch create mode 100644 SOURCES/LVM-volume_group_check_only.patch create mode 100644 SOURCES/VirtualDomain-stateless-support.patch create mode 100644 SOURCES/aliyun-vpc-move-ip-1.patch create mode 100644 SOURCES/aliyun-vpc-move-ip-2-fixes.patch create mode 100644 SOURCES/aliyun-vpc-move-ip-3-fix-manpage.patch create mode 100644 SOURCES/aliyun-vpc-move-ip-4-bundled.patch create mode 100644 SOURCES/aliyun-vpc-move-ip-5-improve-metadata-manpage.patch create mode 100644 SOURCES/aliyuncli-python3-fixes.patch create mode 100644 SOURCES/aws-vpc-move-ip-1-avoid-false-positive-monitor.patch create mode 100644 SOURCES/aws-vpc-move-ip-2-avoid-false-positive-monitor.patch create mode 100644 SOURCES/build-add-missing-manpages.patch create mode 100644 SOURCES/bz1471182-crypt-1-new-ra.patch create mode 100644 SOURCES/bz1471182-crypt-2-fix-bashism.patch create mode 100644 SOURCES/bz1471182-crypt-3-fix-missing-and.patch create mode 100644 SOURCES/bz1552330-vdo-vol.patch create mode 100644 SOURCES/bz1607607-podman.patch create mode 100644 SOURCES/bz1631291-systemd-tmpfiles-configurable-path.patch create mode 100644 SOURCES/bz1633251-gcp-pd-move-1.patch create mode 100644 SOURCES/bz1633251-gcp-pd-move-2-use-OCF_FUNCTIONS_DIR.patch create mode 100644 SOURCES/bz1633251-gcp-pd-move-3-add-stackdriver_logging-to-metadata.patch create mode 100644 SOURCES/bz1633251-gcp-pd-move-4-fixes-and-improvements.patch create mode 100644 SOURCES/bz1633251-gcp-pd-move-5-bundle.patch create mode 100644 SOURCES/bz1635785-redis-pidof-basename.patch create mode 100644 SOURCES/bz1640587-pgsql-ignore-masters-re-promote.patch create mode 100644 SOURCES/bz1642027-nfsserver-var-lib-nfs-fix.patch create mode 100644 SOURCES/bz1643307-LVM-activate-dont-fail-initial-probe.patch create mode 100644 SOURCES/bz1658664-LVM-activate-dont-require-locking_type.patch create mode 100644 SOURCES/bz1662466-vdo-vol-fix-monitor-action.patch create mode 100644 SOURCES/bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch create mode 100644 SOURCES/bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch create mode 100644 SOURCES/bz1667414-2-LVM-activate-only-count-volumes.patch create mode 100644 SOURCES/bz1669140-Route-make-family-parameter-optional.patch create mode 100644 
SOURCES/bz1683548-redis-mute-password-warning.patch create mode 100644 SOURCES/bz1689184-Squid-1-fix-pidfile-issue.patch create mode 100644 SOURCES/bz1689184-Squid-2-dont-run-pgrep-without-PID.patch create mode 100644 SOURCES/bz1691456-gcloud-dont-detect-python2.patch create mode 100644 SOURCES/bz1692413-1-iSCSITarget-create-iqn-when-it-doesnt-exist.patch create mode 100644 SOURCES/bz1692413-2-iSCSILogicalUnit-create-acls-fix.patch create mode 100644 SOURCES/bz1692960-mysql-galera-runuser-su-to-avoid-dac_override.patch create mode 100644 SOURCES/bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch create mode 100644 SOURCES/bz1695039-LVM-activate-return-NOT_RUNNING-rejoin-cluster.patch create mode 100644 SOURCES/bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch create mode 100644 SOURCES/bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch create mode 100644 SOURCES/bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch create mode 100644 SOURCES/bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch create mode 100644 SOURCES/bz1707969-2-ocf_is_true-add-True-to-regexp.patch create mode 100644 SOURCES/bz1717759-Filesystem-remove-notify-action-from-metadata.patch create mode 100644 SOURCES/bz1718219-podman-1-avoid-double-inspect-call.patch create mode 100644 SOURCES/bz1718219-podman-2-improve-monitor-action.patch create mode 100644 SOURCES/bz1718219-podman-3-remove-docker-remnant.patch create mode 100644 SOURCES/bz1718219-podman-4-use-exec-to-avoid-performance-issues.patch create mode 100644 SOURCES/bz1719684-dhcpd-keep-SELinux-context-chroot.patch create mode 100644 SOURCES/bz1730455-LVM-activate-fix-monitor-hang.patch create mode 100644 SOURCES/bz1732867-CTDB-1-explicitly-use-bash-shell.patch create mode 100644 SOURCES/bz1732867-CTDB-2-add-ctdb_max_open_files-parameter.patch create mode 100644 SOURCES/bz1732867-CTDB-3-fixes.patch create mode 100644 SOURCES/bz1732867-CTDB-4-add-v4.9-support.patch create mode 100644 SOURCES/bz1736746-podman-drop-in-support.patch create mode 100644 SOURCES/bz1738428-LVM-activate-detect-volume-without-reboot.patch create mode 100644 SOURCES/bz1741042-IPaddr2-add-noprefixroute-parameter.patch create mode 100644 SOURCES/bz1741843-LVM-activate-partial-activation.patch create mode 100644 SOURCES/bz1744103-Filesystem-1-monitor-symlink-support.patch create mode 100644 SOURCES/bz1744103-Filesystem-2-add-symlink-support.patch create mode 100644 SOURCES/bz1744103-Filesystem-3-fix-umount-disk-failure.patch create mode 100644 SOURCES/bz1744103-Filesystem-4-fix-readlink-issue.patch create mode 100644 SOURCES/bz1744140-Filesystem-1-avoid-corrupt-mount-list.patch create mode 100644 SOURCES/bz1744140-Filesystem-2-prevent-killing-bind-mount.patch create mode 100644 SOURCES/bz1744140-Filesystem-3-improved-bind-mount-check.patch create mode 100644 SOURCES/bz1744190-pgsql-1-set-primary-standby-initial-score.patch create mode 100644 SOURCES/bz1744190-pgsql-2-improve-start-checks.patch create mode 100644 SOURCES/bz1744224-IPsrcaddr-1-add-destination-and-table-parameters.patch create mode 100644 SOURCES/bz1744224-IPsrcaddr-2-local-rule-destination-fixes.patch create mode 100644 SOURCES/bz1744224-IPsrcaddr-3-fix-probe-issues.patch create mode 100644 SOURCES/bz1744224-IPsrcaddr-4-fix-hardcoded-device.patch create mode 100644 SOURCES/bz1745713-rabbitmq-cluster-1-monitor-mnesia-status.patch create mode 100644 SOURCES/bz1745713-rabbitmq-cluster-2-fail-when-in-minority-partition.patch create mode 100644 
SOURCES/bz1745713-rabbitmq-cluster-3-fix-stop-regression.patch create mode 100644 SOURCES/bz1745713-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch create mode 100644 SOURCES/bz1745713-rabbitmq-cluster-5-ensure-node-attribures-removed.patch create mode 100644 SOURCES/bz1745713-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch create mode 100644 SOURCES/bz1745713-rabbitmq-cluster-7-suppress-additional-output.patch create mode 100644 SOURCES/bz1748768-docker-fix-stop-issues.patch create mode 100644 SOURCES/bz1750261-Route-1-dont-fence-when-parameters-not-set.patch create mode 100644 SOURCES/bz1750261-Route-2-validate-start-validate-all.patch create mode 100644 SOURCES/bz1750352-rabbitmq-cluster-restore-users-single-node-mode.patch create mode 100644 SOURCES/bz1751700-IPaddr2-1-sanitize-IPv6-IPs.patch create mode 100644 SOURCES/bz1751700-IPaddr2-2-return-empty-when-sanitation-fails.patch create mode 100644 SOURCES/bz1751962-nfsserver-1-systemd-perf-improvements.patch create mode 100644 SOURCES/bz1751962-nfsserver-2-systemd-use-no-legend.patch create mode 100644 SOURCES/bz1755760-NovaEvacuate-evacuate_delay.patch create mode 100644 SOURCES/bz1757837-IPsrcaddr-fix-regression-without-NetworkManager.patch create mode 100644 SOURCES/bz1759115-aws-vpc-route53-1-update.patch create mode 100644 SOURCES/bz1759115-aws-vpc-route53-2-add-public-and-secondary-ip-support.patch create mode 100644 SOURCES/bz1759115-aws-vpc-route53-3-awscli-property.patch create mode 100644 SOURCES/bz1763249-manpages-fix-pcs-syntax.patch create mode 100644 SOURCES/bz1764888-exportfs-allow-same-fsid.patch create mode 100644 SOURCES/bz1765128-mysql-galera-fix-incorrect-rc.patch create mode 100644 SOURCES/bz1767916-IPaddr2-clusterip-not-supported.patch create mode 100644 SOURCES/bz1777381-Filesystem-1-refresh-UUID.patch create mode 100644 SOURCES/bz1777381-Filesystem-2-udev-settle.patch create mode 100644 SOURCES/bz1788889-podman-improve-image-exist-check.patch create mode 100644 SOURCES/bz1792196-rabbitmq-cluster-delete-nodename-when-stop-fails.patch create mode 100644 SOURCES/bz1792237-redis-1-fix-validate-all.patch create mode 100644 SOURCES/bz1792237-redis-2-run-validate-during-start.patch create mode 100644 SOURCES/bz1795535-pgsql-1-add-postgresql-12-support.patch create mode 100644 SOURCES/bz1795535-pgsql-2-fix-uppercase-hostname-support.patch create mode 100644 SOURCES/bz1804658-azure-lb-1-remove-status-metadata.patch create mode 100644 SOURCES/bz1804658-azure-lb-2-add-socat-support.patch create mode 100644 SOURCES/bz1808468-1-lvmlockd-fix-conditionals.patch create mode 100644 SOURCES/bz1808468-2-remove-locking_type.patch create mode 100644 SOURCES/bz1810466-aws-vpc-move-ip-1-add-routing_table_role.patch create mode 100644 SOURCES/bz1810466-aws-vpc-move-ip-2-update-metadata.patch create mode 100644 SOURCES/bz1814896-Filesystem-fast_stop-default-to-no-for-GFS2.patch create mode 100644 SOURCES/bz1815013-redis-parse-password-correctly-based-on-version.patch create mode 100644 SOURCES/bz1817432-use-safe-temp-file-location.patch create mode 100644 SOURCES/bz1817598-ocf_is_clone-1-fix-clone-max-can-be-0.patch create mode 100644 SOURCES/bz1817598-ocf_is_clone-2-update-comment.patch create mode 100644 SOURCES/bz1818997-3-nfsserver-nfsnotify-fix-selinux-label-issue.patch create mode 100644 SOURCES/bz1818997-nfsserver-1-fix-nfsv4-only-support.patch create mode 100644 SOURCES/bz1818997-nfsserver-2-stop-nfsdcld-if-present.patch create mode 100644 SOURCES/bz1819021-aws-vpc-move-ip-delete-remaining-route-entries.patch 
create mode 100644 SOURCES/bz1819965-1-ocf.py-update.patch create mode 100644 SOURCES/bz1819965-2-azure-events.patch create mode 100644 SOURCES/bz1819965-3-azure-events-decode-when-type-not-str.patch create mode 100644 SOURCES/bz1820523-exportfs-1-add-symlink-support.patch create mode 100644 SOURCES/bz1820523-exportfs-2-fix-monitor-action.patch create mode 100644 SOURCES/bz1830716-NovaEvacuate-suppress-expected-error.patch create mode 100644 SOURCES/bz1832321-rabbitmq-cluster-increase-wait-timeout.patch create mode 100644 SOURCES/bz1836186-pgsql-support-Pacemaker-v2.03-output.patch create mode 100644 SOURCES/bz1836945-db2-hadr-promote-standby-node.patch create mode 100644 SOURCES/bz1839721-podman-force-rm-container-if-rm-fails.patch create mode 100644 SOURCES/bz1843999-aliyun-vpc-move-ip-log-output-when-failing.patch create mode 100644 SOURCES/bz1845574-azure-events-1-handle-exceptions-in-urlopen.patch create mode 100644 SOURCES/bz1845574-azure-events-2-import-urlerror-encode-postdata.patch create mode 100644 SOURCES/bz1845581-nfsserver-dont-log-error-message-file-doesnt-exist.patch create mode 100644 SOURCES/bz1845583-exportfs-1-describe-clientspec-format-in-metadata.patch create mode 100644 SOURCES/bz1845583-exportfs-2-fix-typo.patch create mode 100644 SOURCES/bz1846733-gcp-vpc-move-vip-1-support-multiple-alias-ips.patch create mode 100644 SOURCES/bz1846733-gcp-vpc-move-vip-2-fix-list-sort.patch create mode 100644 SOURCES/bz1848025-sybaseASE-run-verify-for-start-action-only.patch create mode 100644 SOURCES/bz1850778-azure-lb-fix-redirect-issue.patch create mode 100644 SOURCES/bz1858752-Filesystem-support-whitespace-device-dir.patch create mode 100644 SOURCES/bz1861001-sybaseASE-add-logfile-parameter.patch create mode 100644 SOURCES/bz1872754-pgsqlms-new-ra.patch create mode 100644 SOURCES/bz1872999-aws-vpc-move-ip-add-region-parameter.patch create mode 100644 SOURCES/bz1881114-galera-recover-joining-non-existing-cluster.patch create mode 100644 SOURCES/bz1886262-podman-recover-from-killed-conmon.patch create mode 100644 SOURCES/bz1890068-gcp-pd-move-fix-partially-matched-disk_name.patch create mode 100644 SOURCES/bz1891835-galera-set-bootstrap-attribute-before-promote.patch create mode 100644 SOURCES/bz1891855-galera-recover-2-node-cluster.patch create mode 100644 SOURCES/bz1891883-ethmonitor-vlan-fix.patch create mode 100644 SOURCES/bz1895811-aws-vpc-move-ip-dont-warn-for-expected-scenarios.patch create mode 100644 SOURCES/bz1897570-aws-add-imdsv2-support.patch create mode 100644 SOURCES/bz1898690-crypt-make-key_file-crypt_type_not-unique.patch create mode 100644 SOURCES/bz1899551-NovaEvacuate-fix-delay_evacuate-unset.patch create mode 100644 SOURCES/bz1900015-podman-recover-from-storage-out-of-sync.patch create mode 100644 SOURCES/bz1901357-crypt-1-support-symlink-devices.patch create mode 100644 SOURCES/bz1901357-crypt-2-dont-sanity-check-during-probe.patch create mode 100644 SOURCES/bz1902045-iface-vlan-vlan-not-unique.patch create mode 100644 SOURCES/bz1902208-LVM-activate-stop-before-storage-service.patch create mode 100644 SOURCES/bz1903677-ocf-shellfuncs-fix-traceback-redirection-bash5.patch create mode 100644 SOURCES/bz1913932-1-gcp-vpc-move-add-project-parameter.patch create mode 100644 SOURCES/bz1913932-2-gcp-vpc-move-route-fixes.patch create mode 100644 SOURCES/bz1913932-3-gcp-vpc-move-route-make-vpc_network-optional.patch create mode 100644 SOURCES/bz1920698-podman-return-not-running-probe.patch create mode 100644 SOURCES/bz1924363-nfsserver-error-check-unmount.patch create 
mode 100644 SOURCES/bz1932863-VirtualDomain-fix-pid-status.patch create mode 100644 SOURCES/bz1934651-db2-add-PRIMARY-REMOTE_CATCHUP_PENDING-CONNECTED.patch create mode 100644 SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch create mode 100644 SOURCES/bz1937142-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch create mode 100644 SOURCES/bz1939281-aws-vpc-move-ip-add-ENI-lookup.patch create mode 100644 SOURCES/bz1939992-awsvip-dont-partially-match-IPs.patch create mode 100644 SOURCES/bz1940094-aws-agents-dont-spam-logs.patch create mode 100644 SOURCES/bz1940363-1-galera-redis-use-output-as.patch create mode 100644 SOURCES/bz1940363-2-bundle-disable-validate-with.patch create mode 100644 SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch create mode 100644 SOURCES/bz1957765-gcp-vpc-move-vip-retry.patch create mode 100644 SOURCES/dont-use-ocf_attribute_target-for-metadata.patch create mode 100644 SOURCES/findif-only-match-lines-with-netmasks.patch create mode 100644 SOURCES/lvmlockd-add-cmirrord-support.patch create mode 100644 SOURCES/metadata-add-missing-s-suffix.patch create mode 100644 SOURCES/nfsserver-mount-rpc_pipefs.patch create mode 100644 SOURCES/nova-compute-wait-NovaEvacuate.patch create mode 100644 SOURCES/python3-syntax-fixes.patch create mode 100644 SOURCES/timeout-interval-add-s-suffix.patch create mode 100644 SPECS/resource-agents.spec diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9640a61 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +SOURCES/ClusterLabs-resource-agents-e711383f.tar.gz +SOURCES/aliyun-cli-2.1.10.tar.gz +SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz +SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz +SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz +SOURCES/colorama-0.3.3.tar.gz +SOURCES/google-cloud-sdk-241.0.0-linux-x86_64.tar.gz +SOURCES/httplib2-0.18.1.tar.gz +SOURCES/pycryptodome-3.6.4.tar.gz +SOURCES/pyroute2-0.4.13.tar.gz diff --git a/.resource-agents.metadata b/.resource-agents.metadata new file mode 100644 index 0000000..546b20f --- /dev/null +++ b/.resource-agents.metadata @@ -0,0 +1,10 @@ +0358e1cb7fe86b2105bd2646cbe86f3c0273844a SOURCES/ClusterLabs-resource-agents-e711383f.tar.gz +306e131d8908ca794276bfe3a0b55ccc3bbd482f SOURCES/aliyun-cli-2.1.10.tar.gz +0a56f6d9ed2014a363486d33b63eca094379be06 SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz +c2a98b9a1562d223a76514f05028488ca000c395 SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz +f14647a4d37a9a254c4e711b95a7654fc418e41e SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz +0fe5bd8bca54dd71223778a1e0bcca9af324abb1 SOURCES/colorama-0.3.3.tar.gz +876e2b0c0e3031c6e6101745acd08e4e9f53d6a9 SOURCES/google-cloud-sdk-241.0.0-linux-x86_64.tar.gz +c5d22ce6660999633154927684eb9b799123e569 SOURCES/httplib2-0.18.1.tar.gz +326a73f58a62ebee00c11a12cfdd838b196e0e8e SOURCES/pycryptodome-3.6.4.tar.gz +147149db11104c06d405fd077dcd2aa1c345f109 SOURCES/pyroute2-0.4.13.tar.gz diff --git a/SOURCES/1-configure-add-python-path-detection.patch b/SOURCES/1-configure-add-python-path-detection.patch new file mode 100644 index 0000000..f1ed530 --- /dev/null +++ b/SOURCES/1-configure-add-python-path-detection.patch @@ -0,0 +1,29 @@ +From 266e10a719a396a3a522e4b0ce4271a372e4f6f1 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 13 Jul 2018 08:59:45 +0200 +Subject: [PATCH 1/3] configure: add Python path detection + +--- + configure.ac | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/configure.ac b/configure.ac +index 90ed2453..bdf057d3 100644 +--- a/configure.ac ++++ b/configure.ac +@@ 
-501,6 +501,12 @@ AC_SUBST(PING) + AC_SUBST(RM) + AC_SUBST(TEST) + ++AM_PATH_PYTHON ++if test -z "$PYTHON"; then ++ echo "*** Essential program python not found" 1>&2 ++ exit 1 ++fi ++ + AC_PATH_PROGS(ROUTE, route) + AC_DEFINE_UNQUOTED(ROUTE, "$ROUTE", path to route command) + +-- +2.17.1 + diff --git a/SOURCES/10-gcloud-support-info.patch b/SOURCES/10-gcloud-support-info.patch new file mode 100644 index 0000000..ef96ca5 --- /dev/null +++ b/SOURCES/10-gcloud-support-info.patch @@ -0,0 +1,25 @@ +diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py +--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 1980-01-01 09:00:00.000000000 +0100 ++++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 2019-04-04 11:59:47.592768577 +0200 +@@ -900,6 +900,9 @@ + return """\ + For detailed information on this command and its flags, run: + {command_path} --help ++ ++WARNING: {command_path} is only supported for "{command_path} init" and for use ++with the agents in resource-agents. + """.format(command_path=' '.join(command.GetPath())) + + +diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py +--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 1980-01-01 09:00:00.000000000 +0100 ++++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 2019-04-04 12:00:23.991142694 +0200 +@@ -84,7 +84,7 @@ + + pkg_root = os.path.dirname(os.path.dirname(surface.__file__)) + loader = cli.CLILoader( +- name='gcloud', ++ name='gcloud-ra', + command_root_directory=os.path.join(pkg_root, 'surface'), + allow_non_existing_modules=True, + version_func=VersionFunc, diff --git a/SOURCES/2-ci-skip-python-agents-in-shellcheck.patch b/SOURCES/2-ci-skip-python-agents-in-shellcheck.patch new file mode 100644 index 0000000..fd891e0 --- /dev/null +++ b/SOURCES/2-ci-skip-python-agents-in-shellcheck.patch @@ -0,0 +1,24 @@ +From 059effc058758c1294d80f03741bf5c078f1498d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 13 Jul 2018 13:22:56 +0200 +Subject: [PATCH 2/3] CI: skip Python agents in shellcheck + +--- + ci/build.sh | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/ci/build.sh b/ci/build.sh +index 608387ad..c331e9ab 100755 +--- a/ci/build.sh ++++ b/ci/build.sh +@@ -58,6 +58,7 @@ check_all_executables() { + echo "Checking executables and .sh files..." + while read -r script; do + file --mime "$script" | grep 'charset=binary' >/dev/null 2>&1 && continue ++ file --mime "$script" | grep 'text/x-python' >/dev/null 2>&1 && continue + head=$(head -n1 "$script") + [[ "$head" =~ .*ruby.* ]] && continue + [[ "$head" =~ .*zsh.* ]] && continue +-- +2.17.1 + diff --git a/SOURCES/3-gcp-vpc-move-vip.patch b/SOURCES/3-gcp-vpc-move-vip.patch new file mode 100644 index 0000000..75beb19 --- /dev/null +++ b/SOURCES/3-gcp-vpc-move-vip.patch @@ -0,0 +1,646 @@ +From 92da4155d881e9ac2dce3a51c6953817349d164a Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Mon, 25 Jun 2018 11:03:51 -0300 +Subject: [PATCH 1/4] gcp-vpc-move-vip.in: manage ip alias + +Add a resource agent to manage ip alias in the cluster. + +start: + Check if any machine in hostlist has the alias_ip assigned and + disassociate it. + Assign alias_ip to the current machine. + +stop: + Disassociate the alias_ip from the current machine. + +status/monitor: + Check if alias_ip is assigned with the current machine. 
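To see how these actions are driven in practice, a resource based on this agent could be configured roughly as follows (an illustrative sketch only: the resource name "gcp-vip" and the monitor interval are invented here, and the agent is assumed to be packaged as ocf:heartbeat:gcp-vpc-move-vip with the alias_ip and stackdriver_logging parameters described in its metadata):

  pcs resource create gcp-vip ocf:heartbeat:gcp-vpc-move-vip alias_ip="10.128.1.0/32" stackdriver_logging=yes op monitor interval=30s

Pacemaker then invokes the agent with start, stop and monitor as needed, and the OCF exit codes used below report whether this node currently holds the alias IP.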
+ +--- + +This is a port of the following bash script to Python: +https://storage.googleapis.com/sapdeploy/pacemaker-gcp/alias + +The problem with the bash script is the use of gcloud, whose command line +API is not stable. + +ocf-tester.in results: + + > sudo ./tools/ocf-tester.in -o alias_ip='10.128.1.0/32' -o stackdriver_logging=yes -n gcp-vpc-move-vip.in heartbeat/gcp-vpc-move-vip.in + Beginning tests for heartbeat/gcp-vpc-move-vip.in... + ./tools/ocf-tester.in: line 226: cd: @datadir@/resource-agents: No such file or directory + close failed in file object destructor: + sys.excepthook is missing + lost sys.stderr + * rc=1: Your agent produces meta-data which does not conform to ra-api-1.dtd + Tests failed: heartbeat/gcp-vpc-move-vip.in failed 1 tests + +The only test failing is the meta-data, but all the agents that I tried +also fail on this. If this is a concern, could you please point me +to a test which succeeds so I can check what I am doing differently? + +This commit can also be viewed at: + https://github.com/collabora-gce/resource-agents/tree/alias + +Thanks +--- + configure.ac | 1 + + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/gcp-vpc-move-vip.in | 299 ++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 302 insertions(+) + create mode 100755 heartbeat/gcp-vpc-move-vip.in + +diff --git a/configure.ac b/configure.ac +index bdf057d33..3d8f9ca74 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -959,6 +959,7 @@ AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate]) + AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88]) + AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip]) ++AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip]) + AC_CONFIG_FILES([heartbeat/iSCSILogicalUnit], [chmod +x heartbeat/iSCSILogicalUnit]) + AC_CONFIG_FILES([heartbeat/iSCSITarget], [chmod +x heartbeat/iSCSITarget]) + AC_CONFIG_FILES([heartbeat/jira], [chmod +x heartbeat/jira]) +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index c59126d13..e9eaf369f 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -114,6 +114,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_galera.7 \ + ocf_heartbeat_garbd.7 \ + ocf_heartbeat_gcp-vpc-move-ip.7 \ ++ ocf_heartbeat_gcp-vpc-move-vip.7 \ + ocf_heartbeat_iSCSILogicalUnit.7 \ + ocf_heartbeat_iSCSITarget.7 \ + ocf_heartbeat_iface-bridge.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 4f5059e27..36b271956 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -111,6 +111,7 @@ ocf_SCRIPTS = AoEtarget \ + galera \ + garbd \ + gcp-vpc-move-ip \ ++ gcp-vpc-move-vip \ + iSCSILogicalUnit \ + iSCSITarget \ + ids \ +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +new file mode 100755 +index 000000000..4954e11df +--- /dev/null ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -0,0 +1,299 @@ ++#!/usr/bin/env python ++# --------------------------------------------------------------------- ++# Copyright 2016 Google Inc. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); ++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, ++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++# See the License for the specific language governing permissions and ++# limitations under the License. ++# --------------------------------------------------------------------- ++# Description: Google Cloud Platform - Floating IP Address (Alias) ++# --------------------------------------------------------------------- ++ ++import json ++import logging ++import os ++import sys ++import time ++ ++import googleapiclient.discovery ++ ++if sys.version_info >= (3, 0): ++ # Python 3 imports. ++ import urllib.parse as urlparse ++ import urllib.request as urlrequest ++else: ++ # Python 2 imports. ++ import urllib as urlparse ++ import urllib2 as urlrequest ++ ++ ++CONN = None ++THIS_VM = None ++OCF_SUCCESS = 0 ++OCF_ERR_GENERIC = 1 ++OCF_ERR_CONFIGURED = 6 ++OCF_NOT_RUNNING = 7 ++METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' ++METADATA_HEADERS = {'Metadata-Flavor': 'Google'} ++METADATA = \ ++''' ++ ++ ++ 1.0 ++ Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance ++ Floating IP Address on Google Cloud Platform ++ ++ ++ List of hosts in the cluster ++ Host list ++ ++ ++ ++ If enabled (set to true), IP failover logs will be posted to stackdriver logging ++ Stackdriver-logging support ++ ++ ++ ++ IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ ++ ++ ++ Subnet name for the Alias IP2 ++ Subnet name for the Alias IP ++ ++ ++ ++ ++ ++ ++ ++ ++ ++''' ++ ++ ++def get_metadata(metadata_key, params=None, timeout=None): ++ """Performs a GET request with the metadata headers. ++ ++ Args: ++ metadata_key: string, the metadata to perform a GET request on. ++ params: dictionary, the query parameters in the GET request. ++ timeout: int, timeout in seconds for metadata requests. ++ ++ Returns: ++ HTTP response from the GET request. ++ ++ Raises: ++ urlerror.HTTPError: raises when the GET request fails. 
++ """ ++ timeout = timeout or 60 ++ metadata_url = os.path.join(METADATA_SERVER, metadata_key) ++ params = urlparse.urlencode(params or {}) ++ url = '%s?%s' % (metadata_url, params) ++ request = urlrequest.Request(url, headers=METADATA_HEADERS) ++ request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) ++ return request_opener.open(request, timeout=timeout * 1.1).read() ++ ++ ++def get_instance(project, zone, instance): ++ request = CONN.instances().get( ++ project=project, zone=zone, instance=instance) ++ return request.execute() ++ ++ ++def get_network_ifaces(project, zone, instance): ++ return get_instance(project, zone, instance)['networkInterfaces'] ++ ++ ++def wait_for_operation(project, zone, operation): ++ while True: ++ result = CONN.zoneOperations().get( ++ project=project, ++ zone=zone, ++ operation=operation['name']).execute() ++ ++ if result['status'] == 'DONE': ++ if 'error' in result: ++ raise Exception(result['error']) ++ return ++ time.sleep(1) ++ ++ ++def set_alias(project, zone, instance, alias, alias_range_name=None): ++ fingerprint = get_network_ifaces(project, zone, instance)[0]['fingerprint'] ++ body = { ++ 'aliasIpRanges': [], ++ 'fingerprint': fingerprint ++ } ++ if alias: ++ obj = {'ipCidrRange': alias} ++ if alias_range_name: ++ obj['subnetworkRangeName'] = alias_range_name ++ body['aliasIpRanges'].append(obj) ++ ++ request = CONN.instances().updateNetworkInterface( ++ instance=instance, networkInterface='nic0', project=project, zone=zone, ++ body=body) ++ operation = request.execute() ++ wait_for_operation(project, zone, operation) ++ ++ ++def get_alias(project, zone, instance): ++ iface = get_network_ifaces(project, zone, instance) ++ try: ++ return iface[0]['aliasIpRanges'][0]['ipCidrRange'] ++ except KeyError: ++ return '' ++ ++ ++def get_localhost_alias(): ++ net_iface = get_metadata('instance/network-interfaces', {'recursive': True}) ++ net_iface = json.loads(net_iface.decode('utf-8')) ++ try: ++ return net_iface[0]['ipAliases'][0] ++ except (KeyError, IndexError): ++ return '' ++ ++ ++def get_zone(project, instance): ++ request = CONN.instances().aggregatedList(project=project) ++ while request is not None: ++ response = request.execute() ++ zones = response.get('items', {}) ++ for zone in zones.values(): ++ for inst in zone.get('instances', []): ++ if inst['name'] == instance: ++ return inst['zone'].split("/")[-1] ++ request = CONN.instances().aggregatedList_next( ++ previous_request=request, previous_response=response) ++ raise Exception("Unable to find instance %s" % (instance)) ++ ++ ++def gcp_alias_start(alias): ++ if not alias: ++ sys.exit(OCF_ERR_CONFIGURED) ++ my_alias = get_localhost_alias() ++ my_zone = get_metadata('instance/zone').split('/')[-1] ++ project = get_metadata('project/project-id') ++ ++ # If I already have the IP, exit. If it has an alias IP that isn't the VIP, ++ # then remove it ++ if my_alias == alias: ++ logging.info( ++ '%s already has %s attached. 
No action required' % (THIS_VM, alias)) ++ sys.exit(OCF_SUCCESS) ++ elif my_alias: ++ logging.info('Removing %s from %s' % (my_alias, THIS_VM)) ++ set_alias(project, my_zone, THIS_VM, '') ++ ++ # Loops through all hosts & remove the alias IP from the host that has it ++ hostlist = os.environ.get('OCF_RESKEY_hostlist', '') ++ hostlist.replace(THIS_VM, '') ++ for host in hostlist.split(): ++ host_zone = get_zone(project, host) ++ host_alias = get_alias(project, host_zone, host) ++ if alias == host_alias: ++ logging.info( ++ '%s is attached to %s - Removing all alias IP addresses from %s' % ++ (alias, host, host)) ++ set_alias(project, host_zone, host, '') ++ break ++ ++ # add alias IP to localhost ++ set_alias( ++ project, my_zone, THIS_VM, alias, ++ os.environ.get('OCF_RESKEY_alias_range_name')) ++ ++ # Check the IP has been added ++ my_alias = get_localhost_alias() ++ if alias == my_alias: ++ logging.info('Finished adding %s to %s' % (alias, THIS_VM)) ++ elif my_alias: ++ logging.error( ++ 'Failed to add IP. %s has an IP attached but it isn\'t %s' % ++ (THIS_VM, alias)) ++ sys.exit(OCF_ERR_GENERIC) ++ else: ++ logging.error('Failed to add IP address %s to %s' % (alias, THIS_VM)) ++ sys.exit(OCF_ERR_GENERIC) ++ ++ ++def gcp_alias_stop(alias): ++ if not alias: ++ sys.exit(OCF_ERR_CONFIGURED) ++ my_alias = get_localhost_alias() ++ my_zone = get_metadata('instance/zone').split('/')[-1] ++ project = get_metadata('project/project-id') ++ ++ if my_alias == alias: ++ logging.info('Removing %s from %s' % (my_alias, THIS_VM)) ++ set_alias(project, my_zone, THIS_VM, '') ++ ++ ++def gcp_alias_status(alias): ++ if not alias: ++ sys.exit(OCF_ERR_CONFIGURED) ++ my_alias = get_localhost_alias() ++ if alias == my_alias: ++ logging.info('%s has the correct IP address attached' % THIS_VM) ++ else: ++ sys.exit(OCF_NOT_RUNNING) ++ ++ ++def configure(): ++ global CONN ++ global THIS_VM ++ ++ # Populate global vars ++ CONN = googleapiclient.discovery.build('compute', 'v1') ++ THIS_VM = get_metadata('instance/name') ++ ++ # Prepare logging ++ logging.basicConfig( ++ format='gcp:alias - %(levelname)s - %(message)s', level=logging.INFO) ++ logging.getLogger('googleapiclient').setLevel(logging.WARN) ++ logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging') ++ if logging_env: ++ logging_env = logging_env.lower() ++ if any(x in logging_env for x in ['yes', 'true', 'enabled']): ++ try: ++ import google.cloud.logging.handlers ++ client = google.cloud.logging.Client() ++ handler = google.cloud.logging.handlers.CloudLoggingHandler( ++ client, name=THIS_VM) ++ handler.setLevel(logging.INFO) ++ formatter = logging.Formatter('gcp:alias "%(message)s"') ++ handler.setFormatter(formatter) ++ root_logger = logging.getLogger() ++ root_logger.addHandler(handler) ++ except ImportError: ++ logging.error('Couldn\'t import google.cloud.logging, ' ++ 'disabling Stackdriver-logging support') ++ ++ ++def main(): ++ configure() ++ ++ alias = os.environ.get('OCF_RESKEY_alias_ip') ++ if 'start' in sys.argv[1]: ++ gcp_alias_start(alias) ++ elif 'stop' in sys.argv[1]: ++ gcp_alias_stop(alias) ++ elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: ++ gcp_alias_status(alias) ++ elif 'meta-data' in sys.argv[1]: ++ print(METADATA) ++ else: ++ logging.error('gcp:alias - no such function %s' % str(sys.argv[1])) ++ ++ ++if __name__ == "__main__": ++ main() + +From 0e6ba4894a748664ac1d8ff5b9e8c271f0b04d93 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Thu, 12 Jul 2018 09:01:22 -0300 +Subject: [PATCH 2/4] gcp-vpc-move-vip.in: minor 
fixes + +- Get hostlist from the project if the parameter is not given +- Verify if alias is present out of each action function +- Don't call configure if 'meta-data' action is given +--- + heartbeat/gcp-vpc-move-vip.in | 40 ++++++++++++++++++++++++++++------------ + 1 file changed, 28 insertions(+), 12 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index 4954e11df..f3d117bda 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -50,7 +50,7 @@ METADATA = \ + Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance + Floating IP Address on Google Cloud Platform + +- ++ + List of hosts in the cluster + Host list + +@@ -177,9 +177,22 @@ def get_zone(project, instance): + raise Exception("Unable to find instance %s" % (instance)) + + ++def get_instances_list(project, exclude): ++ hostlist = [] ++ request = CONN.instances().aggregatedList(project=project) ++ while request is not None: ++ response = request.execute() ++ zones = response.get('items', {}) ++ for zone in zones.values(): ++ for inst in zone.get('instances', []): ++ if inst['name'] != exclude: ++ hostlist.append(inst['name']) ++ request = CONN.instances().aggregatedList_next( ++ previous_request=request, previous_response=response) ++ return hostlist ++ ++ + def gcp_alias_start(alias): +- if not alias: +- sys.exit(OCF_ERR_CONFIGURED) + my_alias = get_localhost_alias() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') +@@ -196,8 +209,11 @@ def gcp_alias_start(alias): + + # Loops through all hosts & remove the alias IP from the host that has it + hostlist = os.environ.get('OCF_RESKEY_hostlist', '') +- hostlist.replace(THIS_VM, '') +- for host in hostlist.split(): ++ if hostlist: ++ hostlist.replace(THIS_VM, '').split() ++ else: ++ hostlist = get_instances_list(project, THIS_VM) ++ for host in hostlist: + host_zone = get_zone(project, host) + host_alias = get_alias(project, host_zone, host) + if alias == host_alias: +@@ -227,8 +243,6 @@ def gcp_alias_start(alias): + + + def gcp_alias_stop(alias): +- if not alias: +- sys.exit(OCF_ERR_CONFIGURED) + my_alias = get_localhost_alias() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') +@@ -239,8 +253,6 @@ def gcp_alias_stop(alias): + + + def gcp_alias_status(alias): +- if not alias: +- sys.exit(OCF_ERR_CONFIGURED) + my_alias = get_localhost_alias() + if alias == my_alias: + logging.info('%s has the correct IP address attached' % THIS_VM) +@@ -280,17 +292,21 @@ def configure(): + + + def main(): +- configure() ++ if 'meta-data' in sys.argv[1]: ++ print(METADATA) ++ return + + alias = os.environ.get('OCF_RESKEY_alias_ip') ++ if not alias: ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ configure() + if 'start' in sys.argv[1]: + gcp_alias_start(alias) + elif 'stop' in sys.argv[1]: + gcp_alias_stop(alias) + elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: + gcp_alias_status(alias) +- elif 'meta-data' in sys.argv[1]: +- print(METADATA) + else: + logging.error('gcp:alias - no such function %s' % str(sys.argv[1])) + + +From 1f50c4bc80f23f561a8630c12076707366525899 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Thu, 12 Jul 2018 13:02:16 -0300 +Subject: [PATCH 3/4] gcp-vcp-move-vip.in: implement validate-all + +Also fix some return errors +--- + heartbeat/gcp-vpc-move-vip.in | 47 +++++++++++++++++++++++++++++++------------ + 1 file 
changed, 34 insertions(+), 13 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index f3d117bda..a90c2de8d 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -22,7 +22,10 @@ import os + import sys + import time + +-import googleapiclient.discovery ++try: ++ import googleapiclient.discovery ++except ImportError: ++ pass + + if sys.version_info >= (3, 0): + # Python 3 imports. +@@ -36,6 +39,7 @@ else: + + CONN = None + THIS_VM = None ++ALIAS = None + OCF_SUCCESS = 0 + OCF_ERR_GENERIC = 1 + OCF_ERR_CONFIGURED = 6 +@@ -210,7 +214,7 @@ def gcp_alias_start(alias): + # Loops through all hosts & remove the alias IP from the host that has it + hostlist = os.environ.get('OCF_RESKEY_hostlist', '') + if hostlist: +- hostlist.replace(THIS_VM, '').split() ++ hostlist = hostlist.replace(THIS_VM, '').split() + else: + hostlist = get_instances_list(project, THIS_VM) + for host in hostlist: +@@ -260,14 +264,31 @@ def gcp_alias_status(alias): + sys.exit(OCF_NOT_RUNNING) + + +-def configure(): ++def validate(): ++ global ALIAS + global CONN + global THIS_VM + + # Populate global vars +- CONN = googleapiclient.discovery.build('compute', 'v1') +- THIS_VM = get_metadata('instance/name') ++ try: ++ CONN = googleapiclient.discovery.build('compute', 'v1') ++ except Exception as e: ++ logging.error('Couldn\'t connect with google api: ' + str(e)) ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ try: ++ THIS_VM = get_metadata('instance/name') ++ except Exception as e: ++ logging.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) ++ sys.exit(OCF_ERR_CONFIGURED) + ++ ALIAS = os.environ.get('OCF_RESKEY_alias_ip') ++ if not ALIAS: ++ logging.error('Missing alias_ip parameter') ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ ++def configure_logs(): + # Prepare logging + logging.basicConfig( + format='gcp:alias - %(levelname)s - %(message)s', level=logging.INFO) +@@ -296,19 +317,19 @@ def main(): + print(METADATA) + return + +- alias = os.environ.get('OCF_RESKEY_alias_ip') +- if not alias: +- sys.exit(OCF_ERR_CONFIGURED) ++ validate() ++ if 'validate-all' in sys.argv[1]: ++ return + +- configure() ++ configure_logs() + if 'start' in sys.argv[1]: +- gcp_alias_start(alias) ++ gcp_alias_start(ALIAS) + elif 'stop' in sys.argv[1]: +- gcp_alias_stop(alias) ++ gcp_alias_stop(ALIAS) + elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: +- gcp_alias_status(alias) ++ gcp_alias_status(ALIAS) + else: +- logging.error('gcp:alias - no such function %s' % str(sys.argv[1])) ++ logging.error('no such function %s' % str(sys.argv[1])) + + + if __name__ == "__main__": + +From f11cb236bb348ebee74e962d0ded1cb2fc97bd5f Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Fri, 13 Jul 2018 08:01:02 -0300 +Subject: [PATCH 4/4] gcp-vpc-move-vip.in: minor fixes + +--- + heartbeat/gcp-vpc-move-vip.in | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index a90c2de8d..9fc87242f 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!@PYTHON@ -tt + # --------------------------------------------------------------------- + # Copyright 2016 Google Inc. 
+ # +@@ -59,7 +59,7 @@ METADATA = \ + Host list + + +- ++ + If enabled (set to true), IP failover logs will be posted to stackdriver logging + Stackdriver-logging support + +@@ -80,6 +80,7 @@ METADATA = \ + + + ++ + + ''' + diff --git a/SOURCES/4-gcp-vpc-move-route.patch b/SOURCES/4-gcp-vpc-move-route.patch new file mode 100644 index 0000000..ccd221e --- /dev/null +++ b/SOURCES/4-gcp-vpc-move-route.patch @@ -0,0 +1,632 @@ +From 0ee4c62105ee8f90a43fe0bf8a65bc9b9da2e7e0 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Wed, 18 Jul 2018 11:54:40 -0300 +Subject: [PATCH 1/4] gcp-vpc-move-route.in: python implementation of + gcp-vpc-move-ip.in + +gcloud api is not reliable and it is slow, add a python version of +gcp-vpc-move-ip.in +--- + configure.ac | 1 + + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/gcp-vpc-move-route.in | 441 ++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 444 insertions(+) + create mode 100644 heartbeat/gcp-vpc-move-route.in + +diff --git a/configure.ac b/configure.ac +index 3d8f9ca74..039b4942c 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -960,6 +960,7 @@ AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88]) + AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip]) ++AC_CONFIG_FILES([heartbeat/gcp-vpc-move-route], [chmod +x heartbeat/gcp-vpc-move-route]) + AC_CONFIG_FILES([heartbeat/iSCSILogicalUnit], [chmod +x heartbeat/iSCSILogicalUnit]) + AC_CONFIG_FILES([heartbeat/iSCSITarget], [chmod +x heartbeat/iSCSITarget]) + AC_CONFIG_FILES([heartbeat/jira], [chmod +x heartbeat/jira]) +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index e9eaf369f..3ac0569de 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -115,6 +115,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_garbd.7 \ + ocf_heartbeat_gcp-vpc-move-ip.7 \ + ocf_heartbeat_gcp-vpc-move-vip.7 \ ++ ocf_heartbeat_gcp-vpc-move-route.7 \ + ocf_heartbeat_iSCSILogicalUnit.7 \ + ocf_heartbeat_iSCSITarget.7 \ + ocf_heartbeat_iface-bridge.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 36b271956..d4750bf09 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -112,6 +112,7 @@ ocf_SCRIPTS = AoEtarget \ + garbd \ + gcp-vpc-move-ip \ + gcp-vpc-move-vip \ ++ gcp-vpc-move-route \ + iSCSILogicalUnit \ + iSCSITarget \ + ids \ +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +new file mode 100644 +index 000000000..5f4569baa +--- /dev/null ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -0,0 +1,441 @@ ++#!@PYTHON@ -tt ++# - *- coding: utf- 8 - *- ++# ++# ++# OCF resource agent to move an IP address within a VPC in GCP ++# ++# License: GNU General Public License (GPL) ++# Copyright (c) 2018 Hervé Werner (MFG Labs) ++# Copyright 2018 Google Inc. ++# Based on code from Markus Guertler (aws-vpc-move-ip) ++# All Rights Reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++ ++ ++####################################################################### ++ ++import atexit ++import logging ++import os ++import sys ++import time ++ ++try: ++ import googleapiclient.discovery ++ import pyroute2 ++except ImportError: ++ pass ++ ++if sys.version_info >= (3, 0): ++ # Python 3 imports. ++ import urllib.parse as urlparse ++ import urllib.request as urlrequest ++else: ++ # Python 2 imports. ++ import urllib as urlparse ++ import urllib2 as urlrequest ++ ++ ++OCF_SUCCESS = 0 ++OCF_ERR_GENERIC = 1 ++OCF_ERR_UNIMPLEMENTED = 3 ++OCF_ERR_PERM = 4 ++OCF_ERR_CONFIGURED = 6 ++OCF_NOT_RUNNING = 7 ++GCP_API_URL_PREFIX = 'https://www.googleapis.com/compute/v1' ++METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' ++METADATA_HEADERS = {'Metadata-Flavor': 'Google'} ++METADATA = \ ++''' ++ ++ ++1.0 ++ ++Resource Agent that can move a floating IP address within a GCP VPC by changing an ++entry in the routing table. This agent also configures the floating IP locally ++on the instance OS. ++Requirements: ++- IP forwarding must be enabled on all instances in order to be able to ++terminate the route ++- The floating IP address must be chosen so that it is outside all existing ++subnets in the VPC network ++- IAM permissions ++(see https://cloud.google.com/compute/docs/access/iam-permissions): ++1) compute.routes.delete, compute.routes.get and compute.routes.update on the ++route ++2) compute.networks.updatePolicy on the network (to add a new route) ++3) compute.networks.get on the network (to check the VPC network existence) ++4) compute.routes.list on the project (to check conflicting routes) ++ ++Move IP within a GCP VPC ++ ++ ++ ++ ++If enabled (set to true), IP failover logs will be posted to stackdriver logging ++Stackdriver-logging support ++ ++ ++ ++ ++ ++Floating IP address. Note that this IP must be chosen outside of all existing ++subnet ranges ++ ++Floating IP ++ ++ ++ ++ ++ ++Name of the VPC network ++ ++VPC network ++ ++ ++ ++ ++ ++Name of the network interface ++ ++Network interface name ++ ++ ++ ++ ++ ++Route name ++ ++Route name ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++''' ++ ++ ++class Context(object): ++ __slots__ = 'conn', 'iface_idx', 'instance', 'instance_url', 'interface', \ ++ 'ip', 'iproute', 'project', 'route_name', 'vpc_network', \ ++ 'vpc_network_url', 'zone' ++ ++ ++def wait_for_operation(ctx, response): ++ """Blocks until operation completes.
++ Code from GitHub's GoogleCloudPlatform/python-docs-samples ++ ++ Args: ++ response: dict, a request's response ++ """ ++ def _OperationGetter(response): ++ operation = response[u'name'] ++ if response.get(u'zone'): ++ return ctx.conn.zoneOperations().get( ++ project=ctx.project, zone=ctx.zone, operation=operation) ++ else: ++ return ctx.conn.globalOperations().get( ++ project=ctx.project, operation=operation) ++ ++ while True: ++ result = _OperationGetter(response).execute() ++ ++ if result['status'] == 'DONE': ++ if 'error' in result: ++ raise Exception(result['error']) ++ return result ++ ++ time.sleep(1) ++ ++ ++def get_metadata(metadata_key, params=None, timeout=None): ++ """Performs a GET request with the metadata headers. ++ ++ Args: ++ metadata_key: string, the metadata to perform a GET request on. ++ params: dictionary, the query parameters in the GET request. ++ timeout: int, timeout in seconds for metadata requests. ++ ++ Returns: ++ HTTP response from the GET request. ++ ++ Raises: ++ urlerror.HTTPError: raises when the GET request fails. ++ """ ++ timeout = timeout or 60 ++ metadata_url = os.path.join(METADATA_SERVER, metadata_key) ++ params = urlparse.urlencode(params or {}) ++ url = '%s?%s' % (metadata_url, params) ++ request = urlrequest.Request(url, headers=METADATA_HEADERS) ++ request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) ++ return request_opener.open(request, timeout=timeout * 1.1).read() ++ ++ ++def validate(ctx): ++ if os.geteuid() != 0: ++ logging.error('You must run this agent as root') ++ sys.exit(OCF_ERR_PERM) ++ ++ try: ++ ctx.conn = googleapiclient.discovery.build('compute', 'v1') ++ except Exception as e: ++ logging.error('Couldn\'t connect with google api: ' + str(e)) ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ ctx.ip = os.environ.get('OCF_RESKEY_ip') ++ if not ctx.ip: ++ logging.error('Missing ip parameter') ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ try: ++ ctx.instance = get_metadata('instance/name') ++ ctx.zone = get_metadata('instance/zone').split('/')[-1] ++ ctx.project = get_metadata('project/project-id') ++ except Exception as e: ++ logging.error( ++ 'Instance information not found. 
Is this a GCE instance ?: %s', str(e)) ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ ctx.instance_url = '%s/projects/%s/zones/%s/instances/%s' % ( ++ GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance) ++ ctx.vpc_network = os.environ.get('OCF_RESKEY_vpc_network', 'default') ++ ctx.vpc_network_url = '%s/projects/%s/global/networks/%s' % ( ++ GCP_API_URL_PREFIX, ctx.project, ctx.vpc_network) ++ ctx.interface = os.environ.get('OCF_RESKEY_interface', 'eth0') ++ ctx.route_name = os.environ.get( ++ 'OCF_RESKEY_route_name', 'ra-%s' % os.environ['__SCRIPT_NAME']) ++ ctx.iproute = pyroute2.IPRoute() ++ atexit.register(ctx.iproute.close) ++ idxs = ctx.iproute.link_lookup(ifname=ctx.interface) ++ if not idxs: ++ logging.error('Network interface not found') ++ sys.exit(OCF_ERR_CONFIGURED) ++ ctx.iface_idx = idxs[0] ++ ++ ++def check_conflicting_routes(ctx): ++ fl = '(destRange = "%s*") AND (network = "%s") AND (name != "%s")' % ( ++ ctx.ip, ctx.vpc_network_url, ctx.route_name) ++ request = ctx.conn.routes().list(project=ctx.project, filter=fl) ++ response = request.execute() ++ route_list = response.get('items', None) ++ if route_list: ++ logging.error( ++ 'Conflicting unnmanaged routes for destination %s/32 in VPC %s found : %s', ++ ctx.ip, ctx.vpc_network, str(route_list)) ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ ++def route_release(ctx): ++ request = ctx.conn.routes().delete(project=ctx.project, route=ctx.route_name) ++ wait_for_operation(ctx, request.execute()) ++ ++ ++def ip_monitor(ctx): ++ logging.info('IP monitor: checking local network configuration') ++ ++ def address_filter(addr): ++ for attr in addr['attrs']: ++ if attr[0] == 'IFA_LOCAL': ++ if attr[1] == ctx.ip: ++ return True ++ else: ++ return False ++ ++ route = ctx.iproute.get_addr( ++ index=ctx.iface_idx, match=address_filter) ++ if not route: ++ logging.warn( ++ 'The floating IP %s is not locally configured on this instance (%s)', ++ ctx.ip, ctx.instance) ++ return OCF_NOT_RUNNING ++ ++ logging.debug( ++ 'The floating IP %s is correctly configured on this instance (%s)', ++ ctx.ip, ctx.instance) ++ return OCF_SUCCESS ++ ++ ++def ip_release(ctx): ++ ctx.iproute.addr('del', index=ctx.iface_idx, address=ctx.ip, mask=32) ++ ++ ++def ip_and_route_start(ctx): ++ logging.info('Bringing up the floating IP %s', ctx.ip) ++ ++ # Add a new entry in the routing table ++ # If the route entry exists and is pointing to another instance, take it over ++ ++ # Ensure that there is no route that we are not aware of that is also handling our IP ++ check_conflicting_routes(ctx) ++ ++ # There is no replace API, We need to first delete the existing route if any ++ try: ++ request = ctx.conn.routes().get(project=ctx.project, route=ctx.route_name) ++ request.execute() ++ # TODO: check specific exception for 404 ++ except googleapiclient.errors.HttpError as e: ++ if e.resp.status != 404: ++ raise ++ else: ++ route_release(ctx) ++ ++ route_body = { ++ 'name': ctx.route_name, ++ 'network': ctx.vpc_network_url, ++ 'destRange': '%s/32' % ctx.ip, ++ 'nextHopInstance': ctx.instance_url, ++ } ++ try: ++ request = ctx.conn.routes().insert(project=ctx.project, body=route_body) ++ wait_for_operation(ctx, request.execute()) ++ except googleapiclient.errors.HttpError: ++ try: ++ request = ctx.conn.networks().get( ++ project=ctx.project, network=ctx.vpc_network) ++ request.execute() ++ except googleapiclient.errors.HttpError as e: ++ if e.resp.status == 404: ++ logging.error('VPC network not found') ++ sys.exit(OCF_ERR_CONFIGURED) ++ else: ++ raise ++ else: ++ raise ++ ++ # 
Configure the IP address locally ++ # We need to release the IP first ++ if ip_monitor(ctx) == OCF_SUCCESS: ++ ip_release(ctx) ++ ++ ctx.iproute.addr('add', index=ctx.iface_idx, address=ctx.ip, mask=32) ++ ctx.iproute.link('set', index=ctx.iface_idx, state='up') ++ logging.info('Successfully brought up the floating IP %s', ctx.ip) ++ ++ ++def route_monitor(ctx): ++ logging.info('GCP route monitor: checking route table') ++ ++ # Ensure that there is no route that we are not aware of that is also handling our IP ++ check_conflicting_routes ++ ++ try: ++ request = ctx.conn.routes().get(project=ctx.project, route=ctx.route_name) ++ response = request.execute() ++ except googleapiclient.errors.HttpError as e: ++ if 'Insufficient Permission' in e.content: ++ return OCF_ERR_PERM ++ elif e.resp.status == 404: ++ return OCF_NOT_RUNNING ++ else: ++ raise ++ ++ routed_to_instance = response.get('nextHopInstance', '') ++ instance_url = '%s/projects/%s/zones/%s/instances/%s' % ( ++ GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance) ++ if routed_to_instance != instance_url: ++ logging.warn( ++ 'The floating IP %s is not routed to this instance (%s) but to instance %s', ++ ctx.ip, ctx.instance, routed_to_instance.split('/')[-1]) ++ return OCF_NOT_RUNNING ++ ++ logging.debug( ++ 'The floating IP %s is correctly routed to this instance (%s)', ++ ctx.ip, ctx.instance) ++ return OCF_SUCCESS ++ ++ ++def ip_and_route_stop(ctx): ++ logging.info('Bringing down the floating IP %s', ctx.ip) ++ ++ # Delete the route entry ++ # If the route entry exists and is pointing to another instance, don't touch it ++ if route_monitor(ctx) == OCF_NOT_RUNNING: ++ logging.info( ++ 'The floating IP %s is already not routed to this instance (%s)', ++ ctx.ip, ctx.instance) ++ else: ++ route_release(ctx) ++ ++ if ip_monitor(ctx) == OCF_NOT_RUNNING: ++ logging.info('The floating IP %s is already down', ctx.ip) ++ else: ++ ip_release(ctx) ++ ++ ++def configure_logs(ctx): ++ # Prepare logging ++ logging.basicConfig( ++ format='gcp:route - %(levelname)s - %(message)s', level=logging.INFO) ++ logging.getLogger('googleapiclient').setLevel(logging.WARN) ++ logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging') ++ if logging_env: ++ logging_env = logging_env.lower() ++ if any(x in logging_env for x in ['yes', 'true', 'enabled']): ++ try: ++ import google.cloud.logging.handlers ++ client = google.cloud.logging.Client() ++ handler = google.cloud.logging.handlers.CloudLoggingHandler( ++ client, name=ctx.instance) ++ handler.setLevel(logging.INFO) ++ formatter = logging.Formatter('gcp:route "%(message)s"') ++ handler.setFormatter(formatter) ++ root_logger = logging.getLogger() ++ root_logger.addHandler(handler) ++ except ImportError: ++ logging.error('Couldn\'t import google.cloud.logging, ' ++ 'disabling Stackdriver-logging support') ++ ++ ++def main(): ++ if 'meta-data' in sys.argv[1]: ++ print(METADATA) ++ return ++ ++ ctx = Context() ++ ++ validate(ctx) ++ if 'validate-all' in sys.argv[1]: ++ return ++ ++ configure_logs(ctx) ++ if 'start' in sys.argv[1]: ++ ip_and_route_start(ctx) ++ elif 'stop' in sys.argv[1]: ++ ip_and_route_stop(ctx) ++ elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: ++ sys.exit(ip_monitor(ctx)) ++ else: ++ usage = 'usage: $0 {start|stop|monitor|status|meta-data|validate-all}' ++ logging.error(usage) ++ sys.exit(OCF_ERR_UNIMPLEMENTED) ++ ++ ++if __name__ == "__main__": ++ main() + +From 6590c99f462403808854114ec1031755e5ce6b36 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Thu, 19 Jul 2018 
12:33:44 -0300 +Subject: [PATCH 2/4] gcp-vpc-move-ip.in: add deprecation message + +--- + heartbeat/gcp-vpc-move-ip.in | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in +index 4a6c343a8..3b8d998b3 100755 +--- a/heartbeat/gcp-vpc-move-ip.in ++++ b/heartbeat/gcp-vpc-move-ip.in +@@ -348,6 +348,8 @@ ip_and_route_stop() { + # + ############################################################################### + ++ocf_log warn "gcp-vpc-move-ip is deprecated, prefer to use gcp-vpc-move-route instead" ++ + case $__OCF_ACTION in + meta-data) metadata + exit $OCF_SUCCESS + +From 73608196d21068c6c2d5fb9f77e3d40179c85fee Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Fri, 20 Jul 2018 08:26:17 -0300 +Subject: [PATCH 3/4] gcp-vpc-move-route.in: move stackdriver parameter + +Move stackdriver parameter to the bottom of metadata list +--- + heartbeat/gcp-vpc-move-route.in | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 5f4569baa..8d5bfff36 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -90,12 +90,6 @@ route + + + +- +-If enabled (set to true), IP failover logs will be posted to stackdriver logging +-Stackdriver-logging support +- +- +- + + + Floating IP address. Note that this IP must be chosen outside of all existing +@@ -128,6 +122,12 @@ Route name + Route name + + ++ ++ ++If enabled (set to true), IP failover logs will be posted to stackdriver logging ++Stackdriver-logging support ++ ++ + + + + +From e54565ec69f809b28337c0471ad0a9b26a64f8bf Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Fri, 20 Jul 2018 08:45:53 -0300 +Subject: [PATCH 4/4] gcp-vpc-move-route.in: minor fixes + +--- + heartbeat/gcp-vpc-move-route.in | 13 +++++++------ + 1 file changed, 7 insertions(+), 6 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 8d5bfff36..566a70f86 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -104,7 +104,7 @@ subnet ranges + Name of the VPC network + + VPC network +- ++ + + + +@@ -112,7 +112,7 @@ Name of the VPC network + Name of the network interface + + Network interface name +- ++ + + + +@@ -120,7 +120,7 @@ Name of the network interface + Route name + + Route name +- ++ + + + +@@ -138,7 +138,7 @@ Route name + + + +-''' ++''' % os.path.basename(sys.argv[0]) + + + class Context(object): +@@ -229,7 +229,7 @@ def validate(ctx): + GCP_API_URL_PREFIX, ctx.project, ctx.vpc_network) + ctx.interface = os.environ.get('OCF_RESKEY_interface', 'eth0') + ctx.route_name = os.environ.get( +- 'OCF_RESKEY_route_name', 'ra-%s' % os.environ['__SCRIPT_NAME']) ++ 'OCF_RESKEY_route_name', 'ra-%s' % os.path.basename(sys.argv[0])) + ctx.iproute = pyroute2.IPRoute() + atexit.register(ctx.iproute.close) + idxs = ctx.iproute.link_lookup(ifname=ctx.interface) +@@ -432,7 +432,8 @@ def main(): + elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: + sys.exit(ip_monitor(ctx)) + else: +- usage = 'usage: $0 {start|stop|monitor|status|meta-data|validate-all}' ++ usage = 'usage: %s {start|stop|monitor|status|meta-data|validate-all}' % \ ++ os.path.basename(sys.argv[0]) + logging.error(usage) + sys.exit(OCF_ERR_UNIMPLEMENTED) + diff --git a/SOURCES/5-python-library.patch b/SOURCES/5-python-library.patch new file mode 100644 index 0000000..0066119 --- /dev/null +++ b/SOURCES/5-python-library.patch @@ -0,0 +1,600 @@ +From 
13ae97dec5754642af4d0d0edc03d9290e792e7f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 19 Jul 2018 16:12:35 +0200 +Subject: [PATCH 1/5] Add Python library + +--- + heartbeat/Makefile.am | 3 +- + heartbeat/ocf.py | 136 ++++++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 138 insertions(+), 1 deletion(-) + create mode 100644 heartbeat/ocf.py + +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index d4750bf09..1333f8feb 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -185,7 +185,8 @@ ocfcommon_DATA = ocf-shellfuncs \ + ora-common.sh \ + mysql-common.sh \ + nfsserver-redhat.sh \ +- findif.sh ++ findif.sh \ ++ ocf.py + + # Legacy locations + hbdir = $(sysconfdir)/ha.d +diff --git a/heartbeat/ocf.py b/heartbeat/ocf.py +new file mode 100644 +index 000000000..12be7a2a4 +--- /dev/null ++++ b/heartbeat/ocf.py +@@ -0,0 +1,136 @@ ++# ++# Copyright (c) 2016 Red Hat, Inc, Oyvind Albrigtsen ++# All Rights Reserved. ++# ++# ++# This library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# This library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with this library; if not, write to the Free Software ++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++# ++ ++import sys, os, logging, syslog ++ ++argv=sys.argv ++env=os.environ ++ ++# ++# Common variables for the OCF Resource Agents supplied by ++# heartbeat. ++# ++ ++OCF_SUCCESS=0 ++OCF_ERR_GENERIC=1 ++OCF_ERR_ARGS=2 ++OCF_ERR_UNIMPLEMENTED=3 ++OCF_ERR_PERM=4 ++OCF_ERR_INSTALLED=5 ++OCF_ERR_CONFIGURED=6 ++OCF_NOT_RUNNING=7 ++ ++# Non-standard values. ++# ++# OCF does not include the concept of master/slave resources so we ++# need to extend it so we can discover a resource's complete state. ++# ++# OCF_RUNNING_MASTER: ++# The resource is in "master" mode and fully operational ++# OCF_FAILED_MASTER: ++# The resource is in "master" mode but in a failed state ++# ++# The extra two values should only be used during a probe. ++# ++# Probes are used to discover resources that were started outside of ++# the CRM and/or left behind if the LRM fails. 
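# A sketch (not part of this patch) of how these extended codes are meant to
# be consumed by a promotable agent; the helper names and arguments below are
# hypothetical, and the probe test is the Python equivalent of the shell
# check quoted just below.
#
#   def probe_active():
#       return (env.get("__OCF_ACTION") == "monitor"
#               and env.get("OCF_RESKEY_CRM_meta_interval") == "0")
#
#   def monitor(is_master, healthy):
#       if is_master:
#           return OCF_RUNNING_MASTER if healthy else OCF_FAILED_MASTER
#       # "Slaves" keep using the standard codes.
#       return OCF_SUCCESS if healthy else OCF_ERR_GENERIC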
++# ++# They can be identified in RA scripts by checking for: ++# [ "${__OCF_ACTION}" = "monitor" -a "${OCF_RESKEY_CRM_meta_interval}" = "0" ] ++# ++# Failed "slaves" should continue to use: OCF_ERR_GENERIC ++# Fully operational "slaves" should continue to use: OCF_SUCCESS ++# ++OCF_RUNNING_MASTER=8 ++OCF_FAILED_MASTER=9 ++ ++ ++## Own logger handler that uses old-style syslog handler as otherwise ++## everything is sourced from /dev/syslog ++class SyslogLibHandler(logging.StreamHandler): ++ """ ++ A handler class that correctly push messages into syslog ++ """ ++ def emit(self, record): ++ syslog_level = { ++ logging.CRITICAL:syslog.LOG_CRIT, ++ logging.ERROR:syslog.LOG_ERR, ++ logging.WARNING:syslog.LOG_WARNING, ++ logging.INFO:syslog.LOG_INFO, ++ logging.DEBUG:syslog.LOG_DEBUG, ++ logging.NOTSET:syslog.LOG_DEBUG, ++ }[record.levelno] ++ ++ msg = self.format(record) ++ ++ # syslog.syslog can not have 0x00 character inside or exception ++ # is thrown ++ syslog.syslog(syslog_level, msg.replace("\x00","\n")) ++ return ++ ++ ++OCF_RESOURCE_INSTANCE = env.get("OCF_RESOURCE_INSTANCE") ++ ++HA_DEBUG = env.get("HA_debug", 0) ++HA_DATEFMT = env.get("HA_DATEFMT", "%b %d %T ") ++HA_LOGFACILITY = env.get("HA_LOGFACILITY") ++HA_LOGFILE = env.get("HA_LOGFILE") ++HA_DEBUGLOG = env.get("HA_DEBUGLOG") ++ ++log = logging.getLogger(os.path.basename(argv[0])) ++log.setLevel(logging.DEBUG) ++ ++## add logging to stderr ++if sys.stdout.isatty(): ++ seh = logging.StreamHandler(stream=sys.stderr) ++ if HA_DEBUG == 0: ++ seh.setLevel(logging.WARNING) ++ sehformatter = logging.Formatter('%(filename)s(%(OCF_RESOURCE_INSTANCE)s)[%(process)s]:\t%(asctime)s%(levelname)s: %(message)s', datefmt=HA_DATEFMT) ++ seh.setFormatter(sehformatter) ++ log.addHandler(seh) ++ ++## add logging to syslog ++if HA_LOGFACILITY: ++ slh = SyslogLibHandler() ++ if HA_DEBUG == 0: ++ slh.setLevel(logging.WARNING) ++ slhformatter = logging.Formatter('%(levelname)s: %(message)s') ++ slh.setFormatter(slhformatter) ++ log.addHandler(slh) ++ ++## add logging to file ++if HA_LOGFILE: ++ lfh = logging.FileHandler(HA_LOGFILE) ++ if HA_DEBUG == 0: ++ lfh.setLevel(logging.WARNING) ++ lfhformatter = logging.Formatter('%(filename)s(%(OCF_RESOURCE_INSTANCE)s)[%(process)s]:\t%(asctime)s%(levelname)s: %(message)s', datefmt=HA_DATEFMT) ++ lfh.setFormatter(lfhformatter) ++ log.addHandler(lfh) ++ ++## add debug logging to file ++if HA_DEBUGLOG and HA_LOGFILE != HA_DEBUGLOG: ++ dfh = logging.FileHandler(HA_DEBUGLOG) ++ if HA_DEBUG == 0: ++ dfh.setLevel(logging.WARNING) ++ dfhformatter = logging.Formatter('%(filename)s(%(OCF_RESOURCE_INSTANCE)s)[%(process)s]:\t%(asctime)s%(levelname)s: %(message)s', datefmt=HA_DATEFMT) ++ dfh.setFormatter(dfhformatter) ++ log.addHandler(dfh) ++ ++logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) + +From 2ade8dbf1f6f6d3889dd1ddbf40858edf10fbdc7 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 19 Jul 2018 16:20:39 +0200 +Subject: [PATCH 2/5] gcp-vpc-move-vip: use Python library + +--- + heartbeat/gcp-vpc-move-vip.in | 42 +++++++++++++++++++++--------------------- + 1 file changed, 21 insertions(+), 21 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index af2080502..eb5bce6a8 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -22,6 +22,11 @@ import os + import sys + import time + ++OCF_FUNCTIONS_DIR="%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++sys.path.append(OCF_FUNCTIONS_DIR) ++ ++from ocf import 
* ++ + try: + import googleapiclient.discovery + except ImportError: +@@ -40,10 +45,6 @@ else: + CONN = None + THIS_VM = None + ALIAS = None +-OCF_SUCCESS = 0 +-OCF_ERR_GENERIC = 1 +-OCF_ERR_CONFIGURED = 6 +-OCF_NOT_RUNNING = 7 + METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' + METADATA_HEADERS = {'Metadata-Flavor': 'Google'} + METADATA = \ +@@ -206,11 +207,11 @@ def gcp_alias_start(alias): + # If I already have the IP, exit. If it has an alias IP that isn't the VIP, + # then remove it + if my_alias == alias: +- logging.info( ++ logger.info( + '%s already has %s attached. No action required' % (THIS_VM, alias)) + sys.exit(OCF_SUCCESS) + elif my_alias: +- logging.info('Removing %s from %s' % (my_alias, THIS_VM)) ++ logger.info('Removing %s from %s' % (my_alias, THIS_VM)) + set_alias(project, my_zone, THIS_VM, '') + + # Loops through all hosts & remove the alias IP from the host that has it +@@ -223,7 +224,7 @@ def gcp_alias_start(alias): + host_zone = get_zone(project, host) + host_alias = get_alias(project, host_zone, host) + if alias == host_alias: +- logging.info( ++ logger.info( + '%s is attached to %s - Removing all alias IP addresses from %s' % + (alias, host, host)) + set_alias(project, host_zone, host, '') +@@ -237,14 +238,14 @@ def gcp_alias_start(alias): + # Check the IP has been added + my_alias = get_localhost_alias() + if alias == my_alias: +- logging.info('Finished adding %s to %s' % (alias, THIS_VM)) ++ logger.info('Finished adding %s to %s' % (alias, THIS_VM)) + elif my_alias: +- logging.error( ++ logger.error( + 'Failed to add IP. %s has an IP attached but it isn\'t %s' % + (THIS_VM, alias)) + sys.exit(OCF_ERR_GENERIC) + else: +- logging.error('Failed to add IP address %s to %s' % (alias, THIS_VM)) ++ logger.error('Failed to add IP address %s to %s' % (alias, THIS_VM)) + sys.exit(OCF_ERR_GENERIC) + + +@@ -254,14 +255,14 @@ def gcp_alias_stop(alias): + project = get_metadata('project/project-id') + + if my_alias == alias: +- logging.info('Removing %s from %s' % (my_alias, THIS_VM)) ++ logger.info('Removing %s from %s' % (my_alias, THIS_VM)) + set_alias(project, my_zone, THIS_VM, '') + + + def gcp_alias_status(alias): + my_alias = get_localhost_alias() + if alias == my_alias: +- logging.info('%s has the correct IP address attached' % THIS_VM) ++ logger.info('%s has the correct IP address attached' % THIS_VM) + else: + sys.exit(OCF_NOT_RUNNING) + +@@ -275,25 +276,24 @@ def validate(): + try: + CONN = googleapiclient.discovery.build('compute', 'v1') + except Exception as e: +- logging.error('Couldn\'t connect with google api: ' + str(e)) ++ logger.error('Couldn\'t connect with google api: ' + str(e)) + sys.exit(OCF_ERR_CONFIGURED) + + try: + THIS_VM = get_metadata('instance/name') + except Exception as e: +- logging.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) ++ logger.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) + sys.exit(OCF_ERR_CONFIGURED) + + ALIAS = os.environ.get('OCF_RESKEY_alias_ip') + if not ALIAS: +- logging.error('Missing alias_ip parameter') ++ logger.error('Missing alias_ip parameter') + sys.exit(OCF_ERR_CONFIGURED) + + + def configure_logs(): + # Prepare logging +- logging.basicConfig( +- format='gcp:alias - %(levelname)s - %(message)s', level=logging.INFO) ++ global logger + logging.getLogger('googleapiclient').setLevel(logging.WARN) + logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging') + if logging_env: +@@ -307,10 +307,10 @@ def configure_logs(): + 
handler.setLevel(logging.INFO) + formatter = logging.Formatter('gcp:alias "%(message)s"') + handler.setFormatter(formatter) +- root_logger = logging.getLogger() +- root_logger.addHandler(handler) ++ log.addHandler(handler) ++ logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) + except ImportError: +- logging.error('Couldn\'t import google.cloud.logging, ' ++ logger.error('Couldn\'t import google.cloud.logging, ' + 'disabling Stackdriver-logging support') + + +@@ -331,7 +331,7 @@ def main(): + elif 'status' in sys.argv[1] or 'monitor' in sys.argv[1]: + gcp_alias_status(ALIAS) + else: +- logging.error('no such function %s' % str(sys.argv[1])) ++ logger.error('no such function %s' % str(sys.argv[1])) + + + if __name__ == "__main__": + +From 9e9ea17c42df27d4c13fed9badba295df48437f2 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 20 Jul 2018 13:27:42 +0200 +Subject: [PATCH 3/5] gcp-vpc-move-vip: moved alias-parameters to top of + metadata + +--- + heartbeat/gcp-vpc-move-vip.in | 20 ++++++++++---------- + 1 file changed, 10 insertions(+), 10 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index eb5bce6a8..ba61193b6 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -55,6 +55,16 @@ METADATA = \ + Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance + Floating IP Address on Google Cloud Platform + ++ ++ IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ ++ ++ ++ Subnet name for the Alias IP ++ Subnet name for the Alias IP ++ ++ + + List of hosts in the cluster + Host list +@@ -65,16 +75,6 @@ METADATA = \ + Stackdriver-logging support + + +- +- IP Address to be added including CIDR. E.g 192.168.0.1/32 +- IP Address to be added including CIDR. 
E.g 192.168.0.1/32 +- +- +- +- Subnet name for the Alias IP2 +- Subnet name for the Alias IP +- +- + + + + +From 716d69040dba7a769efb5a60eca934fdd65585f2 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 23 Jul 2018 11:17:00 +0200 +Subject: [PATCH 4/5] gcp-vpc-move-route: use Python library + +--- + heartbeat/gcp-vpc-move-route.in | 58 ++++++++++++++++++++--------------------- + 1 file changed, 28 insertions(+), 30 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 566a70f86..125289d86 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -39,6 +39,11 @@ import os + import sys + import time + ++OCF_FUNCTIONS_DIR="%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++sys.path.append(OCF_FUNCTIONS_DIR) ++ ++from ocf import * ++ + try: + import googleapiclient.discovery + import pyroute2 +@@ -55,12 +60,6 @@ else: + import urllib2 as urlrequest + + +-OCF_SUCCESS = 0 +-OCF_ERR_GENERIC = 1 +-OCF_ERR_UNIMPLEMENTED = 3 +-OCF_ERR_PERM = 4 +-OCF_ERR_CONFIGURED = 6 +-OCF_NOT_RUNNING = 7 + GCP_API_URL_PREFIX = 'https://www.googleapis.com/compute/v1' + METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' + METADATA_HEADERS = {'Metadata-Flavor': 'Google'} +@@ -199,18 +198,18 @@ def get_metadata(metadata_key, params=None, timeout=None): + + def validate(ctx): + if os.geteuid() != 0: +- logging.error('You must run this agent as root') ++ logger.error('You must run this agent as root') + sys.exit(OCF_ERR_PERM) + + try: + ctx.conn = googleapiclient.discovery.build('compute', 'v1') + except Exception as e: +- logging.error('Couldn\'t connect with google api: ' + str(e)) ++ logger.error('Couldn\'t connect with google api: ' + str(e)) + sys.exit(OCF_ERR_CONFIGURED) + + ctx.ip = os.environ.get('OCF_RESKEY_ip') + if not ctx.ip: +- logging.error('Missing ip parameter') ++ logger.error('Missing ip parameter') + sys.exit(OCF_ERR_CONFIGURED) + + try: +@@ -218,7 +217,7 @@ def validate(ctx): + ctx.zone = get_metadata('instance/zone').split('/')[-1] + ctx.project = get_metadata('project/project-id') + except Exception as e: +- logging.error( ++ logger.error( + 'Instance information not found. 
Is this a GCE instance ?: %s', str(e)) + sys.exit(OCF_ERR_CONFIGURED) + +@@ -234,7 +233,7 @@ def validate(ctx): + atexit.register(ctx.iproute.close) + idxs = ctx.iproute.link_lookup(ifname=ctx.interface) + if not idxs: +- logging.error('Network interface not found') ++ logger.error('Network interface not found') + sys.exit(OCF_ERR_CONFIGURED) + ctx.iface_idx = idxs[0] + +@@ -246,7 +245,7 @@ def check_conflicting_routes(ctx): + response = request.execute() + route_list = response.get('items', None) + if route_list: +- logging.error( ++ logger.error( + 'Conflicting unnmanaged routes for destination %s/32 in VPC %s found : %s', + ctx.ip, ctx.vpc_network, str(route_list)) + sys.exit(OCF_ERR_CONFIGURED) +@@ -258,7 +257,7 @@ def route_release(ctx): + + + def ip_monitor(ctx): +- logging.info('IP monitor: checking local network configuration') ++ logger.info('IP monitor: checking local network configuration') + + def address_filter(addr): + for attr in addr['attrs']: +@@ -271,12 +270,12 @@ def ip_monitor(ctx): + route = ctx.iproute.get_addr( + index=ctx.iface_idx, match=address_filter) + if not route: +- logging.warn( ++ logger.warning( + 'The floating IP %s is not locally configured on this instance (%s)', + ctx.ip, ctx.instance) + return OCF_NOT_RUNNING + +- logging.debug( ++ logger.debug( + 'The floating IP %s is correctly configured on this instance (%s)', + ctx.ip, ctx.instance) + return OCF_SUCCESS +@@ -287,7 +286,7 @@ def ip_release(ctx): + + + def ip_and_route_start(ctx): +- logging.info('Bringing up the floating IP %s', ctx.ip) ++ logger.info('Bringing up the floating IP %s', ctx.ip) + + # Add a new entry in the routing table + # If the route entry exists and is pointing to another instance, take it over +@@ -322,7 +321,7 @@ def ip_and_route_start(ctx): + request.execute() + except googleapiclient.errors.HttpError as e: + if e.resp.status == 404: +- logging.error('VPC network not found') ++ logger.error('VPC network not found') + sys.exit(OCF_ERR_CONFIGURED) + else: + raise +@@ -336,11 +335,11 @@ def ip_and_route_start(ctx): + + ctx.iproute.addr('add', index=ctx.iface_idx, address=ctx.ip, mask=32) + ctx.iproute.link('set', index=ctx.iface_idx, state='up') +- logging.info('Successfully brought up the floating IP %s', ctx.ip) ++ logger.info('Successfully brought up the floating IP %s', ctx.ip) + + + def route_monitor(ctx): +- logging.info('GCP route monitor: checking route table') ++ logger.info('GCP route monitor: checking route table') + + # Ensure that there is no route that we are not aware of that is also handling our IP + check_conflicting_routes +@@ -360,39 +359,38 @@ def route_monitor(ctx): + instance_url = '%s/projects/%s/zones/%s/instances/%s' % ( + GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance) + if routed_to_instance != instance_url: +- logging.warn( ++ logger.warning( + 'The floating IP %s is not routed to this instance (%s) but to instance %s', + ctx.ip, ctx.instance, routed_to_instance.split('/')[-1]) + return OCF_NOT_RUNNING + +- logging.debug( ++ logger.debug( + 'The floating IP %s is correctly routed to this instance (%s)', + ctx.ip, ctx.instance) + return OCF_SUCCESS + + + def ip_and_route_stop(ctx): +- logging.info('Bringing down the floating IP %s', ctx.ip) ++ logger.info('Bringing down the floating IP %s', ctx.ip) + + # Delete the route entry + # If the route entry exists and is pointing to another instance, don't touch it + if route_monitor(ctx) == OCF_NOT_RUNNING: +- logging.info( ++ logger.info( + 'The floating IP %s is already not routed to this instance 
(%s)', + ctx.ip, ctx.instance) + else: + route_release(ctx) + + if ip_monitor(ctx) == OCF_NOT_RUNNING: +- logging.info('The floating IP %s is already down', ctx.ip) ++ logger.info('The floating IP %s is already down', ctx.ip) + else: + ip_release(ctx) + + + def configure_logs(ctx): + # Prepare logging +- logging.basicConfig( +- format='gcp:route - %(levelname)s - %(message)s', level=logging.INFO) ++ global logger + logging.getLogger('googleapiclient').setLevel(logging.WARN) + logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging') + if logging_env: +@@ -406,10 +404,10 @@ def configure_logs(ctx): + handler.setLevel(logging.INFO) + formatter = logging.Formatter('gcp:route "%(message)s"') + handler.setFormatter(formatter) +- root_logger = logging.getLogger() +- root_logger.addHandler(handler) ++ log.addHandler(handler) ++ logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) + except ImportError: +- logging.error('Couldn\'t import google.cloud.logging, ' ++ logger.error('Couldn\'t import google.cloud.logging, ' + 'disabling Stackdriver-logging support') + + +@@ -434,7 +432,7 @@ def main(): + else: + usage = 'usage: %s {start|stop|monitor|status|meta-data|validate-all}' % \ + os.path.basename(sys.argv[0]) +- logging.error(usage) ++ logger.error(usage) + sys.exit(OCF_ERR_UNIMPLEMENTED) + + + +From 6ec7e87693a51cbb16a1822e6d15f1dbfc11f8e6 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 23 Jul 2018 15:55:48 +0200 +Subject: [PATCH 5/5] Python: add logging.basicConfig() to support background + logging + +--- + heartbeat/ocf.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/heartbeat/ocf.py b/heartbeat/ocf.py +index 12be7a2a4..36e7ccccd 100644 +--- a/heartbeat/ocf.py ++++ b/heartbeat/ocf.py +@@ -94,6 +94,7 @@ def emit(self, record): + HA_LOGFILE = env.get("HA_LOGFILE") + HA_DEBUGLOG = env.get("HA_DEBUGLOG") + ++logging.basicConfig() + log = logging.getLogger(os.path.basename(argv[0])) + log.setLevel(logging.DEBUG) + diff --git a/SOURCES/6-gcp-move-vip-filter-aggregatedlist.patch b/SOURCES/6-gcp-move-vip-filter-aggregatedlist.patch new file mode 100644 index 0000000..69ac757 --- /dev/null +++ b/SOURCES/6-gcp-move-vip-filter-aggregatedlist.patch @@ -0,0 +1,25 @@ +From 6bd66e337922403cb2dd3a8715ae401def8c0437 Mon Sep 17 00:00:00 2001 +From: Helen Koike +Date: Thu, 19 Jul 2018 13:00:58 -0300 +Subject: [PATCH] gcp-vpc-move-vip.in: filter call to aggregatedList + +Don't list all the instances in the project, filter only the one we are +interested in. 
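As a rough, self-contained sketch of the pattern (the project and instance names are placeholders, not values from this patch), the server-side filter lets the API do the matching instead of the agent paging through every instance in the project:

    import googleapiclient.discovery

    conn = googleapiclient.discovery.build('compute', 'v1')
    fl = 'name="%s"' % 'example-instance'  # hypothetical instance name
    request = conn.instances().aggregatedList(project='example-project', filter=fl)
    while request is not None:
        response = request.execute()
        # items is keyed by zone; zones without a match carry no 'instances'.
        for zone, data in response.get('items', {}).items():
            for inst in data.get('instances', []):
                print(zone, inst['name'])
        # aggregatedList is paginated; *_next returns None on the last page.
        request = conn.instances().aggregatedList_next(
            previous_request=request, previous_response=response)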
+--- + heartbeat/gcp-vpc-move-vip.in | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index 9fc87242f..af2080502 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -169,7 +169,8 @@ def get_localhost_alias(): + + + def get_zone(project, instance): +- request = CONN.instances().aggregatedList(project=project) ++ fl = 'name="%s"' % instance ++ request = CONN.instances().aggregatedList(project=project, filter=fl) + while request is not None: + response = request.execute() + zones = response.get('items', {}) diff --git a/SOURCES/7-gcp-bundled.patch b/SOURCES/7-gcp-bundled.patch new file mode 100644 index 0000000..b1b8a50 --- /dev/null +++ b/SOURCES/7-gcp-bundled.patch @@ -0,0 +1,35 @@ +diff -uNr a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in +--- a/heartbeat/gcp-vpc-move-ip.in 2019-04-05 09:20:26.164739897 +0200 ++++ b/heartbeat/gcp-vpc-move-ip.in 2019-04-05 09:21:01.331139742 +0200 +@@ -36,7 +36,7 @@ + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + + # Defaults +-OCF_RESKEY_gcloud_default="/usr/bin/gcloud" ++OCF_RESKEY_gcloud_default="/usr/bin/gcloud-ra" + OCF_RESKEY_configuration_default="default" + OCF_RESKEY_vpc_network_default="default" + OCF_RESKEY_interface_default="eth0" +diff -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +--- a/heartbeat/gcp-vpc-move-vip.in 2020-06-25 13:21:42.090334894 +0200 ++++ b/heartbeat/gcp-vpc-move-vip.in 2020-06-25 13:14:16.668092817 +0200 +@@ -28,6 +28,7 @@ + from ocf import * + + try: ++ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp/google-cloud-sdk/lib/third_party') + import googleapiclient.discovery + except ImportError: + pass +diff -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +--- a/heartbeat/gcp-vpc-move-route.in 2020-06-25 13:22:03.216301380 +0200 ++++ b/heartbeat/gcp-vpc-move-route.in 2020-06-25 13:13:19.864183380 +0200 +@@ -45,6 +45,8 @@ + from ocf import * + + try: ++ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp') ++ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp/google-cloud-sdk/lib/third_party') + import googleapiclient.discovery + import pyroute2 + except ImportError: diff --git a/SOURCES/7-gcp-stackdriver-logging-note.patch b/SOURCES/7-gcp-stackdriver-logging-note.patch new file mode 100644 index 0000000..b714513 --- /dev/null +++ b/SOURCES/7-gcp-stackdriver-logging-note.patch @@ -0,0 +1,28 @@ +diff -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +--- a/heartbeat/gcp-vpc-move-route.in 2018-07-30 16:56:23.486368292 +0200 ++++ b/heartbeat/gcp-vpc-move-route.in 2018-07-30 17:11:54.189715666 +0200 +@@ -125,8 +125,8 @@ + + + +-If enabled (set to true), IP failover logs will be posted to stackdriver logging +-Stackdriver-logging support ++If enabled (set to true), IP failover logs will be posted to stackdriver logging. Using stackdriver logging requires additional libraries (google-cloud-logging). ++Stackdriver-logging support. Requires additional libraries (google-cloud-logging). 
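For context on the requirement spelled out here: when the option is enabled, the agents attach a CloudLoggingHandler from the optional google-cloud-logging package to their logger. A minimal sketch of that wiring, assuming the package is installed and default GCP credentials are available (the instance name is a placeholder):

    import logging

    import google.cloud.logging
    import google.cloud.logging.handlers

    client = google.cloud.logging.Client()
    handler = google.cloud.logging.handlers.CloudLoggingHandler(
        client, name='example-instance')
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter('gcp:route "%(message)s"'))
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(logging.INFO)
    logging.info('failover events now reach Stackdriver as well as syslog')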
+ + + +diff -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +--- a/heartbeat/gcp-vpc-move-vip.in 2018-07-30 16:56:23.486368292 +0200 ++++ b/heartbeat/gcp-vpc-move-vip.in 2018-07-30 17:06:17.260686483 +0200 +@@ -72,8 +72,8 @@ + + + +- If enabled (set to true), IP failover logs will be posted to stackdriver logging +- Stackdriver-logging support ++ If enabled (set to true), IP failover logs will be posted to stackdriver logging. Using stackdriver logging requires additional libraries (google-cloud-logging). ++ Stackdriver-logging support. Requires additional libraries (google-cloud-logging). + + + diff --git a/SOURCES/8-google-cloud-sdk-fixes.patch b/SOURCES/8-google-cloud-sdk-fixes.patch new file mode 100644 index 0000000..d734d82 --- /dev/null +++ b/SOURCES/8-google-cloud-sdk-fixes.patch @@ -0,0 +1,12 @@ +diff -uNr a/bundled/gcp/google-cloud-sdk/bin/gcloud b/bundled/gcp/google-cloud-sdk/bin/gcloud +--- a/bundled/gcp/google-cloud-sdk/bin/gcloud 2018-06-18 14:30:10.000000000 +0200 ++++ b/bundled/gcp/google-cloud-sdk/bin/gcloud 2018-06-25 13:12:56.057000620 +0200 +@@ -64,6 +64,8 @@ + } + CLOUDSDK_ROOT_DIR=$(_cloudsdk_root_dir "$0") + ++CLOUDSDK_PYTHON_SITEPACKAGES=1 ++ + # if CLOUDSDK_PYTHON is empty + if [ -z "$CLOUDSDK_PYTHON" ]; then + # if python2 exists then plain python may point to a version != 2 diff --git a/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch b/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch new file mode 100644 index 0000000..de378c4 --- /dev/null +++ b/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch @@ -0,0 +1,129 @@ +diff -uNr a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py +--- a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 1980-01-01 09:00:00.000000000 +0100 ++++ b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 2019-04-04 11:56:00.292677044 +0200 +@@ -19,8 +19,14 @@ + certificates. + """ + ++from pyasn1.codec.der import decoder + from pyasn1_modules import pem +-import rsa ++from pyasn1_modules.rfc2459 import Certificate ++from pyasn1_modules.rfc5208 import PrivateKeyInfo ++from cryptography.hazmat.primitives import serialization, hashes ++from cryptography.hazmat.primitives.asymmetric import padding ++from cryptography import x509 ++from cryptography.hazmat.backends import default_backend + import six + + from oauth2client import _helpers +@@ -40,7 +46,7 @@ + '-----END RSA PRIVATE KEY-----') + _PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----', + '-----END PRIVATE KEY-----') +-_PKCS8_SPEC = None ++_PKCS8_SPEC = PrivateKeyInfo() + + + def _bit_list_to_bytes(bit_list): +@@ -67,7 +73,8 @@ + """ + + def __init__(self, pubkey): +- self._pubkey = pubkey ++ self._pubkey = serialization.load_pem_public_key(pubkey, ++ backend=default_backend()) + + def verify(self, message, signature): + """Verifies a message against a signature. 
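The rsa-to-cryptography swap in this patch reduces to the sign/verify pattern below, shown as a self-contained round trip with a throwaway key rather than code from the patch. One caveat worth flagging: cryptography's verify() signals failure by raising InvalidSignature, which lives in cryptography.exceptions and has to be imported wherever the patched code catches it.

    from cryptography.exceptions import InvalidSignature
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import padding, rsa

    # Throwaway 2048-bit key so the example runs anywhere.
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                   backend=default_backend())
    message = b'example payload'

    # Same primitives the patched RsaSigner.sign() uses: PKCS#1 v1.5 + SHA-256.
    signature = key.sign(message, padding.PKCS1v15(), hashes.SHA256())

    try:
        key.public_key().verify(signature, message,
                                padding.PKCS1v15(), hashes.SHA256())
        print('signature ok')
    except InvalidSignature:
        print('signature mismatch')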
+@@ -84,8 +91,9 @@ + """ + message = _helpers._to_bytes(message, encoding='utf-8') + try: +- return rsa.pkcs1.verify(message, signature, self._pubkey) +- except (ValueError, rsa.pkcs1.VerificationError): ++ return self._pubkey.verify(signature, message, padding.PKCS1v15(), ++ hashes.SHA256()) ++ except (ValueError, TypeError, InvalidSignature): + return False + + @classmethod +@@ -109,19 +117,18 @@ + """ + key_pem = _helpers._to_bytes(key_pem) + if is_x509_cert: +- from pyasn1.codec.der import decoder +- from pyasn1_modules import rfc2459 +- +- der = rsa.pem.load_pem(key_pem, 'CERTIFICATE') +- asn1_cert, remaining = decoder.decode(der, asn1Spec=rfc2459.Certificate()) ++ der = x509.load_pem_x509_certificate(pem_data, default_backend()) ++ asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate()) + if remaining != b'': + raise ValueError('Unused bytes', remaining) + + cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo'] + key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey']) +- pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER') ++ pubkey = serialization.load_der_public_key(decoded_key, ++ backend=default_backend()) + else: +- pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM') ++ pubkey = serialization.load_pem_public_key(decoded_key, ++ backend=default_backend()) + return cls(pubkey) + + +@@ -134,6 +141,8 @@ + + def __init__(self, pkey): + self._key = pkey ++ self._pubkey = serialization.load_pem_private_key(pkey, ++ backend=default_backend()) + + def sign(self, message): + """Signs a message. +@@ -145,7 +154,7 @@ + string, The signature of the message for the given key. + """ + message = _helpers._to_bytes(message, encoding='utf-8') +- return rsa.pkcs1.sign(message, self._key, 'SHA-256') ++ return self._key.sign(message, padding.PKCS1v15(), hashes.SHA256()) + + @classmethod + def from_string(cls, key, password='notasecret'): +@@ -163,27 +172,24 @@ + ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in + PEM format. 
+ """ +- global _PKCS8_SPEC + key = _helpers._from_bytes(key) # pem expects str in Py3 + marker_id, key_bytes = pem.readPemBlocksFromFile( + six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER) + + if marker_id == 0: +- pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes, +- format='DER') +- elif marker_id == 1: +- from pyasn1.codec.der import decoder +- from pyasn1_modules import rfc5208 ++ pkey = serialization.load_der_private_key( ++ key_bytes, password=None, ++ backend=default_backend()) + +- if _PKCS8_SPEC is None: +- _PKCS8_SPEC = rfc5208.PrivateKeyInfo() ++ elif marker_id == 1: + key_info, remaining = decoder.decode( + key_bytes, asn1Spec=_PKCS8_SPEC) + if remaining != b'': + raise ValueError('Unused bytes', remaining) + pkey_info = key_info.getComponentByName('privateKey') +- pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(), +- format='DER') ++ pkey = serialization.load_der_private_key( ++ pkey_info.asOctets(), password=None, ++ backend=default_backend()) + else: + raise ValueError('No key could be detected.') + diff --git a/SOURCES/IPaddr2-monitor_retries.patch b/SOURCES/IPaddr2-monitor_retries.patch new file mode 100644 index 0000000..6f2629a --- /dev/null +++ b/SOURCES/IPaddr2-monitor_retries.patch @@ -0,0 +1,66 @@ +diff -uNr a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +--- a/heartbeat/IPaddr2 2018-06-27 10:29:08.000000000 +0200 ++++ b/heartbeat/IPaddr2 2018-06-29 16:01:50.538797379 +0200 +@@ -80,6 +80,7 @@ + OCF_RESKEY_arp_bg_default=true + OCF_RESKEY_run_arping_default=false + OCF_RESKEY_preferred_lft_default="forever" ++OCF_RESKEY_monitor_retries="1" + + : ${OCF_RESKEY_lvs_support=${OCF_RESKEY_lvs_support_default}} + : ${OCF_RESKEY_lvs_ipv6_addrlabel=${OCF_RESKEY_lvs_ipv6_addrlabel_default}} +@@ -92,6 +93,7 @@ + : ${OCF_RESKEY_arp_bg=${OCF_RESKEY_arp_bg_default}} + : ${OCF_RESKEY_run_arping=${OCF_RESKEY_run_arping_default}} + : ${OCF_RESKEY_preferred_lft=${OCF_RESKEY_preferred_lft_default}} ++: ${OCF_RESKEY_monitor_retries=${OCF_RESKEY_monitor_retries_default}} + ####################################################################### + + SENDARP=$HA_BIN/send_arp +@@ -368,6 +370,18 @@ + + + ++ ++ ++Set number of retries to find interface in monitor-action. ++ ++ONLY INCREASE IF THE AGENT HAS ISSUES FINDING YOUR NIC DURING THE ++MONITOR-ACTION. A HIGHER SETTING MAY LEAD TO DELAYS IN DETECTING ++A FAILURE. 
++ ++Number of retries to find interface in monitor-action ++ ++ ++ + + + +@@ -536,15 +550,26 @@ + find_interface() { + local ipaddr="$1" + local netmask="$2" ++ local iface="" + + # + # List interfaces but exclude FreeS/WAN ipsecN virtual interfaces + # +- local iface="`$IP2UTIL -o -f $FAMILY addr show \ ++ for i in $(seq 1 $OCF_RESKEY_monitor_retries); do ++ iface="`$IP2UTIL -o -f $FAMILY addr show \ + | grep "\ $ipaddr/$netmask" \ + | cut -d ' ' -f2 \ + | grep -v '^ipsec[0-9][0-9]*$'`" + ++ if [ -n "$iface" ]; then ++ break ++ fi ++ ++ if [ $OCF_RESKEY_monitor_retries -gt 1 ]; then ++ sleep 1 ++ fi ++ done ++ + echo "$iface" + return 0 + } diff --git a/SOURCES/LVM-activate-1-warn-vg_access_mode.patch b/SOURCES/LVM-activate-1-warn-vg_access_mode.patch new file mode 100644 index 0000000..3471524 --- /dev/null +++ b/SOURCES/LVM-activate-1-warn-vg_access_mode.patch @@ -0,0 +1,42 @@ +From 12ef5a343158bbfaa5233468a0506074fceaac81 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 21 Aug 2018 12:14:49 +0200 +Subject: [PATCH] LVM-activate: return OCF_ERR_CONFIGURED for incorrect + vg_access_mode + +--- + heartbeat/LVM-activate | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index fbd058288..55e36a2d2 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -448,7 +448,7 @@ lvm_validate() { + ;; + *) + ocf_exit_reason "You specified an invalid value for vg_access_mode: $VG_access_mode" +- exit $OCF_ERR_ARGS ++ exit $OCF_ERR_CONFIGURED + ;; + esac + +@@ -771,7 +771,6 @@ lvm_stop() { + return $OCF_SUCCESS + fi + +- lvm_validate + ocf_log info "Deactivating ${vol}" + + case ${VG_access_mode} in +@@ -788,8 +787,8 @@ lvm_stop() { + tagging_deactivate + ;; + *) +- ocf_exit_reason "VG [${VG}] is not properly configured in cluster. It's unsafe!" +- exit $OCF_ERR_CONFIGURED ++ ocf_log err "VG [${VG}] is not properly configured in cluster. It's unsafe!" 
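Stepping back to the IPaddr2 change above: the retry is a plain bounded loop with a one-second pause between attempts. A Python rendering of the same shape, with the command and parsing as simplified placeholders for what find_interface() does with `ip -o addr show`:

    import subprocess
    import time

    def find_interface(ipaddr, netmask, retries=1):
        # Bounded retry for NICs that are slow to (re)appear; higher retry
        # counts delay failure detection, as the metadata above warns.
        for _ in range(retries):
            out = subprocess.run(['ip', '-o', 'addr', 'show'],
                                 capture_output=True, text=True).stdout
            for line in out.splitlines():
                if ' %s/%s ' % (ipaddr, netmask) in line:
                    return line.split()[1]
            if retries > 1:
                time.sleep(1)
        return None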
++ exit $OCF_SUCCESS + ;; + esac + diff --git a/SOURCES/LVM-activate-2-parameters-access-mode-fixes.patch b/SOURCES/LVM-activate-2-parameters-access-mode-fixes.patch new file mode 100644 index 0000000..ae1fe65 --- /dev/null +++ b/SOURCES/LVM-activate-2-parameters-access-mode-fixes.patch @@ -0,0 +1,137 @@ +From 792077bf2994e2e582ccfb0768f3186517de9025 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 21 Sep 2018 12:00:07 +0200 +Subject: [PATCH] LVM-activate: fixes + +- read parameters for start/stop/monitor-actions +- fail during monitor-action when run with incorrect access_mode +--- + heartbeat/LVM-activate | 44 ++++++++++++++++++++++++++---------------- + 1 file changed, 27 insertions(+), 17 deletions(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 55e36a2d2..f46932c1c 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -56,6 +56,7 @@ LV=${OCF_RESKEY_lvname} + # 3: vg has system_id (new) + # 4: vg has tagging (old) + VG_access_mode=${OCF_RESKEY_vg_access_mode} ++VG_access_mode_num=0 + + # Activate LV(s) with "shared" lock for cluster fs + # or "exclusive" lock for local fs +@@ -176,7 +177,9 @@ END + # 2: vg is clustered - clvmd (old) + # 3: vg has system_id (new) + # 4: vg has tagging (old) +-get_VG_access_mode() { ++ ++get_VG_access_mode_num() ++{ + local access_mode + local vg_locktype + local vg_clustered +@@ -415,11 +418,8 @@ tagging_check() + return $OCF_SUCCESS + } + +-lvm_validate() { +- local lv_count +- local mode +- +- # Parameters checking ++read_parameters() ++{ + if [ -z "$VG" ] + then + ocf_exit_reason "You must identify the volume group name!" +@@ -435,22 +435,30 @@ lvm_validate() { + # Convert VG_access_mode from string to index + case ${VG_access_mode} in + lvmlockd) +- VG_access_mode=1 ++ VG_access_mode_num=1 + ;; + clvmd) +- VG_access_mode=2 ++ VG_access_mode_num=2 + ;; + system_id) +- VG_access_mode=3 ++ VG_access_mode_num=3 + ;; + tagging) +- VG_access_mode=4 ++ VG_access_mode_num=4 + ;; + *) ++ # dont exit with error-code here or nodes will get fenced on ++ # e.g. "pcs resource create" + ocf_exit_reason "You specified an invalid value for vg_access_mode: $VG_access_mode" +- exit $OCF_ERR_CONFIGURED + ;; + esac ++} ++ ++lvm_validate() { ++ local lv_count ++ local mode ++ ++ read_parameters + + check_binary pgrep + # Every LVM command is just symlink to lvm binary +@@ -471,9 +479,9 @@ lvm_validate() { + # Get the access mode from VG metadata and check if it matches the input + # value. Skip to check "tagging" mode because there's no reliable way to + # automatically check if "tagging" mode is being used. +- get_VG_access_mode ++ get_VG_access_mode_num + mode=$? +- if [ $VG_access_mode -ne 4 ] && [ $mode -ne $VG_access_mode ]; then ++ if [ $VG_access_mode_num -ne 4 ] && [ $mode -ne $VG_access_mode_num ]; then + ocf_exit_reason "The specified vg_access_mode doesn't match the lock_type on VG metadata!" 
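The shape of this refactor, reduced to a hypothetical sketch: parameter parsing is tolerant (an unknown access-mode string maps to 0 instead of exiting, so a typo during `pcs resource create` cannot get a node fenced), while strict rejection is deferred to the validate/monitor path shown in the dispatch changes that follow.

    ACCESS_MODES = {'lvmlockd': 1, 'clvmd': 2, 'system_id': 3, 'tagging': 4}

    def read_parameters(vg_access_mode):
        # Tolerant: bad input becomes 0, reported but never fatal here.
        return ACCESS_MODES.get(vg_access_mode, 0)

    def validate(vg_access_mode):
        # Strict: only paths where failing is safe reject the value.
        mode_num = read_parameters(vg_access_mode)
        if mode_num == 0:
            raise ValueError('invalid vg_access_mode: %r' % vg_access_mode)
        return mode_num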
+ exit $OCF_ERR_ARGS + fi +@@ -495,8 +503,8 @@ lvm_validate() { + fi + fi + +- # VG_access_mode specific checking goes here +- case ${VG_access_mode} in ++ # VG_access_mode_num specific checking goes here ++ case ${VG_access_mode_num} in + 1) + lvmlockd_check + ;; +@@ -731,7 +739,7 @@ lvm_start() { + [ -z ${LV} ] && vol=${VG} || vol=${VG}/${LV} + ocf_log info "Activating ${vol}" + +- case ${VG_access_mode} in ++ case ${VG_access_mode_num} in + 1) + lvmlockd_activate + ;; +@@ -773,7 +781,7 @@ lvm_stop() { + + ocf_log info "Deactivating ${vol}" + +- case ${VG_access_mode} in ++ case ${VG_access_mode_num} in + 1) + lvmlockd_deactivate + ;; +@@ -811,9 +819,11 @@ start) + lvm_start + ;; + stop) ++ read_parameters + lvm_stop + ;; + monitor) ++ lvm_validate + lvm_status + ;; + validate-all) diff --git a/SOURCES/LVM-activate-fix-issue-with-dashes.patch b/SOURCES/LVM-activate-fix-issue-with-dashes.patch new file mode 100644 index 0000000..7f3b996 --- /dev/null +++ b/SOURCES/LVM-activate-fix-issue-with-dashes.patch @@ -0,0 +1,54 @@ +From 2c219dd000d7f5edd3765a1c6bc5f3e6efb7208b Mon Sep 17 00:00:00 2001 +From: Paul Mezzanini +Date: Fri, 1 Jun 2018 11:58:06 -0400 +Subject: [PATCH] Volume groups and logical volumes "-" in their name get + mangled with double dashes in dmsetup. Switching to wc and just counting + lines while depending on the vgname + lvname filter in the dmsetup call gets + around the issue with dmsetup outputting correctly but grep failing due to + the name mangle. + +Logic for both test cases and dmsetup calls changed so they match too. No reason +to not have matching tests. + +This is AllBad but there isn't a better way that I'm aware of yet. +--- + heartbeat/LVM-activate | 17 ++++++++++++----- + 1 file changed, 12 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 60e656178..fbd058288 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -692,20 +692,27 @@ tagging_deactivate() { + # lvs/vgs when the metadata is somehow inconsistent. + # + # So, we have to make compromise that the VG is assumably active if any LV of the VG is active. ++# ++# Paul: ++# VGS + LVS with "-" in their name get mangled with double dashes in dmsetup. ++# Switching to wc and just counting lines while depending on the vgname + lvname filter ++# in dmsetup gets around the issue with dmsetup reporting correctly but grep failing. ++# ++# Logic for both test cases and dmsetup calls changed so they match too. ++# ++# This is AllBad but there isn't a better way that I'm aware of yet. + lvm_status() { + local dm_count + + if [ -n "${LV}" ]; then + # dmsetup ls? It cannot accept device name. It's + # too heavy to list all DM devices. +- dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" \ +- | grep -Eq "${VG}-+${LV}" ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" | wc -l ) + else +- dm_count=$(dmsetup --noheadings info -c -S "vgname=${VG}" 2>/dev/null | grep -c "${VG}-") +- test $dm_count -gt 0 ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" 2>/dev/null | wc -l ) + fi + +- if [ $? 
-ne 0 ]; then ++ if [ $dm_count -eq 0 ]; then + return $OCF_NOT_RUNNING + fi + diff --git a/SOURCES/LVM-fix-missing-dash.patch b/SOURCES/LVM-fix-missing-dash.patch new file mode 100644 index 0000000..0e24f5f --- /dev/null +++ b/SOURCES/LVM-fix-missing-dash.patch @@ -0,0 +1,22 @@ +From 5a664525a20d3d5094912322be4faac668e4920e Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 13 Aug 2018 14:30:50 +0200 +Subject: [PATCH] LVM: fix missing dash + +--- + heartbeat/lvm-tag.sh | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/lvm-tag.sh b/heartbeat/lvm-tag.sh +index 71f53b20b..fe17e0f21 100644 +--- a/heartbeat/lvm-tag.sh ++++ b/heartbeat/lvm-tag.sh +@@ -147,7 +147,7 @@ lvm_init() { + if [ -n "$OCF_RESKEY_tag" ]; then + OUR_TAG=$OCF_RESKEY_tag + fi +- vgchange_activate_options="aly --config activation{volume_list=[\"@${OUR_TAG}\"]}" ++ vgchange_activate_options="-aly --config activation{volume_list=[\"@${OUR_TAG}\"]}" + vgchange_deactivate_options="-aln" + } + diff --git a/SOURCES/LVM-volume_group_check_only.patch b/SOURCES/LVM-volume_group_check_only.patch new file mode 100644 index 0000000..505c66a --- /dev/null +++ b/SOURCES/LVM-volume_group_check_only.patch @@ -0,0 +1,72 @@ +From c414259728610f95243d9e34289fefd596b0ac8b Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 22 Jun 2018 15:37:36 +0200 +Subject: [PATCH] LVM: add "volume_group_check_only" parameter to avoid + timeouts in some cases + +--- + heartbeat/LVM | 10 ++++++++++ + heartbeat/lvm-tag.sh | 24 +++++++++++++----------- + 2 files changed, 23 insertions(+), 11 deletions(-) + +diff --git a/heartbeat/LVM b/heartbeat/LVM +index d3cd1a14..10f7186d 100755 +--- a/heartbeat/LVM ++++ b/heartbeat/LVM +@@ -103,6 +103,16 @@ logical volumes. + + + ++ ++ ++If set, only the volume group will be checked during monitoring. ++ ++WARNING: ONLY USE IF YOU HAVE ISSUES WITH TIMEOUTS! ++ ++Only check volume group during monitoring ++ ++ ++ + + + +diff --git a/heartbeat/lvm-tag.sh b/heartbeat/lvm-tag.sh +index 71f53b20..170426e8 100644 +--- a/heartbeat/lvm-tag.sh ++++ b/heartbeat/lvm-tag.sh +@@ -160,19 +160,21 @@ lvm_validate_all() { + lvm_status() { + local rc=0 + +- # If vg is running, make sure the correct tag is present. Otherwise we +- # can not guarantee exclusive activation. +- if ! check_tags; then +- ocf_exit_reason "WARNING: $OCF_RESKEY_volgrpname is active without the cluster tag, \"$OUR_TAG\"" +- rc=$OCF_ERR_GENERIC +- fi ++ if ! ocf_is_true "$OCF_RESKEY_volume_group_check_only"; then ++ # If vg is running, make sure the correct tag is present. Otherwise we ++ # can not guarantee exclusive activation. ++ if ! check_tags; then ++ ocf_exit_reason "WARNING: $OCF_RESKEY_volgrpname is active without the cluster tag, \"$OUR_TAG\"" ++ rc=$OCF_ERR_GENERIC ++ fi + +- # make sure the environment for tags activation is still valid +- if ! verify_tags_environment; then +- rc=$OCF_ERR_GENERIC ++ # make sure the environment for tags activation is still valid ++ if ! verify_tags_environment; then ++ rc=$OCF_ERR_GENERIC ++ fi ++ # let the user know if their initrd is older than lvm.conf. ++ check_initrd_warning + fi +- # let the user know if their initrd is older than lvm.conf. 
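Back on the dash handling fixed two patches above, the underlying dmsetup behavior is easy to state: device-mapper escapes every '-' inside a VG or LV name as '--' when it builds the combined map name, so substring-matching on "vgname-lvname" silently misses any name containing a dash, which is why that fix counts pre-filtered lines instead. A two-line illustration:

    def dm_map_name(vg, lv):
        # device-mapper joins VG and LV with a single '-', escaping literal
        # dashes inside either name as '--'.
        return '%s-%s' % (vg.replace('-', '--'), lv.replace('-', '--'))

    # 'vg-ha' + 'lv-data' does not contain the substring 'vg-ha-lv-data':
    assert dm_map_name('vg-ha', 'lv-data') == 'vg--ha-lv--data'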
+- check_initrd_warning + + return $rc + } +-- +2.17.1 + diff --git a/SOURCES/VirtualDomain-stateless-support.patch b/SOURCES/VirtualDomain-stateless-support.patch new file mode 100644 index 0000000..9d79622 --- /dev/null +++ b/SOURCES/VirtualDomain-stateless-support.patch @@ -0,0 +1,126 @@ +diff -uNr a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain +--- a/heartbeat/VirtualDomain 2018-06-29 14:05:02.000000000 +0200 ++++ b/heartbeat/VirtualDomain 2018-07-03 14:01:25.892705351 +0200 +@@ -26,6 +26,9 @@ + OCF_RESKEY_CRM_meta_timeout_default=90000 + OCF_RESKEY_save_config_on_stop_default=false + OCF_RESKEY_sync_config_on_stop_default=false ++OCF_RESKEY_backingfile_default="" ++OCF_RESKEY_stateless_default="false" ++OCF_RESKEY_copyindirs_default="" + + : ${OCF_RESKEY_migration_downtime=${OCF_RESKEY_migration_downtime_default}} + : ${OCF_RESKEY_migration_speed=${OCF_RESKEY_migration_speed_default}} +@@ -36,6 +39,9 @@ + : ${OCF_RESKEY_CRM_meta_timeout=${OCF_RESKEY_CRM_meta_timeout_default}} + : ${OCF_RESKEY_save_config_on_stop=${OCF_RESKEY_save_config_on_stop_default}} + : ${OCF_RESKEY_sync_config_on_stop=${OCF_RESKEY_sync_config_on_stop_default}} ++: ${OCF_RESKEY_backingfile=${OCF_RESKEY_backingfile_default}} ++: ${OCF_RESKEY_stateless=${OCF_RESKEY_stateless_default}} ++: ${OCF_RESKEY_copyindirs=${OCF_RESKEY_copyindirs_default}} + + if ocf_is_true ${OCF_RESKEY_sync_config_on_stop}; then + OCF_RESKEY_save_config_on_stop="true" +@@ -271,6 +277,35 @@ + + + ++ ++ ++When the VM is used in Copy-On-Write mode, this is the backing file to use (with its full path). ++The VMs image will be created based on this backing file. ++This backing file will never be changed during the life of the VM. ++ ++If the VM is wanted to work with Copy-On-Write mode, this is the backing file to use (with its full path) ++ ++ ++ ++ ++ ++If set to true and backingfile is defined, the start of the VM will systematically create a new qcow2 based on ++the backing file, therefore the VM will always be stateless. If set to false, the start of the VM will use the ++COW (<vmname>.qcow2) file if it exists, otherwise the first start will create a new qcow2 based on the backing ++file given as backingfile. ++ ++If set to true, the (<vmname>.qcow2) file will be re-created at each start, based on the backing file (if defined) ++ ++ ++ ++ ++ ++List of directories for the virt-copy-in before booting the VM. Used only in stateless mode. ++ ++List of directories for the virt-copy-in before booting the VM stateless mode. ++ ++ ++ + + + virsh shutdown method to use. Please verify that it is supported by your virsh toolsed with 'virsh help shutdown' +@@ -545,11 +580,49 @@ + # is restored to an 'undefined' state before creating. + verify_undefined + +- virsh $VIRSH_OPTIONS create ${OCF_RESKEY_config} +- rc=$? +- if [ $rc -ne 0 ]; then +- ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}." +- return $OCF_ERR_GENERIC ++ if [ -z "${OCF_RESKEY_backingfile}" ]; then ++ virsh $VIRSH_OPTIONS create ${OCF_RESKEY_config} ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}." ++ return $OCF_ERR_GENERIC ++ fi ++ else ++ if ocf_is_true "${OCF_RESKEY_stateless}" || [ ! -s "${OCF_RESKEY_config%%.*}.qcow2" ]; then ++ # Create the Stateless image ++ dirconfig=`dirname ${OCF_RESKEY_config}` ++ qemu-img create -f qcow2 -b ${OCF_RESKEY_backingfile} ${OCF_RESKEY_config%%.*}.qcow2 ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed qemu-img create ${DOMAIN_NAME} with backing file ${OCF_RESKEY_backingfile}." 
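The heart of the VirtualDomain stateless path above is the qemu-img overlay: a fresh qcow2 whose writes never touch the backing file, so every start boots from a pristine image. A runnable sketch under assumed paths (the patch derives the overlay name from the config file instead; -F pins the backing format, which newer qemu-img versions require):

    import subprocess

    # Copy-on-write overlay: guest writes land in guest1.qcow2 while
    # master.qcow2 is only ever read.
    subprocess.run(['qemu-img', 'create', '-f', 'qcow2',
                    '-b', '/var/lib/libvirt/images/master.qcow2',
                    '-F', 'qcow2',
                    '/var/lib/libvirt/images/guest1.qcow2'],
                   check=True)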
++ return $OCF_ERR_GENERIC ++ fi ++ ++ virsh define ${OCF_RESKEY_config} ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to define virtual domain ${DOMAIN_NAME}." ++ return $OCF_ERR_GENERIC ++ fi ++ ++ if [ -n "${OCF_RESKEY_copyindirs}" ]; then ++ # Inject copyindirs directories and files ++ virt-copy-in -d ${DOMAIN_NAME} ${OCF_RESKEY_copyindirs} / ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed on virt-copy-in command ${DOMAIN_NAME}." ++ return $OCF_ERR_GENERIC ++ fi ++ fi ++ else ++ virsh define ${OCF_RESKEY_config} ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to define virtual domain ${DOMAIN_NAME}." ++ return $OCF_ERR_GENERIC ++ fi ++ fi ++ ++ virsh $VIRSH_OPTIONS start ${DOMAIN_NAME} ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}." ++ return $OCF_ERR_GENERIC ++ fi + fi + + while ! VirtualDomain_monitor; do +@@ -926,6 +999,11 @@ + ocf_exit_reason "migration_downtime has to be a decimal value" + return $OCF_ERR_CONFIGURED + fi ++ ++ if ocf_is_true "${OCF_RESKEY_stateless}" && [ -z "${OCF_RESKEY_backingfile}" ]; then ++ ocf_exit_reason "Stateless functionality can't be achieved without a backing file." ++ return $OCF_ERR_CONFIGURED ++ fi + } + + VirtualDomain_getconfig() { diff --git a/SOURCES/aliyun-vpc-move-ip-1.patch b/SOURCES/aliyun-vpc-move-ip-1.patch new file mode 100644 index 0000000..ab948dc --- /dev/null +++ b/SOURCES/aliyun-vpc-move-ip-1.patch @@ -0,0 +1,275 @@ +From e45d0ca9ccc3d5fbe94372f40bedb7559dc9530a Mon Sep 17 00:00:00 2001 +From: "feng.changf1" +Date: Tue, 24 Jul 2018 15:08:45 +0800 +Subject: [PATCH] Add Aliyun vpc-move-ip agent. + +--- + heartbeat/aliyun-vpc-move-ip | 258 +++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 258 insertions(+) + create mode 100644 heartbeat/aliyun-vpc-move-ip + +diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +new file mode 100644 +index 000000000..bc97822a8 +--- /dev/null ++++ b/heartbeat/aliyun-vpc-move-ip +@@ -0,0 +1,258 @@ ++#!/bin/bash ++# ++# OCF resource agent to move an IP address within a VPC in the Aliyun ++# Based on code of Markus Guertler (GitHub AWS-VPC-move-IP) ++# Based on code of Adam Gandelman (GitHub ec2-resource-agents/elasticip) ++# ++ ++############################################################################### ++# For testing purposes delete OCF_ROOT after testing ++OCF_ROOT=/usr/lib/ocf/ ++# ++# INIT ++#: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat} ++#if [ -f ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs ]; then ++# . ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs ++#fi ++ ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs} ++. 
${OCF_FUNCTIONS} ++: ${__OCF_ACTION=$1} ++export HOME=/root ++####################################################################### ++ ++ ++USAGE="usage: $0 {start|stop|status|meta-data}"; ++############################################################################### ++ ++ ++############################################################################### ++# ++# Functions ++# ++############################################################################### ++ ++ ++metadata() { ++cat < ++ ++ ++2.0 ++ ++Resource Agent to move IP addresses within a VPC of the Aliyun Webservices ECS ++by changing an entry in an specific routing table ++ ++Move IP within a APC of the Aliyun ECS ++ ++ ++ ++VPC private IP address ++ ++vpc ip ++ ++ ++ ++ ++Name of the routing table, where the route for the IP address should be changed, i.e. rtb-... ++ ++routing table name ++ ++ ++ ++ ++Name of the network interfacen, i.e. eth0 ++ ++network interface name ++ ++ ++ ++ ++Valid Aliyun CLI profile name ++ ++profile name ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++debugger() { ++ ocf_log info "DEBUG: $1" ++} ++ ++ecs_ip_validate() { ++ debugger "function: validate" ++ ++ # IP address ++ [[ -z "$OCF_RESKEY_address" ]] && ocf_log error "IP address parameter not set $OCF_RESKEY_ADDRESS!" && exit $OCF_ERR_CONFIGURED ++ ++ # Network Interface ++ [[ -z "$OCF_RESKEY_interface" ]] && ocf_log error "Network interface parameter not set $OCF_RESKEY_INTERFACE!" && exit $OCF_ERR_CONFIGURED ++ ++ # Routing Table ++ [[ -z "$OCF_RESKEY_routing_table" ]] && ocf_log error "Routing table parameter not set $OCF_RESKEY_ROUTING_TABLE!" && exit $OCF_ERR_CONFIGURED ++ ++ ECS_INSTANCE_ID="$(curl -s http://100.100.100.200/latest/meta-data/instance-id)" ++ ++ if [ -z "${ECS_INSTANCE_ID}" ]; then ++ ocf_exit_reason "Instance ID not found. Is this a ECS instance?" ++ return $OCF_ERR_GENERIC ++ fi ++ ++ return $OCF_SUCCESS ++} ++ ++ecs_ip_monitor() { ++ ecs_ip_validate ++ debugger "function: ecsip_monitor: check routing table" ++ cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" ++ debugger "executing command: $cmd" ++ ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ if [ -z "$ROUTE_TO_INSTANCE" ]; then ++ ROUTE_TO_INSTANCE="" ++ fi ++ ++ [[ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]] && debugger "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" && return $OCF_NOT_RUNNING ++ cmd="ping -W 1 -c 1 $OCF_RESKEY_address" ++ debugger "executing command: $cmd" ++ $cmd > /dev/null ++ [[ $? -gt 0 ]] && debugger "IP $OCF_RESKEY_address not locally reachable via ping on this system" && return $OCF_NOT_RUNNING ++ debugger "routed in VPC and locally reachable" ++ return $OCF_SUCCESS ++} ++ ++ ++ecs_ip_drop() { ++ debugger "function: ecsip_drop" ++ cmd="ip addr delete ${OCF_RESKEY_address}/32 dev $OCF_RESKEY_interface" ++ debugger "executing command: $cmd" ++ $cmd ++ rc=$? ++ [[ $rc -gt 2 ]] && debugger "command failed, rc $rc" && return $OCF_ERR_GENERIC ++ debugger "command succeeded" ++ return $OCF_SUCCESS ++} ++ ++wait_for_deleted() { ++ while [ ! 
-z "$ROUTE_TO_INSTANCE" ]; do ++ sleep 1 ++ cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" ++ debugger "executing command: $cmd" ++ ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ done ++ sleep 5 ++} ++ ++wait_for_started() { ++ cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" ++ debugger "executing command: $cmd" ++ ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ ++ while [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; do ++ sleep 1 ++ cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" ++ debugger "executing command: $cmd" ++ ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ done ++ sleep 5 ++} ++ ++ecs_ip_get_and_configure() { ++ debugger "function: ecsip_get_and_configure" ++ ++ if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then ++ ++ if [ $ROUTE_TO_INSTANCE != "" ]; then ++ # Adjusting the routing table ++ cmd="aliyuncli vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text" ++ debugger "executing command: $cmd" ++ $cmd ++ rc=$? ++ [[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC ++ #wait_for_deleted ++ sleep 3 ++ fi ++ ++ cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" ++ debugger "executing command: $cmd" ++ $cmd ++ rc=$? ++ #[[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC ++ while [ $rc != 0 ]; do ++ sleep 2 ++ cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" ++ debugger "executing command: $cmd" ++ $cmd ++ rc=$? ++ done ++ wait_for_started ++ fi ++ ++ ++ # Reconfigure the local ip address ++ ecs_ip_drop ++ ip addr add "${OCF_RESKEY_address}/32" dev $OCF_RESKEY_interface ++ rc=$? ++ [[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC ++ debugger "-success" ++ return $OCF_SUCCESS ++} ++ ++ecs_ip_stop() { ++ ocf_log info "ECS: Bringing down IP address $OCF_RESKEY_address" ++ ecs_ip_validate ++ ecs_ip_monitor ++ [[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Address $OCF_RESKEY_address already down" && return $OCF_SUCCESS ++ ecs_ip_drop ++ [[ $? != $OCF_SUCCESS ]] && return $OCF_ERR_GENERIC ++ ecs_ip_monitor ++ [[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Successfully brought down $OCF_RESKEY_address" && return $OCF_SUCCESS ++ ocf_log error "ECS: Couldn't bring down IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface." ++ return $OCF_ERR_GENERIC ++} ++ ++ecs_ip_start() { ++ ocf_log info "ECS: Moving IP address $OCF_RESKEY_address to this host by adjusting routing table $OCF_RESKEY_routing_table" ++ ecs_ip_validate ++ ecs_ip_monitor ++ [[ $? == $OCF_SUCCESS ]] && ocf_log info "ECS: $OCF_RESKEY_address already started" && return $OCF_SUCCESS ++ ocf_log info "ECS: Adjusting routing table and locally configuring IP address" ++ ecs_ip_get_and_configure ++ [[ $? != 0 ]] && ocf_log error "Received $? from 'aliyun cli'" && return $OCF_ERR_GENERIC ++ return $OCF_SUCCESS ++ ecs_ip_monitor ++ [[ $? == $OCF_SUCCESS ]] && return $? 
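
The failover core above is a route-table swap: the /32 route for the service address is removed from whichever instance holds it and recreated pointing at the local instance. Stripped of the agent plumbing, and with hypothetical table and instance IDs, the flow is:

    addr=10.0.0.10      # service address (hypothetical)
    table=vtb-example   # route table ID (hypothetical)
    me=$(curl -s http://100.100.100.200/latest/meta-data/instance-id)

    # Which instance currently owns the route?
    holder=$(aliyuncli vpc DescribeRouteTables --RouteTableId "$table" \
        --output text | grep "$addr" | awk '{ print $3 }')

    # Repoint the route: remove the stale entry, then add one whose
    # next hop is this instance.
    [ -n "$holder" ] && aliyuncli vpc DeleteRouteEntry \
        --RouteTableId "$table" --DestinationCidrBlock "$addr/32" \
        --NextHopId "$holder"
    aliyuncli vpc CreateRouteEntry --RouteTableId "$table" \
        --DestinationCidrBlock "$addr/32" --NextHopId "$me" \
        --NextHopType Instance
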
++ ocf_log error "ECS: IP address couldn't be configured on this host (IP: $OCF_RESKEY_address, Interface: $OCF_RESKEY_interface)" ++ return $OCF_ERR_GENERIC ++} ++ ++############################################################################### ++# ++# MAIN ++# ++############################################################################### ++ ++case $__OCF_ACTION in ++ meta-data) metadata ++ exit $OCF_SUCCESS;; ++ monitor) ++ ecs_ip_monitor;; ++ stop) ++ ecs_ip_stop;; ++ validate-all) ecs_ip_validate;; ++ start) ++ ecs_ip_start;; ++ *) exit $OCF_ERR_UNIMPLEMENTED;; ++esac +\ No newline at end of file diff --git a/SOURCES/aliyun-vpc-move-ip-2-fixes.patch b/SOURCES/aliyun-vpc-move-ip-2-fixes.patch new file mode 100644 index 0000000..7c5db4c --- /dev/null +++ b/SOURCES/aliyun-vpc-move-ip-2-fixes.patch @@ -0,0 +1,451 @@ +From db3df55a6f7097e1da7d77eb361e9e7560f13353 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 24 Jul 2018 13:57:08 +0200 +Subject: [PATCH] aliyun-vpc-move-ip: fixes + +--- + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/aliyun-vpc-move-ip | 336 ++++++++++++++++++++++++------------------- + 3 files changed, 189 insertions(+), 149 deletions(-) + mode change 100644 => 100755 heartbeat/aliyun-vpc-move-ip + +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index 3ac0569de..fc9a67161 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -93,6 +93,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_WinPopup.7 \ + ocf_heartbeat_Xen.7 \ + ocf_heartbeat_Xinetd.7 \ ++ ocf_heartbeat_aliyun-vpc-move-ip.7 \ + ocf_heartbeat_anything.7 \ + ocf_heartbeat_apache.7 \ + ocf_heartbeat_asterisk.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index d4750bf09..6adc6bc3c 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -90,6 +90,7 @@ ocf_SCRIPTS = AoEtarget \ + Xen \ + Xinetd \ + ZFS \ ++ aliyun-vpc-move-ip \ + anything \ + apache \ + asterisk \ +diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +old mode 100644 +new mode 100755 +index bc97822a8..108feb247 +--- a/heartbeat/aliyun-vpc-move-ip ++++ b/heartbeat/aliyun-vpc-move-ip +@@ -1,30 +1,19 @@ +-#!/bin/bash ++#!/bin/sh + # + # OCF resource agent to move an IP address within a VPC in the Aliyun + # Based on code of Markus Guertler (GitHub AWS-VPC-move-IP) + # Based on code of Adam Gandelman (GitHub ec2-resource-agents/elasticip) + # + +-############################################################################### +-# For testing purposes delete OCF_ROOT after testing +-OCF_ROOT=/usr/lib/ocf/ +-# +-# INIT +-#: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat} +-#if [ -f ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs ]; then +-# . ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs +-#fi +- + ####################################################################### + # Initialization: +- +-: ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs} +-. ${OCF_FUNCTIONS} +-: ${__OCF_ACTION=$1} +-export HOME=/root ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ####################################################################### + +- ++# aliyuncli doesnt work without HOME parameter ++export HOME="/root" ++ + USAGE="usage: $0 {start|stop|status|meta-data}"; + ############################################################################### + +@@ -36,8 +25,96 @@ USAGE="usage: $0 {start|stop|status|meta-data}"; + ############################################################################### + + +-metadata() { +-cat < + + +@@ -74,8 +151,8 @@ Name of the network interfacen, i.e. eth0 + Valid Aliyun CLI profile name + + profile name +- +- ++ ++ + + + +@@ -88,171 +165,132 @@ Valid Aliyun CLI profile name + END + } + +-debugger() { +- ocf_log info "DEBUG: $1" +-} +- + ecs_ip_validate() { +- debugger "function: validate" +- ++ ocf_log debug "function: validate" ++ + # IP address +- [[ -z "$OCF_RESKEY_address" ]] && ocf_log error "IP address parameter not set $OCF_RESKEY_ADDRESS!" && exit $OCF_ERR_CONFIGURED +- ++ if [ -z "$OCF_RESKEY_address" ]; then ++ ocf_log err "IP address parameter not set $OCF_RESKEY_ADDRESS!" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ + # Network Interface +- [[ -z "$OCF_RESKEY_interface" ]] && ocf_log error "Network interface parameter not set $OCF_RESKEY_INTERFACE!" && exit $OCF_ERR_CONFIGURED +- ++ if [ -z "$OCF_RESKEY_interface" ]; then ++ ocf_log err "Network interface parameter not set $OCF_RESKEY_INTERFACE!" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ + # Routing Table +- [[ -z "$OCF_RESKEY_routing_table" ]] && ocf_log error "Routing table parameter not set $OCF_RESKEY_ROUTING_TABLE!" && exit $OCF_ERR_CONFIGURED +- +- ECS_INSTANCE_ID="$(curl -s http://100.100.100.200/latest/meta-data/instance-id)" ++ if [ -z "$OCF_RESKEY_routing_table" ]; then ++ ocf_log err "Routing table parameter not set $OCF_RESKEY_ROUTING_TABLE!" ++ exit $OCF_ERR_CONFIGURED ++ fi + + if [ -z "${ECS_INSTANCE_ID}" ]; then + ocf_exit_reason "Instance ID not found. Is this a ECS instance?" + return $OCF_ERR_GENERIC + fi +- +- return $OCF_SUCCESS +-} + +-ecs_ip_monitor() { +- ecs_ip_validate +- debugger "function: ecsip_monitor: check routing table" +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- debugger "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- if [ -z "$ROUTE_TO_INSTANCE" ]; then +- ROUTE_TO_INSTANCE="" +- fi +- +- [[ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]] && debugger "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" && return $OCF_NOT_RUNNING +- cmd="ping -W 1 -c 1 $OCF_RESKEY_address" +- debugger "executing command: $cmd" +- $cmd > /dev/null +- [[ $? -gt 0 ]] && debugger "IP $OCF_RESKEY_address not locally reachable via ping on this system" && return $OCF_NOT_RUNNING +- debugger "routed in VPC and locally reachable" +- return $OCF_SUCCESS +-} +- +- +-ecs_ip_drop() { +- debugger "function: ecsip_drop" +- cmd="ip addr delete ${OCF_RESKEY_address}/32 dev $OCF_RESKEY_interface" +- debugger "executing command: $cmd" +- $cmd +- rc=$? +- [[ $rc -gt 2 ]] && debugger "command failed, rc $rc" && return $OCF_ERR_GENERIC +- debugger "command succeeded" + return $OCF_SUCCESS + } + +-wait_for_deleted() { +- while [ ! 
-z "$ROUTE_TO_INSTANCE" ]; do +- sleep 1 +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- debugger "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- done +- sleep 5 +-} ++ecs_ip_start() { ++ ocf_log info "ECS: Moving IP address $OCF_RESKEY_address to this host by adjusting routing table $OCF_RESKEY_routing_table" + +-wait_for_started() { +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- debugger "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- +- while [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; do +- sleep 1 +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- debugger "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- done +- sleep 5 +-} ++ ecs_ip_monitor ++ if [ $? = $OCF_SUCCESS ]; then ++ ocf_log info "ECS: $OCF_RESKEY_address already started" ++ return $OCF_SUCCESS ++ fi + +-ecs_ip_get_and_configure() { +- debugger "function: ecsip_get_and_configure" +- +- if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then +- +- if [ $ROUTE_TO_INSTANCE != "" ]; then +- # Adjusting the routing table +- cmd="aliyuncli vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text" +- debugger "executing command: $cmd" +- $cmd +- rc=$? +- [[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC +- #wait_for_deleted +- sleep 3 +- fi +- +- cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" +- debugger "executing command: $cmd" +- $cmd +- rc=$? +- #[[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC +- while [ $rc != 0 ]; do +- sleep 2 +- cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" +- debugger "executing command: $cmd" +- $cmd +- rc=$? +- done +- wait_for_started ++ ocf_log info "ECS: Adjusting routing table and locally configuring IP address" ++ ip_get_and_configure ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "Received $rc from 'aliyun cli'" ++ return $OCF_ERR_GENERIC + fi +- +- +- # Reconfigure the local ip address +- ecs_ip_drop +- ip addr add "${OCF_RESKEY_address}/32" dev $OCF_RESKEY_interface ++ ++ ecs_ip_monitor + rc=$? +- [[ $rc != 0 ]] && debugger "command failed, rc: $rc" && return $OCF_ERR_GENERIC +- debugger "-success" ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ ocf_log err "IP address couldn't be configured on this host (IP: $OCF_RESKEY_address, Interface: $OCF_RESKEY_interface)" ++ return $rc ++ fi ++ + return $OCF_SUCCESS + } + + ecs_ip_stop() { + ocf_log info "ECS: Bringing down IP address $OCF_RESKEY_address" +- ecs_ip_validate ++ + ecs_ip_monitor +- [[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Address $OCF_RESKEY_address already down" && return $OCF_SUCCESS +- ecs_ip_drop +- [[ $? != $OCF_SUCCESS ]] && return $OCF_ERR_GENERIC ++ if [ $? = $OCF_NOT_RUNNING ]; then ++ ocf_log info "ECS: Address $OCF_RESKEY_address already down" ++ return $OCF_SUCCESS ++ fi ++ ++ ip_drop ++ if [ $? 
-ne $OCF_SUCCESS ]; then ++ ocf_log err "ECS: Couldn't drop IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface." ++ return $OCF_ERR_GENERIC ++ fi ++ + ecs_ip_monitor +- [[ $? == $OCF_NOT_RUNNING ]] && ocf_log info "ECS: Successfully brought down $OCF_RESKEY_address" && return $OCF_SUCCESS +- ocf_log error "ECS: Couldn't bring down IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface." ++ if [ $? = $OCF_NOT_RUNNING ]; then ++ ocf_log info "ECS: Successfully brought down $OCF_RESKEY_address" ++ return $OCF_SUCCESS ++ fi ++ ++ ocf_log err "ECS: Couldn't bring down IP address $OCF_RESKEY_address on interface $OCF_RESKEY_interface." + return $OCF_ERR_GENERIC + } + +-ecs_ip_start() { +- ocf_log info "ECS: Moving IP address $OCF_RESKEY_address to this host by adjusting routing table $OCF_RESKEY_routing_table" +- ecs_ip_validate +- ecs_ip_monitor +- [[ $? == $OCF_SUCCESS ]] && ocf_log info "ECS: $OCF_RESKEY_address already started" && return $OCF_SUCCESS +- ocf_log info "ECS: Adjusting routing table and locally configuring IP address" +- ecs_ip_get_and_configure +- [[ $? != 0 ]] && ocf_log error "Received $? from 'aliyun cli'" && return $OCF_ERR_GENERIC +- return $OCF_SUCCESS +- ecs_ip_monitor +- [[ $? == $OCF_SUCCESS ]] && return $? +- ocf_log error "ECS: IP address couldn't be configured on this host (IP: $OCF_RESKEY_address, Interface: $OCF_RESKEY_interface)" +- return $OCF_ERR_GENERIC ++ecs_ip_monitor() { ++ ocf_log debug "function: ecsip_monitor: check routing table" ++ cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" ++ ocf_log debug "executing command: $cmd" ++ ++ ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ ++ if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then ++ ocf_log debug "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" ++ return $OCF_NOT_RUNNING ++ fi ++ ++ cmd="ping -W 1 -c 1 $OCF_RESKEY_address" ++ ocf_log debug "executing command: $cmd" ++ $cmd > /dev/null ++ if [ $? 
-ne 0 ]; then ++ ocf_log debug "IP $OCF_RESKEY_address not locally reachable via ping on this system" ++ return $OCF_NOT_RUNNING ++ fi ++ ocf_log debug "routed in VPC and locally reachable" ++ return $OCF_SUCCESS + } + ++ + ############################################################################### + # + # MAIN + # + ############################################################################### + +-case $__OCF_ACTION in +- meta-data) metadata ++case $__OCF_ACTION in ++ meta-data) ecs_ip_metadata + exit $OCF_SUCCESS;; +- monitor) +- ecs_ip_monitor;; +- stop) +- ecs_ip_stop;; + validate-all) ecs_ip_validate;; ++esac ++ ++ECS_INSTANCE_ID="$(curl -s http://100.100.100.200/latest/meta-data/instance-id)" ++ ++case $__OCF_ACTION in + start) ++ ecs_ip_validate + ecs_ip_start;; ++ stop) ++ ecs_ip_stop;; ++ monitor) ++ ecs_ip_monitor;; + *) exit $OCF_ERR_UNIMPLEMENTED;; +-esac +\ No newline at end of file ++esac diff --git a/SOURCES/aliyun-vpc-move-ip-3-fix-manpage.patch b/SOURCES/aliyun-vpc-move-ip-3-fix-manpage.patch new file mode 100644 index 0000000..619b721 --- /dev/null +++ b/SOURCES/aliyun-vpc-move-ip-3-fix-manpage.patch @@ -0,0 +1,22 @@ +From ee081df601f914079f111eec10cb81ab212130a9 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 25 Jul 2018 11:22:39 +0200 +Subject: [PATCH] aliyun-vpc-move-ip: fix manpage + +--- + heartbeat/aliyun-vpc-move-ip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +index 108feb247..e27952adb 100755 +--- a/heartbeat/aliyun-vpc-move-ip ++++ b/heartbeat/aliyun-vpc-move-ip +@@ -117,7 +117,7 @@ ecs_ip_metadata() { + cat < + +- ++ + 2.0 + + Resource Agent to move IP addresses within a VPC of the Aliyun Webservices ECS diff --git a/SOURCES/aliyun-vpc-move-ip-4-bundled.patch b/SOURCES/aliyun-vpc-move-ip-4-bundled.patch new file mode 100644 index 0000000..29a92b9 --- /dev/null +++ b/SOURCES/aliyun-vpc-move-ip-4-bundled.patch @@ -0,0 +1,15 @@ +--- a/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:45:38.432860930 +0200 ++++ b/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:51:06.341211557 +0200 +@@ -35,10 +35,10 @@ + USAGE="usage: $0 {start|stop|status|meta-data}"; + + if [ "${OCF_RESKEY_aliyuncli}" = "detect" ]; then +- OCF_RESKEY_aliyuncli="$(which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)" ++ OCF_RESKEY_aliyuncli="$(which aliyuncli-ra 2> /dev/null || which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)" + fi + +-if [[ "${OCF_RESKEY_aliyuncli##*/}" == 'aliyuncli' ]]; then ++if [[ "${OCF_RESKEY_aliyuncli##*/}" == 'aliyuncli-ra' ]] || [[ "${OCF_RESKEY_aliyuncli##*/}" == 'aliyuncli' ]]; then + OUTPUT="text" + EXECUTING='{ print $3 }' + IFS_=" " diff --git a/SOURCES/aliyun-vpc-move-ip-5-improve-metadata-manpage.patch b/SOURCES/aliyun-vpc-move-ip-5-improve-metadata-manpage.patch new file mode 100644 index 0000000..872158c --- /dev/null +++ b/SOURCES/aliyun-vpc-move-ip-5-improve-metadata-manpage.patch @@ -0,0 +1,49 @@ +From fc497e888afcb88babbc21a59883556335c070fa Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 31 Aug 2018 11:41:32 +0200 +Subject: [PATCH] aliyun-vpc-move-ip: improve metadata and manpage + +--- + heartbeat/aliyun-vpc-move-ip | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +index e27952adb..c004d26fc 100755 +--- a/heartbeat/aliyun-vpc-move-ip ++++ b/heartbeat/aliyun-vpc-move-ip +@@ -123,7 +123,7 @@ ecs_ip_metadata() { + Resource Agent to move IP 
addresses within a VPC of the Aliyun Webservices ECS + by changing an entry in an specific routing table + +-Move IP within a APC of the Aliyun ECS ++Move IP within a VPC of the Aliyun ECS + + + +@@ -134,21 +134,23 @@ VPC private IP address + + + +-Name of the routing table, where the route for the IP address should be changed, i.e. rtb-... ++Name of the routing table, where the route for the IP address should be changed, i.e. vtb-... + + routing table name + + + + +-Name of the network interfacen, i.e. eth0 ++Name of the network interface, i.e. eth0 + + network interface name + + + + +-Valid Aliyun CLI profile name ++Valid Aliyun CLI profile name (see 'aliyuncli-ra configure'). ++ ++See https://www.alibabacloud.com/help/doc-detail/43039.htm?spm=a2c63.p38356.b99.16.38a914abRZtOU3 for more information about aliyuncli-ra. + + profile name + diff --git a/SOURCES/aliyuncli-python3-fixes.patch b/SOURCES/aliyuncli-python3-fixes.patch new file mode 100644 index 0000000..22be4e1 --- /dev/null +++ b/SOURCES/aliyuncli-python3-fixes.patch @@ -0,0 +1,398 @@ +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 12:08:17.331785393 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 14:40:39.656330971 +0100 +@@ -13,7 +13,7 @@ + + def getFileName(self,keyValues): + filename = None +- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: ++ if '--filename' in keyValues and len(keyValues['--filename']) > 0: + filename = keyValues['--filename'][0] + else: + return filename, "A file name is needed! please use \'--filename\' and add the file name." +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 12:08:17.331785393 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 14:41:48.927128430 +0100 +@@ -13,7 +13,7 @@ + + def getFileName(self,keyValues): + filename = None +- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: ++ if '--filename' in keyValues and len(keyValues['--filename']) > 0: + filename = keyValues['--filename'][0] + else: + print("A profile is needed! 
please use \'--filename\' and add the profile name.") +@@ -21,7 +21,7 @@ + + def getInstanceCount(self,keyValues): + count = 1 +- if keyValues.has_key('--instancecount') and len(keyValues['--instancecount']) > 0: ++ if '--instancecount' in keyValues and len(keyValues['--instancecount']) > 0: + if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0: + count = keyValues['--instancecount'][0] + else: +@@ -113,7 +113,7 @@ + + def isAllocatePublicIpAddress(self,keyValues): + _publicIp = False +- if keyValues.has_key('--allocatepublicip') and len(keyValues['--allocatepublicip']) > 0: ++ if '--allocatepublicip' in keyValues and len(keyValues['--allocatepublicip']) > 0: + if keyValues['--allocatepublicip'][0] == "yes": + _publicIp = True + return _publicIp +@@ -125,7 +125,7 @@ + ''' + data = json.loads(jsonbody) + ''' +- if data.has_key('InstanceId') and len(data['InstanceId']) > 0: ++ if 'InstanceId' in data and len(data['InstanceId']) > 0: + instanceId = data['InstanceId'] + except Exception as e: + pass +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 12:08:17.331785393 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 14:42:11.772731833 +0100 +@@ -38,7 +38,7 @@ + + def getFileName(self,keyValues): + filename = None +- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: ++ if '--filename' in keyValues and len(keyValues['--filename']) > 0: + filename = keyValues['--filename'][0] + else: + return filename, "A file name is needed! please use \'--filename\' and add the file name." +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 12:08:17.331785393 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 14:39:09.247900469 +0100 +@@ -13,7 +13,7 @@ + + def getFileName(self,keyValues): + filename = None +- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: ++ if '--filename' in keyValues and len(keyValues['--filename']) > 0: + filename = keyValues['--filename'][0] + else: + return filename, "A filename is needed! please use \'--filename\' and add the file name." 
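
All of these hunks apply the same Python 3 migration: dict.has_key() was removed in Python 3, and the replacement membership test also runs unchanged on Python 2. A minimal sketch of the idiom, with hypothetical data:

    # dict.has_key() was removed in Python 3; "in" works on both 2 and 3.
    key_values = {'--filename': ['backup.json']}

    # Python 2 only:
    #   if key_values.has_key('--filename') and len(key_values['--filename']) > 0:
    # Portable replacement, as used throughout the patch:
    if '--filename' in key_values and len(key_values['--filename']) > 0:
        filename = key_values['--filename'][0]
        print(filename)

(The companion urllib2-to-urllib3 renames in these hunks are a separate concern: urllib3 is a third-party client with a different API, so those call sites merely import cleanly; the standard-library spelling on Python 3 would be urllib.request.)
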
+@@ -21,7 +21,7 @@ + def getInstanceCount(self,keyValues): + count = 1 + import_count = "--count" +- if keyValues.has_key(import_count) and len(keyValues[import_count]) > 0: ++ if import_count in keyValues and len(keyValues[import_count]) > 0: + if keyValues[import_count][0].isdigit() and int(keyValues[import_count][0]) >= 0: + count = keyValues[import_count][0] + else: +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2019-02-19 11:01:46.116653274 +0100 +@@ -17,37 +17,37 @@ + + def getConfigHandlerOptions(self): + return [ConfigCmd.name] +- ++ + def showConfig(self): + _credentialsPath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.credentials) + _configurePath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.configure) + config = dict() + configContent = dict() +- credentialsContent = dict () +- if os.path.exists(_configurePath): ++ credentialsContent = dict () ++ if os.path.exists(_configurePath): + for line in open(_configurePath): + line = line.strip('\n') + if line.find('=') > 0: + list = line.split("=",1) +- configContent[list[0]] = list[1] +- else: +- pass +- config['configure'] = configContent +- if os.path.exists(_credentialsPath): +- for line in open(_credentialsPath): ++ configContent[list[0]] = list[1] ++ else: ++ pass ++ config['configure'] = configContent ++ if os.path.exists(_credentialsPath): ++ for line in open(_credentialsPath): + line = line.strip('\n') + if line.find('=') > 0: + list = line.split("=",1) +- credentialsContent[list[0]] = list[1] +- else: +- pass +- config ['credentials'] = credentialsContent +- response.display_response("showConfigure",config,'table') ++ credentialsContent[list[0]] = list[1] ++ else: ++ pass ++ config ['credentials'] = credentialsContent ++ response.display_response("showConfigure",config,'table') + def importConfig(): + pass + def exportConfig(): + pass +- ++ + + + if __name__ == "__main__": +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 12:08:17.332785376 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 14:40:12.267806439 +0100 +@@ -20,7 +20,7 @@ + def handleProfileCmd(self, cmd, keyValues): + if cmd.lower() == ProfileCmd.useProfile.lower(): # confirm command is right + #check --name is valid +- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0: ++ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0: + _value = keyValues[ProfileCmd.name][0] # use the first value + self.extensionCliHandler.setUserProfile(_value) + else: +@@ -34,7 +34,7 @@ + newProfileName = '' + if cmd.lower() == ProfileCmd.addProfile.lower(): # confirm command is right + #check --name is valid +- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0: ++ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0: + _value = keyValues[ProfileCmd.name][0] # check the first value + # only input key and secret + newProfileName = _value +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 
b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 12:08:17.332785376 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 14:35:32.009660989 +0100 +@@ -137,9 +137,9 @@ + values.append(self.args[index]) + index = index + 1 + keyValues[currentValue] = values +- if keyValues.has_key(keystr) and keyValues[keystr].__len__() > 0: ++ if keystr in keyValues and keyValues[keystr].__len__() > 0: + _key = keyValues[keystr][0] +- if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0: ++ if secretstr in keyValues and keyValues[secretstr].__len__() > 0: + _secret = keyValues[secretstr][0] + #print("accesskeyid: ", _key , "accesskeysecret: ",_secret) + return _key, _secret +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2019-02-19 13:35:35.738680413 +0100 +@@ -19,8 +19,9 @@ + ''' + + import sys +-reload(sys) +-sys.setdefaultencoding('utf-8') ++if sys.version_info[0] < 3: ++ reload(sys) ++ sys.setdefaultencoding('utf-8') + __author__ = 'xixi.xxx' + import aliyunCliMain + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 12:08:17.332785376 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 11:15:19.920089641 +0100 +@@ -18,7 +18,7 @@ + ''' + + import aliyunCliConfiugre +-import urllib2 ++import urllib3 + import re + import os + import platform +@@ -151,7 +151,7 @@ + # this functino will get the latest version + def _getLatestTimeFromServer(self): + try: +- f = urllib2.urlopen(self.configure.server_url,data=None,timeout=5) ++ f = urllib3.urlopen(self.configure.server_url,data=None,timeout=5) + s = f.read() + return s + except Exception as e: +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 12:08:17.332785376 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 14:37:28.221649497 +0100 +@@ -26,7 +26,7 @@ + import aliyunSdkConfigure + import json + import cliError +-import urllib2 ++import urllib3 + import handleEndPoint + + from __init__ import __version__ +@@ -259,7 +259,7 @@ + def changeEndPoint(self, classname, keyValues): + endpoint = "Endpoint" + try: +- if keyValues.has_key(endpoint) and keyValues[endpoint].__len__() > 0: ++ if endpoint in keyValues and keyValues[endpoint].__len__() > 0: + classname._RestApi__domain = keyValues[endpoint][0] + except Exception as e: + pass +@@ -444,10 +444,10 @@ + + def getTempVersion(self,keyValues): + key='--version' +- if keyValues is not None and keyValues.has_key(key): ++ if keyValues is not None and key in keyValues: + return keyValues.get(key) + key = 'version' +- if keyValues is not None and keyValues.has_key(key): ++ if keyValues is not None and key in keyValues: + return keyValues.get(key) + + def getVersionFromFile(self,cmd): +@@ -513,7 +513,7 @@ + self.checkForServer(response,cmd,operation) + def getRequestId(self,response): + try: +- if response.has_key('RequestId') and len(response['RequestId']) > 0: ++ if 'RequestId' in response and len(response['RequestId']) > 0: + 
requestId = response['RequestId'] + return requestId + except Exception: +@@ -532,7 +532,7 @@ + ua = "" + url = configure.server_url + "?requesId=" + requestId + "&ak=" + ak +"&ua="+ua+"&cmd="+cmd+"&operation="+operation + try: +- f = urllib2.urlopen(url,data=None,timeout=5) ++ f = urllib3.urlopen(url,data=None,timeout=5) + s = f.read() + return s + except Exception : +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 12:08:17.333785359 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 14:38:04.032029661 +0100 +@@ -39,7 +39,7 @@ + + def sdkConfigure(self,cmd,operation): + keyValues = self.parser._getKeyValues() +- if keyValues.has_key('--version') and len(keyValues['--version']) > 0: ++ if '--version' in keyValues and len(keyValues['--version']) > 0: + version=keyValues['--version'][0] + filename=self.fileName + self.writeCmdVersionToFile(cmd,version,filename) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 12:08:17.333785359 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 11:12:58.670708353 +0100 +@@ -23,6 +23,8 @@ + import aliyunCliParser + import platform + ++if sys.version_info[0] > 2: ++ raw_input = input + + OSS_CREDS_FILENAME = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~') + OSS_CONFIG_SECTION = 'OSSCredentials' +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 12:08:17.333785359 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 11:14:58.926181598 +0100 +@@ -19,7 +19,7 @@ + #/usr/bin/env python + #!-*- coding:utf-8 -*- + import os +-import urllib2 ++import urllib3 + import cliError + + +@@ -64,9 +64,9 @@ + print(e) + def _getParamFromUrl(prefix,value,mode): + +- req = urllib2.Request(value) ++ req = urllib3.Request(value) + try: +- response=urllib2.urlopen(req) ++ response=urllib3.urlopen(req) + if response.getcode() == 200: + return response.read() + else: +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/six.py b/bundled/aliyun/aliyun-cli/aliyuncli/six.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2019-02-19 11:14:40.505262286 +0100 +@@ -340,8 +340,8 @@ + + + _urllib_error_moved_attributes = [ +- MovedAttribute("URLError", "urllib2", "urllib.error"), +- MovedAttribute("HTTPError", "urllib2", "urllib.error"), ++ MovedAttribute("URLError", "urllib3", "urllib.error"), ++ MovedAttribute("HTTPError", "urllib3", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), + ] + for attr in _urllib_error_moved_attributes: +@@ -359,34 +359,34 @@ + + + _urllib_request_moved_attributes = [ +- MovedAttribute("urlopen", "urllib2", "urllib.request"), +- MovedAttribute("install_opener", "urllib2", "urllib.request"), +- MovedAttribute("build_opener", "urllib2", "urllib.request"), ++ MovedAttribute("urlopen", "urllib3", "urllib.request"), ++ MovedAttribute("install_opener", "urllib3", "urllib.request"), ++ MovedAttribute("build_opener", "urllib3", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", 
"urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), +- MovedAttribute("Request", "urllib2", "urllib.request"), +- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), +- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), +- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), +- MovedAttribute("BaseHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), +- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), +- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), +- MovedAttribute("FileHandler", "urllib2", "urllib.request"), +- MovedAttribute("FTPHandler", "urllib2", "urllib.request"), +- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), +- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), +- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), ++ MovedAttribute("Request", "urllib3", "urllib.request"), ++ MovedAttribute("OpenerDirector", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPDefaultErrorHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPRedirectHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPCookieProcessor", "urllib3", "urllib.request"), ++ MovedAttribute("ProxyHandler", "urllib3", "urllib.request"), ++ MovedAttribute("BaseHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPPasswordMgr", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib3", "urllib.request"), ++ MovedAttribute("AbstractBasicAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPBasicAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("ProxyBasicAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("AbstractDigestAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPDigestAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("ProxyDigestAuthHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPSHandler", "urllib3", "urllib.request"), ++ MovedAttribute("FileHandler", "urllib3", "urllib.request"), ++ MovedAttribute("FTPHandler", "urllib3", "urllib.request"), ++ MovedAttribute("CacheFTPHandler", "urllib3", "urllib.request"), ++ MovedAttribute("UnknownHandler", "urllib3", "urllib.request"), ++ MovedAttribute("HTTPErrorProcessor", "urllib3", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), +diff -uNr a/bundled/aliyun/aliyun-cli/setup.py b/bundled/aliyun/aliyun-cli/setup.py +--- a/bundled/aliyun/aliyun-cli/setup.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/setup.py 2019-02-19 13:33:29.069848394 +0100 +@@ 
-24,7 +24,7 @@ + + install_requires = [ + 'colorama>=0.2.5,<=0.3.3', +- 'jmespath>=0.7.0,<=0.7.1', ++ 'jmespath>=0.7.0', + ] + def main(): + setup( diff --git a/SOURCES/aws-vpc-move-ip-1-avoid-false-positive-monitor.patch b/SOURCES/aws-vpc-move-ip-1-avoid-false-positive-monitor.patch new file mode 100644 index 0000000..9c23ffa --- /dev/null +++ b/SOURCES/aws-vpc-move-ip-1-avoid-false-positive-monitor.patch @@ -0,0 +1,39 @@ +From 7632a85bcf642b484df52a25dbffbfa0031421bc Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Michal=20Koutn=C3=BD?= +Date: Mon, 6 Aug 2018 18:04:52 +0200 +Subject: [PATCH] aws-vpc-move-ip: Use ip utility to check address + +When pinging the assigned address during initial monitor (probe) on one +node we may actually ping the reachable address when the resource is +running on another node. This yields false positive monitor result on +the pinging node. Avoid this by merely checking the assignment of the +address to an interface. +--- + heartbeat/aws-vpc-move-ip | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index cefa38e03..3bbbed474 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,15 +167,15 @@ ec2ip_monitor() { + ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call" + fi + +- cmd="ping -W 1 -c 1 $OCF_RESKEY_ip" ++ cmd="ip addr show to '$OCF_RESKEY_ip' up" + ocf_log debug "executing command: $cmd" +- $cmd > /dev/null +- if [ "$?" -gt 0 ]; then +- ocf_log warn "IP $OCF_RESKEY_ip not locally reachable via ping on this system" ++ RESULT=$($cmd | grep '$OCF_RESKEY_ip') ++ if [ -z "$RESULT" ]; then ++ ocf_log warn "IP $OCF_RESKEY_ip not assigned to running interface" + return $OCF_NOT_RUNNING + fi + +- ocf_log debug "route in VPC and locally reachable" ++ ocf_log debug "route in VPC and address assigned" + return $OCF_SUCCESS + } + diff --git a/SOURCES/aws-vpc-move-ip-2-avoid-false-positive-monitor.patch b/SOURCES/aws-vpc-move-ip-2-avoid-false-positive-monitor.patch new file mode 100644 index 0000000..4d1cbf9 --- /dev/null +++ b/SOURCES/aws-vpc-move-ip-2-avoid-false-positive-monitor.patch @@ -0,0 +1,31 @@ +From 42dccdd20aff3ebf134c8041f79ab0a658975e69 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Michal=20Koutn=C3=BD?= +Date: Thu, 30 Aug 2018 18:02:51 +0200 +Subject: [PATCH] aws-vpc-move-ip: Fix broken shell quoting + +The argument 4th to `ip` is passed with single quotes around which +cannot be parsed as valid IP address. Furthermore, we need to expand the +$OCF_RESKEY_ip for grep. This breaks correct detection of the assigned +address. + +Fixes 7632a85bcf642b484df52a25dbffbfa0031421bc. 
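
Taken together, the two aws-vpc-move-ip patches replace the ping-based probe, which succeeds as long as any node answers for the address, with a check that the address is actually assigned to a local interface, and then repair the quoting so that check can work. A standalone sketch with a hypothetical address:

    ip="192.0.2.10"   # hypothetical service address

    # A ping probe answers whenever *some* node holds the address,
    # so it cannot tell whether the address is assigned locally:
    ping -W 1 -c 1 "$ip"

    # Checking local assignment instead; note the double quotes --
    # grep '$ip' would search for the literal string $ip and never match.
    if ip addr show to "$ip" up | grep -q "$ip"; then
        echo "address assigned locally"
    fi
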
+--- + heartbeat/aws-vpc-move-ip | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 3bbbed474..ce3fc6b9a 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,9 +167,9 @@ ec2ip_monitor() { + ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call" + fi + +- cmd="ip addr show to '$OCF_RESKEY_ip' up" ++ cmd="ip addr show to $OCF_RESKEY_ip up" + ocf_log debug "executing command: $cmd" +- RESULT=$($cmd | grep '$OCF_RESKEY_ip') ++ RESULT=$($cmd | grep "$OCF_RESKEY_ip") + if [ -z "$RESULT" ]; then + ocf_log warn "IP $OCF_RESKEY_ip not assigned to running interface" + return $OCF_NOT_RUNNING diff --git a/SOURCES/build-add-missing-manpages.patch b/SOURCES/build-add-missing-manpages.patch new file mode 100644 index 0000000..6ac1c2d --- /dev/null +++ b/SOURCES/build-add-missing-manpages.patch @@ -0,0 +1,43 @@ +diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am +--- a/doc/man/Makefile.am 2018-07-27 13:05:09.611188363 +0200 ++++ b/doc/man/Makefile.am 2018-07-27 13:05:43.981806873 +0200 +@@ -97,6 +97,7 @@ + ocf_heartbeat_WinPopup.7 \ + ocf_heartbeat_Xen.7 \ + ocf_heartbeat_Xinetd.7 \ ++ ocf_heartbeat_ZFS.7 \ + ocf_heartbeat_aliyun-vpc-move-ip.7 \ + ocf_heartbeat_anything.7 \ + ocf_heartbeat_apache.7 \ +@@ -136,6 +137,7 @@ + ocf_heartbeat_lxd-info.7 \ + ocf_heartbeat_machine-info.7 \ + ocf_heartbeat_mariadb.7 \ ++ ocf_heartbeat_minio.7 \ + ocf_heartbeat_mysql.7 \ + ocf_heartbeat_mysql-proxy.7 \ + ocf_heartbeat_nagios.7 \ +@@ -150,6 +152,7 @@ + ocf_heartbeat_oracle.7 \ + ocf_heartbeat_oralsnr.7 \ + ocf_heartbeat_ovsmonitor.7 \ ++ ocf_heartbeat_pgagent.7 \ + ocf_heartbeat_pgsql.7 \ + ocf_heartbeat_pingd.7 \ + ocf_heartbeat_portblock.7 \ +@@ -158,6 +161,7 @@ + ocf_heartbeat_proftpd.7 \ + ocf_heartbeat_rabbitmq-cluster.7 \ + ocf_heartbeat_redis.7 \ ++ ocf_heartbeat_rkt.7 \ + ocf_heartbeat_rsyncd.7 \ + ocf_heartbeat_rsyslog.7 \ + ocf_heartbeat_scsi2reservation.7 \ +@@ -172,6 +176,7 @@ + ocf_heartbeat_varnish.7 \ + ocf_heartbeat_vdo-vol.7 \ + ocf_heartbeat_vmware.7 \ ++ ocf_heartbeat_vsftpd.7 \ + ocf_heartbeat_zabbixserver.7 + + if USE_IPV6ADDR_AGENT diff --git a/SOURCES/bz1471182-crypt-1-new-ra.patch b/SOURCES/bz1471182-crypt-1-new-ra.patch new file mode 100644 index 0000000..7ed08b5 --- /dev/null +++ b/SOURCES/bz1471182-crypt-1-new-ra.patch @@ -0,0 +1,415 @@ +From 019c3108feff48d8ad496cd0759349c46170dc2d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 6 Apr 2020 10:23:51 +0200 +Subject: [PATCH 1/2] crypt: new resource agent + +--- + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/crypt | 337 ++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 339 insertions(+) + create mode 100755 heartbeat/crypt + +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index 478fbe4f8..53c9975ec 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -105,6 +105,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_azure-lb.7 \ + ocf_heartbeat_clvm.7 \ + ocf_heartbeat_conntrackd.7 \ ++ ocf_heartbeat_crypt.7 \ + ocf_heartbeat_db2.7 \ + ocf_heartbeat_dhcpd.7 \ + ocf_heartbeat_docker.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 893115810..bbc9590ac 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -101,6 +101,7 @@ ocf_SCRIPTS = AoEtarget \ + azure-lb \ + clvm \ + conntrackd \ ++ crypt \ + db2 \ + dhcpd \ + dnsupdate \ +diff --git a/heartbeat/crypt b/heartbeat/crypt +new file mode 100755 
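
A pattern that recurs throughout this series: every agent shipped from heartbeat/ must also be listed in heartbeat/Makefile.am, and its generated manpage in doc/man/Makefile.am, or it silently drops out of the install and the documentation. A sketch of wiring up a hypothetical new agent, assuming the project's usual autotools flow:

    # After adding heartbeat/myagent (hypothetical) and listing it in
    # heartbeat/Makefile.am (ocf_SCRIPTS) and doc/man/Makefile.am
    # (man_MANS, as ocf_heartbeat_myagent.7), regenerate and build:
    ./autogen.sh
    ./configure
    make
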
+index 000000000..6bffdff89 +--- /dev/null ++++ b/heartbeat/crypt +@@ -0,0 +1,337 @@ ++#!/bin/sh ++# ++# crypt/LUKS OCF RA. Manages cryptsetup devices. ++# ++# Copyright (c) 2020 Red Hat GmbH, Heinz Mauelshagen ++# All Rights Reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++ ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++# Parameter defaults ++OCF_RESKEY_encrypted_dev_default="" ++OCF_RESKEY_crypt_dev_default="" ++OCF_RESKEY_key_file_default="" ++OCF_RESKEY_crypt_type_default="" ++OCF_RESKEY_force_stop_default="false" ++ ++: ${OCF_RESKEY_encrypted_dev=${OCF_RESKEY_encrypted_dev_default}} ++: ${OCF_RESKEY_crypt_dev=${OCF_RESKEY_crypt_dev_default}} ++: ${OCF_RESKEY_key_file=${OCF_RESKEY_key_file_default}} ++: ${OCF_RESKEY_crypt_type=${OCF_RESKEY_crypt_type_default}} ++: ${OCF_RESKEY_force_stop=${OCF_RESKEY_force_stop_default}} ++ ++####################################################################### ++ ++meta_data() { ++ cat < ++ ++ ++1.0 ++ ++ ++This is a LUKS/crypt Resource Agent managing encrypted devices via cryptsetup(8). ++The agent imposes limitations on device types supported: luks, luks[1..N]. ++ ++LUKS/crypt resource agent ++ ++ ++ ++ ++ ++Encrypted backing device, which should be defined by UUID, ++36 characters including '-'s as reported by blkid(8). ++ ++Although it can be defined as a block device path (e.g. /dev/sdh), ++the UUID should be preferred over the block device path to allow for the ++unique discovery of the crypt backing device given the volatile nature of ++/dev entries (e.g. /dev/sdh on one node may be /dev/sdg on another). ++ ++Only define as block device path if you know what you are doing. ++ ++Encrypted device ++ ++ ++ ++ ++ ++Encrypted device name, no path. I.e. the one given in "cryptsetup open name ...". ++The resulting block device path is /dev/mapper/name. ++ ++Encrypted device ++ ++ ++ ++ ++ ++Key file path containing the encryption passphrase ++(aka key; see cryptsetup(8)). For LUKS, the passphrase as of the key_file ++parameter is used to decrypt a randomly selected key when the device was created. ++ ++Key file ++ ++ ++ ++ ++ ++Encryption (device) type (e.g. "luks" or "luks2"). ++ ++This parameter affirms the encryption format as of the crypt metadata ++thus allowing for safety measures when starting the encrypted resource. 
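
As the encrypted_dev description notes, identifying the backing device by UUID keeps the configuration stable when /dev names differ between nodes. A quick way to discover the values these parameters expect, assuming a hypothetical LUKS-formatted disk:

    # List LUKS-formatted block devices and their UUIDs.
    blkid -t TYPE=crypto_LUKS

    # Confirm a candidate device really carries LUKS metadata
    # (the same test the agent's validate step performs).
    cryptsetup isLuks /dev/disk/by-uuid/01234567-89ab-cdef-0123-456789abcdef \
        && echo "LUKS device"
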
++ ++Encryption type ++ ++ ++ ++ ++ ++If processes or kernel threads are using the crypt device, it cannot ++be stopped. We will try to stop processes, first by sending TERM and ++then, if that doesn't help in $PROC_CLEANUP_TIME seconds, using KILL. ++The lsof(8) program is required to get the list of array users. ++Of course, the kernel threads cannot be stopped this way. ++If the processes are critical for data integrity, then set this ++parameter to false. Note that in that case the stop operation ++will fail and the node will be fenced. ++ ++force stop processes using the crpyt device ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++# Disable cryptsetup auto-recovery if cloned. ++disable_locks="" ++ocf_is_clone && disable_locks="--disable-locks" ++ ++crypt_usage() { ++ cat </dev/null ++ if [ $? -eq 0 ] && [ -z "$crypt_dev" ]; then ++ ocf_exit_reason "Crypt device \"$crypt_dev\" name has to at least 1 character long and without path" ++ return $OCF_ERR_ARGS ++ fi ++ if [ ! -r "$key_file" ]; then ++ ocf_exit_reason "Hash key file $key_file not accessible" ++ return $OCF_ERR_ARGS ++ fi ++ if ! ocf_is_true "$force_stop" && "$force_stop" != "false" ]]; then ++ ocf_exit_reason "Bogus force_stop=\"$force_stop\" attribute" ++ return $OCF_ERR_CONFIGURED ++ fi ++ if "$force_stop" = "true" && ! have_binary lsof; then ++ ocf_exit_reason "Force stop requested, please install lsof(8)" ++ return $OCF_ERR_INSTALLED ++ fi ++ cryptsetup isLuks $encrypted_dev 2>/dev/null ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "$encrypted_dev is not a Luks formatted device" ++ return $OCF_ERR_CONFIGURED ++ fi ++ ++ return $OCF_SUCCESS ++} ++ ++get_users_pids() { ++ ocf_log debug "running lsof to list \"$crypt_dev\" users..." ++ ocf_run -warn 'lsof $crypt_dev_path | tail -n +2 | awk "{print $2}" | sort -u' ++} ++ ++stop_crypt_users() { ++ local pids=`get_users_pids` ++ ++ if [ -z "$pids" ]; then ++ ocf_log warn "lsof reported no users holding arrays" ++ return 2 ++ fi ++ ++ ocf_stop_processes TERM $PROC_CLEANUP_TIME $pids ++} ++ ++show_users() { ++ local dm_dev ++ ++ ocf_log info "running lsof to list \"$crypt_dev\" users..." ++ ocf_run -warn lsof $crypt_dev_path ++ ++ dm_dev=$(basename $(realpath $crypt_dev_path)) ++ if [ -d /sys/block/$dm_dev/holders ]; then ++ ocf_log debug "ls -l /sys/block/$dm_dev/holders" ++ ocf_run -warn ls -l /sys/block/$dm_dev/holders ++ fi ++} ++ ++crypt_stop_one() { ++ cryptsetup close $crypt_dev $disable_locks ++} ++ ++####################################################################### ++# ++# Action: START an encrypted resource ++# ++crypt_start() { ++ local rc ++ ++ cryptsetup open $encrypted_dev $crypt_dev --type $crypt_type $disable_locks --key-file=$key_file ++ rc=$? ++ if [ $rc -eq 0 ];then ++ crypt_monitor ++ rc=$? ++ else ++ rc=$OCF_ERR_GERNERIC ++ fi ++ [ $rc -ne $OCF_SUCCESS ] ocf_exit_reason "Failed to start encrypted device \"$crypt_dev\"" ++ ++ return $rc ++} ++ ++# ++# Action: STOP an encrypted resource ++# ++crypt_stop() { ++ local rc ++ ++ crypt_monitor ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ crypt_stop_one ++ crypt_monitor ++ rc=$? ++ fi ++ if [ $rc -ne $OCF_NOT_RUNNING ] && ocf_is_true $FORCESTOP; then ++ stop_crypt_users ++ case $? in ++ 2) rc=$OCF_SUCCESS;; ++ *) crypt_stop_one ++ crypt_monitor ++ rc=$?;; ++ esac ++ fi ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log warn "Couldn't stop crypt device \"$crypt_dev\" (rc=$rc)" ++ show_users ++ ocf_exit_reason "Failed to stop crypt device \"$crypt_dev\"!" 
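
Underneath the start, stop, and monitor actions above sit three cryptsetup calls. Their standalone shape, with hypothetical device and key names (the agent additionally passes --disable-locks when cloned):

    # start: map the encrypted device to /dev/mapper/secret_vol
    cryptsetup open /dev/disk/by-uuid/01234567-89ab-cdef-0123-456789abcdef \
        secret_vol --type luks2 --key-file /etc/cluster/secret.key

    # monitor: a zero exit status means the mapping exists
    cryptsetup status secret_vol >/dev/null 2>&1 && echo running

    # stop: tear the mapping down again
    cryptsetup close secret_vol
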
++ return $OCF_ERR_GENERIC
++ fi
++
++ return $OCF_SUCCESS
++}
++
++#
++# Action: MONITOR an encrypted resource
++#
++crypt_monitor() {
++ cryptsetup status $crypt_dev $disable_locks &>/dev/null
++ if [ $? -eq 0 ]; then
++ [ -L $crypt_dev_path ] && return $OCF_SUCCESS
++ return $OCF_ERR_GENERIC
++ fi
++
++ [ "$__OCF_ACTION" = "monitor" ] && ! ocf_is_probe && ocf_exit_reason "Crypt resource not running"
++ return $OCF_NOT_RUNNING
++}
++
++# Check for strange argument count.
++if [ $# -ne 1 ]; then
++ usage
++ exit $OCF_ERR_ARGS
++fi
++
++case "$__OCF_ACTION" in
++meta-data) meta_data
++ exit $OCF_SUCCESS;;
++usage|help) crypt_usage
++ exit $OCF_SUCCESS;;
++esac
++
++# XME: remove once pacemaker is fixed and calls this action
++crypt_validate_all
++rc=$?
++[ $rc -ne $OCF_SUCCESS ] && exit $rc
++
++case "$__OCF_ACTION" in
++start) crypt_start; rc=$?;;
++stop) crypt_stop; rc=$?;;
++monitor) crypt_monitor; rc=$?;;
++validate-all) rc=$OCF_SUCCESS;; # crypt_validate_all would have errored out above already.
++*) crypt_usage
++ exit $OCF_ERR_UNIMPLEMENTED;;
++esac
++
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
++exit $rc
+
+From 5e0d35f8db967419ea9f1234ab621b88babcf3ea Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Tue, 7 Apr 2020 12:39:24 +0200
+Subject: [PATCH 2/2] crypt: force_stop check fixes
+
+---
+ heartbeat/crypt | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/heartbeat/crypt b/heartbeat/crypt
+index 6bffdff89..8bfa1094d 100755
+--- a/heartbeat/crypt
++++ b/heartbeat/crypt
+@@ -190,11 +190,7 @@ crypt_validate_all() {
+ ocf_exit_reason "Hash key file $key_file not accessible"
+ return $OCF_ERR_ARGS
+ fi
+- if ! ocf_is_true "$force_stop" && "$force_stop" != "false" ]]; then
+- ocf_exit_reason "Bogus force_stop=\"$force_stop\" attribute"
+- return $OCF_ERR_CONFIGURED
+- fi
+- if "$force_stop" = "true" && ! have_binary lsof; then
++ if ocf_is_true "$force_stop" && ! have_binary lsof; then
+ ocf_exit_reason "Force stop requested, please install lsof(8)"
+ return $OCF_ERR_INSTALLED
+ fi
+@@ -273,7 +269,7 @@ crypt_stop() {
+ crypt_monitor
+ rc=$?
+ fi
+- if [ $rc -ne $OCF_NOT_RUNNING ] && ocf_is_true $FORCESTOP; then
++ if [ $rc -ne $OCF_NOT_RUNNING ] && ocf_is_true $force_stop; then
+ stop_crypt_users
+ case $? in
+ 2) rc=$OCF_SUCCESS;;
diff --git a/SOURCES/bz1471182-crypt-2-fix-bashism.patch b/SOURCES/bz1471182-crypt-2-fix-bashism.patch
new file mode 100644
index 0000000..dace36f
--- /dev/null
+++ b/SOURCES/bz1471182-crypt-2-fix-bashism.patch
@@ -0,0 +1,22 @@
+From 2915fa336e95b609d3d738d335799f015022c493 Mon Sep 17 00:00:00 2001
+From: Valentin Vidic
+Date: Sat, 13 Jun 2020 08:47:36 +0200
+Subject: [PATCH] crypt: fix bashism
+
+---
+ heartbeat/crypt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/crypt b/heartbeat/crypt
+index 8bfa1094d..2727b5b23 100755
+--- a/heartbeat/crypt
++++ b/heartbeat/crypt
+@@ -292,7 +292,7 @@ crypt_stop() {
+ # Action: MONITOR an encrypted resource
+ #
+ crypt_monitor() {
+- cryptsetup status $crypt_dev $disable_locks &>/dev/null
++ cryptsetup status $crypt_dev $disable_locks >/dev/null 2>&1
+ if [ $? 
-eq 0 ]; then + [ -L $crypt_dev_path ] && return $OCF_SUCCESS + return $OCF_ERR_GENERIC diff --git a/SOURCES/bz1471182-crypt-3-fix-missing-and.patch b/SOURCES/bz1471182-crypt-3-fix-missing-and.patch new file mode 100644 index 0000000..8a0deaf --- /dev/null +++ b/SOURCES/bz1471182-crypt-3-fix-missing-and.patch @@ -0,0 +1,22 @@ +From 635c344fb85ef225b8a0c094687d2838b0b0cd2c Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 26 Oct 2020 16:36:06 +0100 +Subject: [PATCH] crypt: fix missing && to set exit_reason + +--- + heartbeat/crypt | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/crypt b/heartbeat/crypt +index 2727b5b23..0e49b6c2d 100755 +--- a/heartbeat/crypt ++++ b/heartbeat/crypt +@@ -251,7 +251,7 @@ crypt_start() { + else + rc=$OCF_ERR_GERNERIC + fi +- [ $rc -ne $OCF_SUCCESS ] ocf_exit_reason "Failed to start encrypted device \"$crypt_dev\"" ++ [ $rc -ne $OCF_SUCCESS ] && ocf_exit_reason "Failed to start encrypted device \"$crypt_dev\"" + + return $rc + } diff --git a/SOURCES/bz1552330-vdo-vol.patch b/SOURCES/bz1552330-vdo-vol.patch new file mode 100644 index 0000000..2aa093d --- /dev/null +++ b/SOURCES/bz1552330-vdo-vol.patch @@ -0,0 +1,285 @@ +From 8b07d095acbbb1069c1fb44142ccfdd0aeed075f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 16 May 2018 14:10:49 +0200 +Subject: [PATCH] vdo-vol: new resource agent + +--- + doc/man/Makefile.am | 3 +- + heartbeat/Makefile.am | 1 + + heartbeat/vdo-vol | 234 ++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 237 insertions(+), 1 deletion(-) + create mode 100755 heartbeat/vdo-vol + +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index c59126d1..8d94c10c 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -158,11 +158,12 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_slapd.7 \ + ocf_heartbeat_sybaseASE.7 \ + ocf_heartbeat_sg_persist.7 \ +- ocf_heartbeat_mpathpersist.7 \ ++ ocf_heartbeat_mpathpersist.7 \ + ocf_heartbeat_symlink.7 \ + ocf_heartbeat_syslog-ng.7 \ + ocf_heartbeat_tomcat.7 \ + ocf_heartbeat_varnish.7 \ ++ ocf_heartbeat_vdo-vol.7 \ + ocf_heartbeat_vmware.7 \ + ocf_heartbeat_zabbixserver.7 + +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 4f5059e2..a68fa978 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -162,6 +162,7 @@ ocf_SCRIPTS = AoEtarget \ + syslog-ng \ + tomcat \ + varnish \ ++ vdo-vol \ + vmware \ + vsftpd \ + zabbixserver +diff --git a/heartbeat/vdo-vol b/heartbeat/vdo-vol +new file mode 100755 +index 00000000..074339db +--- /dev/null ++++ b/heartbeat/vdo-vol +@@ -0,0 +1,234 @@ ++#!/bin/sh ++# ++# License: GNU General Public License (GPL) ++# (c) 2018 O. Albrigtsen ++# and Linux-HA contributors ++# ++# ----------------------------------------------------------------------------- ++# O C F R E S O U R C E S C R I P T S P E C I F I C A T I O N ++# ----------------------------------------------------------------------------- ++# ++# NAME ++# vdo-vol : OCF resource agent script for VDO (Virtual Data Optimizer) ++# ++ ++# Initialization: ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++# Defaults ++OCF_RESKEY_volume_default="" ++ ++: ${OCF_RESKEY_volume=${OCF_RESKEY_volume_default}} ++ ++ ++vdo_usage() { ++ cat < ++ ++ ++0.75 ++ ++ ++OCF Resource script for VDO (Virtual Data Optimizer) volume(s). It manages VDO volume(s) as a HA resource. 
++
++The configuration file needs to be synced to all nodes, and the systemd vdo service must be disabled when
++using this agent.
++
++VDO resource agent
++
++
++
++
++ Configuration file
++ Config file
++
++
++
++
++ VDO Volume (leave empty for all)
++ Volume (empty for all)
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++END
++}
++
++
++rebuild() {
++ ocf_log warn "${OCF_RESKEY_volume} is in $MODE mode, starting in rebuild mode"
++
++ vdo stop $OPTIONS
++
++ while vdo_monitor skiprocheck; do
++ sleep 1
++ done
++
++ vdo start $OPTIONS --forceRebuild
++
++ while ! vdo_monitor; do
++ sleep 1
++ done
++
++ return $?
++}
++
++vdo_start() {
++ # if resource is already running, no need to continue code after this.
++ if vdo_monitor; then
++ ocf_log info "VDO volume(s): ${OCF_RESKEY_volume} is already active"
++ return $OCF_SUCCESS
++ fi
++
++ vdo activate $OPTIONS
++ vdo start $OPTIONS
++
++ while ! vdo_monitor skiprocheck; do
++ sleep 1
++ done
++
++ MODE=$(vdostats --verbose ${OCF_RESKEY_volume} | grep "operating mode" | awk '{print $NF}')
++ if [ $(echo "$MODE" | grep -v "normal" | wc -l) -gt 0 ]; then
++ rebuild
++ fi
++
++ if [ $? -eq $OCF_SUCCESS ]; then
++ ocf_log info "VDO volume(s): ${OCF_RESKEY_volume} activated"
++ return ${OCF_SUCCESS}
++ fi
++
++ return $?
++}
++
++vdo_stop() {
++ vdo_monitor skiprocheck
++ if [ $? -ne $OCF_SUCCESS ]; then
++ # Currently not running. Nothing to do.
++ ocf_log info "VDO volume(s): ${OCF_RESKEY_volume} already deactivated"
++
++ return $OCF_SUCCESS
++ fi
++
++ vdo stop $OPTIONS
++ vdo deactivate $OPTIONS
++
++ # Wait for process to stop
++ while vdo_monitor skiprocheck; do
++ sleep 1
++ done
++
++ return $OCF_SUCCESS
++}
++
++vdo_monitor(){
++ status=$(vdo status $OPTIONS 2>&1)
++ MODE=$(vdostats vdo_vol --verbose | grep "operating mode" | awk '{print $NF}')
++
++ case "$status" in
++ *"Device mapper status: not available"*)
++ return $OCF_NOT_RUNNING
++ ;;
++ *"Device mapper status: "*online*)
++ if [ "$MODE" = "read-only" ] && [ "$1" != "skiprocheck" ]; then
++ ocf_log err "VDO volume(s): ${OCF_RESKEY_volume} is in $MODE mode."
++ return $OCF_ERR_GENERIC
++ else
++ return $OCF_SUCCESS
++ fi
++ ;;
++ *)
++ ocf_log err "VDO volume(s): ${OCF_RESKEY_volume} failed\n$status"
++ return $OCF_ERR_GENERIC;;
++ esac
++}
++
++vdo_validate_all(){
++ check_binary "vdo"
++
++ if systemctl is-enabled vdo > /dev/null 2>&1; then
++ ocf_exit_reason "systemd service vdo needs to be disabled"
++ exit $OCF_ERR_CONFIGURED
++ fi
++
++ if [ -n "${OCF_RESKEY_config}" ] && [ ! -f "${OCF_RESKEY_config}" ]; then
++ ocf_exit_reason "Configuration file: ${OCF_RESKEY_config} not found"
++ exit $OCF_ERR_CONFIGURED
++ fi
++
++ return $OCF_SUCCESS
++}
++
++
++# **************************** MAIN SCRIPT ************************************
++
++# Make sure meta-data and usage always succeed
++case $__OCF_ACTION in
++ meta-data)
++ vdo_meta_data
++ exit $OCF_SUCCESS
++ ;;
++ usage|help)
++ vdo_usage
++ exit $OCF_SUCCESS
++ ;;
++esac
++
++# This OCF agent script needs to be run as root user.
++if ! ocf_is_root; then
++ echo "$0 agent script needs to be run as root user."
++ ocf_log debug "$0 agent script needs to be run as root user."
++ exit $OCF_ERR_GENERIC ++fi ++ ++if [ -z "${OCF_RESKEY_volume}" ]; then ++ OPTIONS="-a" ++else ++ OPTIONS="-n ${OCF_RESKEY_volume}" ++fi ++ ++if [ -n "${OCF_RESKEY_config}" ]; then ++ OPTIONS="$OPTIONS -f ${OCF_RESKEY_config}" ++fi ++ ++# Translate each action into the appropriate function call ++case $__OCF_ACTION in ++ start) ++ vdo_validate_all ++ vdo_start;; ++ stop) ++ vdo_stop;; ++ status|monitor) ++ vdo_monitor;; ++ validate-all) ++ ;; ++ *) ++ vdo_usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac ++ ++exit $? ++ ++# End of this script +-- +2.17.1 + diff --git a/SOURCES/bz1607607-podman.patch b/SOURCES/bz1607607-podman.patch new file mode 100644 index 0000000..572e761 --- /dev/null +++ b/SOURCES/bz1607607-podman.patch @@ -0,0 +1,538 @@ +From 07d283a6e20b8e559257c9694f7e36e155075014 Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Sun, 22 Jul 2018 17:54:29 +0200 +Subject: [PATCH] Initial podman support + +Tested with the following container: + podman container set: test_bundle [docker.io/sdelrio/docker-minimal-nginx] + test_bundle-podman-0 (ocf::heartbeat:podman): Started nodea + test_bundle-podman-1 (ocf::heartbeat:podman): Started nodeb + test_bundle-podman-2 (ocf::heartbeat:podman): Started nodec + +Tested a couple of stop/start cycles successfully. Needs the +corresponding pacemaker support https://github.com/ClusterLabs/pacemaker/pull/1564 +--- + doc/man/Makefile.am | 1 + + heartbeat/Makefile.am | 1 + + heartbeat/podman | 488 ++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 490 insertions(+) + create mode 100755 heartbeat/podman + +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index 145e5fd50..0bef88740 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -151,6 +151,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_pgagent.7 \ + ocf_heartbeat_pgsql.7 \ + ocf_heartbeat_pingd.7 \ ++ ocf_heartbeat_podman.7 \ + ocf_heartbeat_portblock.7 \ + ocf_heartbeat_postfix.7 \ + ocf_heartbeat_pound.7 \ +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index e7a3a4fac..993bff042 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -146,6 +146,7 @@ ocf_SCRIPTS = AoEtarget \ + pgagent \ + pgsql \ + pingd \ ++ podman \ + portblock \ + postfix \ + pound \ +diff --git a/heartbeat/podman b/heartbeat/podman +new file mode 100755 +index 000000000..88475f1df +--- /dev/null ++++ b/heartbeat/podman +@@ -0,0 +1,488 @@ ++#!/bin/sh ++# ++# The podman HA resource agent creates and launches a podman container ++# based off a supplied podman image. Containers managed by this agent ++# are both created and removed upon the agent's start and stop actions. ++# ++# Copyright (c) 2014 David Vossel ++# Michele Baldessari ++# All Rights Reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. 
Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++ ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++meta_data() ++{ ++ cat < ++ ++ ++1.0 ++ ++ ++The podman HA resource agent creates and launches a podman container ++based off a supplied podman image. Containers managed by this agent ++are both created and removed upon the agent's start and stop actions. ++ ++Podman container resource agent. ++ ++ ++ ++ ++The podman image to base this container off of. ++ ++podman image ++ ++ ++ ++ ++ ++The name to give the created container. By default this will ++be that resource's instance name. ++ ++podman container name ++ ++ ++ ++ ++ ++Allow the image to be pulled from the configured podman registry when ++the image does not exist locally. NOTE, this can drastically increase ++the time required to start the container if the image repository is ++pulled over the network. ++ ++Allow pulling non-local images ++ ++ ++ ++ ++ ++Add options to be appended to the 'podman run' command which is used ++when creating the container during the start action. This option allows ++users to do things such as setting a custom entry point and injecting ++environment variables into the newly created container. Note the '-d' ++option is supplied regardless of this value to force containers to run ++in the background. ++ ++NOTE: Do not explicitly specify the --name argument in the run_opts. This ++agent will set --name using either the resource's instance or the name ++provided in the 'name' argument of this agent. ++ ++ ++run options ++ ++ ++ ++ ++ ++Specify a command to launch within the container once ++it has initialized. ++ ++run command ++ ++ ++ ++ ++ ++A comma separated list of directories that the container is expecting to use. ++The agent will ensure they exist by running 'mkdir -p' ++ ++Required mount points ++ ++ ++ ++ ++ ++Specify the full path of a command to launch within the container to check ++the health of the container. This command must return 0 to indicate that ++the container is healthy. A non-zero return code will indicate that the ++container has failed and should be recovered. ++ ++If 'podman exec' is supported, it is used to execute the command. If not, ++nsenter is used. ++ ++Note: Using this method for monitoring processes inside a container ++is not recommended, as containerd tries to track processes running ++inside the container and does not deal well with many short-lived ++processes being spawned. Ensure that your container monitors its ++own processes and terminates on fatal error rather than invoking ++a command from the outside. ++ ++monitor command ++ ++ ++ ++ ++ ++Kill a container immediately rather than waiting for it to gracefully ++shutdown ++ ++force kill ++ ++ ++ ++ ++ ++Allow the container to be reused after stopping the container. By default ++containers are removed after stop. With the reuse option containers ++will persist after the container stops. 
++ ++reuse container ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++REQUIRE_IMAGE_PULL=0 ++ ++podman_usage() ++{ ++ cat </dev/null 2>&1; then ++ out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) ++ rc=$? ++ else ++ out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(podman inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1) ++ rc=$? ++ fi ++ ++ if [ $rc -eq 127 ]; then ++ ocf_log err "monitor cmd failed (rc=$rc), output: $out" ++ ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container." ++ # there is no recovering from this, exit immediately ++ exit $OCF_ERR_ARGS ++ elif [ $rc -ne 0 ]; then ++ ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out" ++ rc=$OCF_ERR_GENERIC ++ else ++ ocf_log debug "monitor cmd passed: exit code = $rc" ++ fi ++ ++ return $rc ++} ++ ++container_exists() ++{ ++ podman inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1 ++} ++ ++remove_container() ++{ ++ if ocf_is_true "$OCF_RESKEY_reuse"; then ++ # never remove the container if we have reuse enabled. ++ return 0 ++ fi ++ ++ container_exists ++ if [ $? -ne 0 ]; then ++ # don't attempt to remove a container that doesn't exist ++ return 0 ++ fi ++ ocf_log notice "Cleaning up inactive container, ${CONTAINER}." ++ ocf_run podman rm $CONTAINER ++} ++ ++podman_simple_status() ++{ ++ local val ++ ++ container_exists ++ if [ $? -ne 0 ]; then ++ return $OCF_NOT_RUNNING ++ fi ++ ++ # retrieve the 'Running' attribute for the container ++ val=$(podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null) ++ if [ $? -ne 0 ]; then ++ #not running as a result of container not being found ++ return $OCF_NOT_RUNNING ++ fi ++ ++ if ocf_is_true "$val"; then ++ # container exists and is running ++ return $OCF_SUCCESS ++ fi ++ ++ return $OCF_NOT_RUNNING ++} ++ ++podman_monitor() ++{ ++ local rc=0 ++ ++ podman_simple_status ++ rc=$? ++ ++ if [ $rc -ne 0 ]; then ++ return $rc ++ fi ++ ++ monitor_cmd_exec ++} ++ ++podman_create_mounts() { ++ oldIFS="$IFS" ++ IFS="," ++ for directory in $OCF_RESKEY_mount_points; do ++ mkdir -p "$directory" ++ done ++ IFS="$oldIFS" ++} ++ ++podman_start() ++{ ++ podman_create_mounts ++ local run_opts="-d --name=${CONTAINER}" ++ # check to see if the container has already started ++ podman_simple_status ++ if [ $? -eq $OCF_SUCCESS ]; then ++ return $OCF_SUCCESS ++ fi ++ ++ if [ -n "$OCF_RESKEY_run_opts" ]; then ++ run_opts="$run_opts $OCF_RESKEY_run_opts" ++ fi ++ ++ if [ $REQUIRE_IMAGE_PULL -eq 1 ]; then ++ ocf_log notice "Beginning pull of image, ${OCF_RESKEY_image}" ++ podman pull "${OCF_RESKEY_image}" ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "failed to pull image ${OCF_RESKEY_image}" ++ return $OCF_ERR_GENERIC ++ fi ++ fi ++ ++ if ocf_is_true "$OCF_RESKEY_reuse" && container_exists; then ++ ocf_log info "starting existing container $CONTAINER." ++ ocf_run podman start $CONTAINER ++ else ++ # make sure any previous container matching our container name is cleaned up first. ++ # we already know at this point it wouldn't be running ++ remove_container ++ ocf_log info "running container $CONTAINER for the first time" ++ ocf_run podman run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd ++ fi ++ ++ if [ $? 
-ne 0 ]; then ++ ocf_exit_reason "podman failed to launch container" ++ return $OCF_ERR_GENERIC ++ fi ++ ++ ++ # wait for monitor to pass before declaring that the container is started ++ while true; do ++ podman_simple_status ++ if [ $? -ne $OCF_SUCCESS ]; then ++ ocf_exit_reason "Newly created podman container exited after start" ++ return $OCF_ERR_GENERIC ++ fi ++ ++ monitor_cmd_exec ++ if [ $? -eq $OCF_SUCCESS ]; then ++ ocf_log notice "Container $CONTAINER started successfully" ++ return $OCF_SUCCESS ++ fi ++ ++ ocf_exit_reason "waiting on monitor_cmd to pass after start" ++ sleep 1 ++ done ++} ++ ++podman_stop() ++{ ++ local timeout=60 ++ podman_simple_status ++ if [ $? -eq $OCF_NOT_RUNNING ]; then ++ remove_container ++ return $OCF_SUCCESS ++ fi ++ ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000) -10 )) ++ if [ $timeout -lt 10 ]; then ++ timeout=10 ++ fi ++ fi ++ ++ if ocf_is_true "$OCF_RESKEY_force_kill"; then ++ ocf_run podman kill $CONTAINER ++ else ++ ocf_log debug "waiting $timeout second[s] before killing container" ++ ocf_run podman stop -t=$timeout $CONTAINER ++ fi ++ ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to stop container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}." ++ return $OCF_ERR_GENERIC ++ fi ++ ++ remove_container ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "Failed to remove stopped container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}." ++ return $OCF_ERR_GENERIC ++ fi ++ ++ return $OCF_SUCCESS ++} ++ ++image_exists() ++{ ++ # if no tag was specified, use default "latest" ++ local COLON_FOUND=0 ++ local SLASH_FOUND=0 ++ local SERVER_NAME="" ++ local IMAGE_NAME="${OCF_RESKEY_image}" ++ local IMAGE_TAG="latest" ++ ++ SLASH_FOUND="$(echo "${OCF_RESKEY_image}" | grep -o '/' | grep -c .)" ++ ++ if [ ${SLASH_FOUND} -ge 1 ]; then ++ SERVER_NAME="$(echo ${IMAGE_NAME} | cut -d / -f 1-${SLASH_FOUND})" ++ IMAGE_NAME="$(echo ${IMAGE_NAME} | awk -F'/' '{print $NF}')" ++ fi ++ ++ COLON_FOUND="$(echo "${IMAGE_NAME}" | grep -o ':' | grep -c .)" ++ if [ ${COLON_FOUND} -ge 1 ]; then ++ IMAGE_TAG="$(echo ${IMAGE_NAME} | awk -F':' '{print $NF}')" ++ IMAGE_NAME="$(echo ${IMAGE_NAME} | cut -d : -f 1-${COLON_FOUND})" ++ fi ++ ++ # IMAGE_NAME might be following formats: ++ # - image ++ # - repository:port/image ++ # - docker.io/image (some distro will display "docker.io/" as prefix) ++ podman images | awk '{print $1 ":" $2}' | egrep -q -s "^(docker.io\/|${SERVER_NAME}\/)?${IMAGE_NAME}:${IMAGE_TAG}\$" ++ if [ $? -eq 0 ]; then ++ # image found ++ return 0 ++ fi ++ ++ if ocf_is_true "$OCF_RESKEY_allow_pull"; then ++ REQUIRE_IMAGE_PULL=1 ++ ocf_log notice "Image (${OCF_RESKEY_image}) does not exist locally but will be pulled during start" ++ return 0 ++ fi ++ # image not found. ++ return 1 ++} ++ ++podman_validate() ++{ ++ check_binary podman ++ if [ -z "$OCF_RESKEY_image" ]; then ++ ocf_exit_reason "'image' option is required" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ if [ -n "$OCF_RESKEY_monitor_cmd" ]; then ++ podman exec --help >/dev/null 2>&1 ++ if [ ! $? ]; then ++ ocf_log info "checking for nsenter, which is required when 'monitor_cmd' is specified" ++ check_binary nsenter ++ fi ++ fi ++ ++ image_exists ++ if [ $? -ne 0 ]; then ++ ocf_exit_reason "base image, ${OCF_RESKEY_image}, could not be found." ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ return $OCF_SUCCESS ++} ++ ++# TODO : ++# When a user starts plural clones in a node in globally-unique, a user cannot appoint plural name parameters. 
++# When a user appoints reuse, the resource agent cannot connect plural clones with a container. ++ ++if ocf_is_true "$OCF_RESKEY_CRM_meta_globally_unique"; then ++ if [ -n "$OCF_RESKEY_name" ]; then ++ if [ -n "$OCF_RESKEY_CRM_meta_clone_node_max" ] && [ "$OCF_RESKEY_CRM_meta_clone_node_max" -ne 1 ] ++ then ++ ocf_exit_reason "Cannot make plural clones from the same name parameter." ++ exit $OCF_ERR_CONFIGURED ++ fi ++ if [ -n "$OCF_RESKEY_CRM_meta_master_node_max" ] && [ "$OCF_RESKEY_CRM_meta_master_node_max" -ne 1 ] ++ then ++ ocf_exit_reason "Cannot make plural master from the same name parameter." ++ exit $OCF_ERR_CONFIGURED ++ fi ++ fi ++ : ${OCF_RESKEY_name=`echo ${OCF_RESOURCE_INSTANCE} | tr ':' '-'`} ++else ++ : ${OCF_RESKEY_name=${OCF_RESOURCE_INSTANCE}} ++fi ++ ++CONTAINER=$OCF_RESKEY_name ++ ++case $__OCF_ACTION in ++meta-data) meta_data ++ exit $OCF_SUCCESS;; ++start) ++ podman_validate ++ podman_start;; ++stop) podman_stop;; ++monitor) podman_monitor;; ++validate-all) podman_validate;; ++usage|help) podman_usage ++ exit $OCF_SUCCESS ++ ;; ++*) podman_usage ++ exit $OCF_ERR_UNIMPLEMENTED ++ ;; ++esac ++rc=$? ++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" ++exit $rc diff --git a/SOURCES/bz1631291-systemd-tmpfiles-configurable-path.patch b/SOURCES/bz1631291-systemd-tmpfiles-configurable-path.patch new file mode 100644 index 0000000..850c318 --- /dev/null +++ b/SOURCES/bz1631291-systemd-tmpfiles-configurable-path.patch @@ -0,0 +1,48 @@ +From c70924b69af760ec3762b01594afb6ff82c3820c Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 19 Sep 2018 16:13:43 +0200 +Subject: [PATCH] systemd-tmpfiles: configure path with --with-rsctmpdir + +--- + configure.ac | 3 ++- + systemd/resource-agents.conf | 1 - + systemd/resource-agents.conf.in | 1 + + 3 files changed, 3 insertions(+), 2 deletions(-) + delete mode 100644 systemd/resource-agents.conf + create mode 100644 systemd/resource-agents.conf.in + +diff --git a/configure.ac b/configure.ac +index b7ffb99f3..e34d125e9 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -431,7 +431,7 @@ AC_SUBST(HA_VARRUNDIR) + + # Expand $prefix + eval HA_RSCTMPDIR="`eval echo ${HA_RSCTMPDIR}`" +-AC_DEFINE_UNQUOTED(HA_RSCTMPDIR,"$HA_RSCTMPDIR", Where Resouce agents keep state files) ++AC_DEFINE_UNQUOTED(HA_RSCTMPDIR,"$HA_RSCTMPDIR", Where Resource agents keep state files) + AC_SUBST(HA_RSCTMPDIR) + + dnl Eventually move out of the heartbeat dir tree and create symlinks when needed +@@ -911,6 +911,7 @@ heartbeat/Makefile \ + heartbeat/ocf-shellfuncs \ + heartbeat/shellfuncs \ + systemd/Makefile \ ++ systemd/resource-agents.conf \ + tools/Makefile \ + tools/ocf-tester \ + tools/ocft/Makefile \ +diff --git a/systemd/resource-agents.conf b/systemd/resource-agents.conf +deleted file mode 100644 +index 1cb129c18..000000000 +--- a/systemd/resource-agents.conf ++++ /dev/null +@@ -1 +0,0 @@ +-d /var/run/resource-agents/ 1755 root root +diff --git a/systemd/resource-agents.conf.in b/systemd/resource-agents.conf.in +new file mode 100644 +index 000000000..7bd157ec0 +--- /dev/null ++++ b/systemd/resource-agents.conf.in +@@ -0,0 +1 @@ ++d @HA_RSCTMPDIR@ 1755 root root diff --git a/SOURCES/bz1633251-gcp-pd-move-1.patch b/SOURCES/bz1633251-gcp-pd-move-1.patch new file mode 100644 index 0000000..c7cbe8e --- /dev/null +++ b/SOURCES/bz1633251-gcp-pd-move-1.patch @@ -0,0 +1,425 @@ +From dedf420b8aa7e7e64fa56eeda2d7aeb5b2a5fcd9 Mon Sep 17 00:00:00 2001 +From: Gustavo Serra Scalet +Date: Mon, 17 Sep 2018 12:29:51 -0300 +Subject: [PATCH] Add gcp-pd-move 
python script
+
+---
+ configure.ac | 1 +
+ doc/man/Makefile.am | 1 +
+ heartbeat/Makefile.am | 1 +
+ heartbeat/gcp-pd-move.in | 370 +++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 373 insertions(+)
+ create mode 100755 heartbeat/gcp-pd-move.in
+
+diff --git a/configure.ac b/configure.ac
+index 10f5314da..b7ffb99f3 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -958,6 +958,7 @@ AC_CONFIG_FILES([heartbeat/conntrackd], [chmod +x heartbeat/conntrackd])
+ AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate])
+ AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88])
+ AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio])
++AC_CONFIG_FILES([heartbeat/gcp-pd-move], [chmod +x heartbeat/gcp-pd-move])
+ AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip])
+ AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip])
+ AC_CONFIG_FILES([heartbeat/gcp-vpc-move-route], [chmod +x heartbeat/gcp-vpc-move-route])
+diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
+index 0bef88740..0235c9af6 100644
+--- a/doc/man/Makefile.am
++++ b/doc/man/Makefile.am
+@@ -115,6 +115,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \
+ ocf_heartbeat_fio.7 \
+ ocf_heartbeat_galera.7 \
+ ocf_heartbeat_garbd.7 \
++ ocf_heartbeat_gcp-pd-move.7 \
+ ocf_heartbeat_gcp-vpc-move-ip.7 \
+ ocf_heartbeat_gcp-vpc-move-vip.7 \
+ ocf_heartbeat_gcp-vpc-move-route.7 \
+diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+index 993bff042..843186c98 100644
+--- a/heartbeat/Makefile.am
++++ b/heartbeat/Makefile.am
+@@ -111,6 +111,7 @@ ocf_SCRIPTS = AoEtarget \
+ fio \
+ galera \
+ garbd \
++ gcp-pd-move \
+ gcp-vpc-move-ip \
+ gcp-vpc-move-vip \
+ gcp-vpc-move-route \
+diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
+new file mode 100755
+index 000000000..f9f6c3163
+--- /dev/null
++++ b/heartbeat/gcp-pd-move.in
+@@ -0,0 +1,370 @@
++#!@PYTHON@ -tt
++# -*- coding: utf-8 -*-
++#
++# ---------------------------------------------------------------------
++# Copyright 2018 Google Inc.
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++# ---------------------------------------------------------------------
++# Description: Google Cloud Platform - Disk attach
++# ---------------------------------------------------------------------
++
++import json
++import logging
++import os
++import re
++import sys
++import time
++
++OCF_FUNCTIONS_DIR = "%s/lib/heartbeat" % os.environ.get("OCF_ROOT")
++sys.path.append(OCF_FUNCTIONS_DIR)
++
++import ocf
++
++try:
++ import googleapiclient.discovery
++except ImportError:
++ pass
++
++if sys.version_info >= (3, 0):
++ # Python 3 imports.
++ import urllib.parse as urlparse
++ import urllib.request as urlrequest
++else:
++ # Python 2 imports.
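++ # Python 2's urllib/urllib2 were reorganized into urllib.parse and
++ # urllib.request in Python 3; the aliases keep the rest of this
++ # agent version-agnostic.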
++ import urllib as urlparse ++ import urllib2 as urlrequest ++ ++ ++CONN = None ++PROJECT = None ++ZONE = None ++REGION = None ++LIST_DISK_ATTACHED_INSTANCES = None ++INSTANCE_NAME = None ++ ++PARAMETERS = { ++ 'disk_name': None, ++ 'disk_scope': None, ++ 'disk_csek_file': None, ++ 'mode': None, ++ 'device_name': None, ++} ++ ++MANDATORY_PARAMETERS = ['disk_name', 'disk_scope'] ++ ++METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' ++METADATA_HEADERS = {'Metadata-Flavor': 'Google'} ++METADATA = ''' ++ ++ ++1.0 ++ ++Resource Agent that can attach or detach a regional/zonal disk on current GCP ++instance. ++Requirements : ++- Disk has to be properly created as regional/zonal in order to be used ++correctly. ++ ++Attach/Detach a persistent disk on current GCP instance ++ ++ ++The name of the GCP disk. ++Disk name ++ ++ ++ ++Disk scope ++Network name ++ ++ ++ ++Path to a Customer-Supplied Encryption Key (CSEK) key file ++Customer-Supplied Encryption Key file ++ ++ ++ ++Attachment mode (rw, ro) ++Attachment mode ++ ++ ++ ++An optional name that indicates the disk name the guest operating system will see. ++Optional device name ++ ++ ++ ++ ++ ++ ++ ++ ++ ++''' ++ ++ ++def get_metadata(metadata_key, params=None, timeout=None): ++ """Performs a GET request with the metadata headers. ++ ++ Args: ++ metadata_key: string, the metadata to perform a GET request on. ++ params: dictionary, the query parameters in the GET request. ++ timeout: int, timeout in seconds for metadata requests. ++ ++ Returns: ++ HTTP response from the GET request. ++ ++ Raises: ++ urlerror.HTTPError: raises when the GET request fails. ++ """ ++ timeout = timeout or 60 ++ metadata_url = os.path.join(METADATA_SERVER, metadata_key) ++ params = urlparse.urlencode(params or {}) ++ url = '%s?%s' % (metadata_url, params) ++ request = urlrequest.Request(url, headers=METADATA_HEADERS) ++ request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) ++ return request_opener.open(request, timeout=timeout * 1.1).read() ++ ++ ++def populate_vars(): ++ global CONN ++ global INSTANCE_NAME ++ global PROJECT ++ global ZONE ++ global REGION ++ global LIST_DISK_ATTACHED_INSTANCES ++ ++ global PARAMETERS ++ ++ # Populate global vars ++ try: ++ CONN = googleapiclient.discovery.build('compute', 'v1') ++ except Exception as e: ++ logger.error('Couldn\'t connect with google api: ' + str(e)) ++ sys.exit(ocf.OCF_ERR_CONFIGURED) ++ ++ for param in PARAMETERS: ++ value = os.environ.get('OCF_RESKEY_%s' % param, None) ++ if not value and param in MANDATORY_PARAMETERS: ++ logger.error('Missing %s mandatory parameter' % param) ++ sys.exit(ocf.OCF_ERR_CONFIGURED) ++ PARAMETERS[param] = value ++ ++ try: ++ INSTANCE_NAME = get_metadata('instance/name') ++ except Exception as e: ++ logger.error( ++ 'Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) ++ sys.exit(ocf.OCF_ERR_CONFIGURED) ++ ++ PROJECT = get_metadata('project/project-id') ++ ZONE = get_metadata('instance/zone').split('/')[-1] ++ REGION = ZONE[:-2] ++ LIST_DISK_ATTACHED_INSTANCES = get_disk_attached_instances( ++ PARAMETERS['disk_name']) ++ ++ ++def configure_logs(): ++ # Prepare logging ++ global logger ++ logging.getLogger('googleapiclient').setLevel(logging.WARN) ++ logging_env = os.environ.get('OCF_RESKEY_stackdriver_logging') ++ if logging_env: ++ logging_env = logging_env.lower() ++ if any(x in logging_env for x in ['yes', 'true', 'enabled']): ++ try: ++ import google.cloud.logging.handlers ++ client = google.cloud.logging.Client() ++ handler = 
google.cloud.logging.handlers.CloudLoggingHandler( ++ client, name=INSTANCE_NAME) ++ handler.setLevel(logging.INFO) ++ formatter = logging.Formatter('gcp:alias "%(message)s"') ++ handler.setFormatter(formatter) ++ ocf.log.addHandler(handler) ++ logger = logging.LoggerAdapter( ++ ocf.log, {'OCF_RESOURCE_INSTANCE': ocf.OCF_RESOURCE_INSTANCE}) ++ except ImportError: ++ logger.error('Couldn\'t import google.cloud.logging, ' ++ 'disabling Stackdriver-logging support') ++ ++ ++def wait_for_operation(operation): ++ while True: ++ result = CONN.zoneOperations().get( ++ project=PROJECT, ++ zone=ZONE, ++ operation=operation['name']).execute() ++ ++ if result['status'] == 'DONE': ++ if 'error' in result: ++ raise Exception(result['error']) ++ return ++ time.sleep(1) ++ ++ ++def get_disk_attached_instances(disk): ++ def get_users_list(): ++ fl = 'name="%s"' % disk ++ request = CONN.disks().aggregatedList(project=PROJECT, filter=fl) ++ while request is not None: ++ response = request.execute() ++ locations = response.get('items', {}) ++ for location in locations.values(): ++ for d in location.get('disks', []): ++ if d['name'] == disk: ++ return d.get('users', []) ++ request = CONN.instances().aggregatedList_next( ++ previous_request=request, previous_response=response) ++ raise Exception("Unable to find disk %s" % disk) ++ ++ def get_only_instance_name(user): ++ return re.sub('.*/instances/', '', user) ++ ++ return map(get_only_instance_name, get_users_list()) ++ ++ ++def is_disk_attached(instance): ++ return instance in LIST_DISK_ATTACHED_INSTANCES ++ ++ ++def detach_disk(instance, disk_name): ++ # Python API misses disk-scope argument. ++ ++ # Detaching a disk is only possible by using deviceName, which is retrieved ++ # as a disk parameter when listing the instance information ++ request = CONN.instances().get( ++ project=PROJECT, zone=ZONE, instance=instance) ++ response = request.execute() ++ ++ device_name = None ++ for disk in response['disks']: ++ if disk_name in disk['source']: ++ device_name = disk['deviceName'] ++ break ++ ++ if not device_name: ++ logger.error("Didn't find %(d)s deviceName attached to %(i)s" % { ++ 'd': disk_name, ++ 'i': instance, ++ }) ++ return ++ ++ request = CONN.instances().detachDisk( ++ project=PROJECT, zone=ZONE, instance=instance, deviceName=device_name) ++ wait_for_operation(request.execute()) ++ ++ ++def attach_disk(instance, disk_name): ++ location = 'zones/%s' % ZONE ++ if PARAMETERS['disk_scope'] == 'regional': ++ location = 'regions/%s' % REGION ++ prefix = 'https://www.googleapis.com/compute/v1' ++ body = { ++ 'source': '%(prefix)s/projects/%(project)s/%(location)s/disks/%(disk)s' % { ++ 'prefix': prefix, ++ 'project': PROJECT, ++ 'location': location, ++ 'disk': disk_name, ++ }, ++ } ++ ++ # Customer-Supplied Encryption Key (CSEK) ++ if PARAMETERS['disk_csek_file']: ++ with open(PARAMETERS['disk_csek_file']) as csek_file: ++ body['diskEncryptionKey'] = { ++ 'rawKey': csek_file.read(), ++ } ++ ++ if PARAMETERS['device_name']: ++ body['deviceName'] = PARAMETERS['device_name'] ++ ++ if PARAMETERS['mode']: ++ body['mode'] = PARAMETERS['mode'] ++ ++ force_attach = None ++ if PARAMETERS['disk_scope'] == 'regional': ++ # Python API misses disk-scope argument. ++ force_attach = True ++ else: ++ # If this disk is attached to some instance, detach it first. 
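++ # A persistent disk can be attached READ_WRITE to at most one
++ # instance at a time, so each current user must release the disk
++ # before attachDisk() on this node can succeed.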
++ for other_instance in LIST_DISK_ATTACHED_INSTANCES: ++ logger.info("Detaching disk %(disk_name)s from other instance %(i)s" % { ++ 'disk_name': PARAMETERS['disk_name'], ++ 'i': other_instance, ++ }) ++ detach_disk(other_instance, PARAMETERS['disk_name']) ++ ++ request = CONN.instances().attachDisk( ++ project=PROJECT, zone=ZONE, instance=instance, body=body, ++ forceAttach=force_attach) ++ wait_for_operation(request.execute()) ++ ++ ++def fetch_data(): ++ configure_logs() ++ populate_vars() ++ ++ ++def gcp_pd_move_start(): ++ fetch_data() ++ if not is_disk_attached(INSTANCE_NAME): ++ logger.info("Attaching disk %(disk_name)s to %(instance)s" % { ++ 'disk_name': PARAMETERS['disk_name'], ++ 'instance': INSTANCE_NAME, ++ }) ++ attach_disk(INSTANCE_NAME, PARAMETERS['disk_name']) ++ ++ ++def gcp_pd_move_stop(): ++ fetch_data() ++ if is_disk_attached(INSTANCE_NAME): ++ logger.info("Detaching disk %(disk_name)s to %(instance)s" % { ++ 'disk_name': PARAMETERS['disk_name'], ++ 'instance': INSTANCE_NAME, ++ }) ++ detach_disk(INSTANCE_NAME, PARAMETERS['disk_name']) ++ ++ ++def gcp_pd_move_status(): ++ fetch_data() ++ if is_disk_attached(INSTANCE_NAME): ++ logger.info("Disk %(disk_name)s is correctly attached to %(instance)s" % { ++ 'disk_name': PARAMETERS['disk_name'], ++ 'instance': INSTANCE_NAME, ++ }) ++ else: ++ sys.exit(ocf.OCF_NOT_RUNNING) ++ ++ ++def main(): ++ if len(sys.argv) < 2: ++ logger.error('Missing argument') ++ return ++ ++ command = sys.argv[1] ++ if 'meta-data' in command: ++ print(METADATA) ++ return ++ ++ if command in 'start': ++ gcp_pd_move_start() ++ elif command in 'stop': ++ gcp_pd_move_stop() ++ elif command in ('monitor', 'status'): ++ gcp_pd_move_status() ++ else: ++ configure_logs() ++ logger.error('no such function %s' % str(command)) ++ ++ ++if __name__ == "__main__": ++ main() diff --git a/SOURCES/bz1633251-gcp-pd-move-2-use-OCF_FUNCTIONS_DIR.patch b/SOURCES/bz1633251-gcp-pd-move-2-use-OCF_FUNCTIONS_DIR.patch new file mode 100644 index 0000000..9a9681c --- /dev/null +++ b/SOURCES/bz1633251-gcp-pd-move-2-use-OCF_FUNCTIONS_DIR.patch @@ -0,0 +1,18 @@ +commit cbe0e6507992b50afbaebc46dfaf8955cc02e5ec +Author: Oyvind Albrigtsen + + Python agents: use OCF_FUNCTIONS_DIR env variable when available + +diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +index f9f6c316..c5007a43 100755 +--- a/heartbeat/gcp-pd-move.in ++++ b/heartbeat/gcp-pd-move.in +@@ -25,7 +25,7 @@ import re + import sys + import time + +-OCF_FUNCTIONS_DIR = "%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os.environ.get("OCF_ROOT")) + sys.path.append(OCF_FUNCTIONS_DIR) + + import ocf diff --git a/SOURCES/bz1633251-gcp-pd-move-3-add-stackdriver_logging-to-metadata.patch b/SOURCES/bz1633251-gcp-pd-move-3-add-stackdriver_logging-to-metadata.patch new file mode 100644 index 0000000..5819b94 --- /dev/null +++ b/SOURCES/bz1633251-gcp-pd-move-3-add-stackdriver_logging-to-metadata.patch @@ -0,0 +1,48 @@ +From 4fa41a1d7b4bee31526649c40cc4c58bc6333917 Mon Sep 17 00:00:00 2001 +From: masaki-tamura +Date: Wed, 2 Oct 2019 17:12:42 +0900 +Subject: [PATCH 1/2] add parameter stackdriver_logging + +--- + heartbeat/gcp-pd-move.in | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +index c5007a43c..fac5c9744 100755 +--- a/heartbeat/gcp-pd-move.in ++++ b/heartbeat/gcp-pd-move.in +@@ -102,6 +102,11 @@ correctly. 
+ Optional device name
+
++
++Use stackdriver_logging output to global resource (yes, true, enabled)
++Use stackdriver_logging
++
++
+
+
+
+From f762ce3da00e1775587a04751a8828ba004fb534 Mon Sep 17 00:00:00 2001
+From: masaki-tamura
+Date: Wed, 2 Oct 2019 17:44:30 +0900
+Subject: [PATCH 2/2] default no
+
+---
+ heartbeat/gcp-pd-move.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
+index fac5c9744..7fabc80dc 100755
+--- a/heartbeat/gcp-pd-move.in
++++ b/heartbeat/gcp-pd-move.in
+@@ -105,7 +105,7 @@ correctly.
+
+ Use stackdriver_logging output to global resource (yes, true, enabled)
+ Use stackdriver_logging
+-
++
+
+
+
diff --git a/SOURCES/bz1633251-gcp-pd-move-4-fixes-and-improvements.patch b/SOURCES/bz1633251-gcp-pd-move-4-fixes-and-improvements.patch
new file mode 100644
index 0000000..79e1bc0
--- /dev/null
+++ b/SOURCES/bz1633251-gcp-pd-move-4-fixes-and-improvements.patch
@@ -0,0 +1,176 @@
+From 9dedf4d4ad3a94e4ce75e0f29ffdd018e3709ae3 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Thu, 28 May 2020 11:39:20 +0200
+Subject: [PATCH] gcp-pd-move: fixes and improvements
+
+- Fixed Python 3 encoding issue
+- Improved metadata
+- Change monitor loglevel to debug
+- Removed "regional" functionality that doesn't work with attachDisk()
+- Updated rw/ro to READ_WRITE/READ_ONLY in metadata/default value
+---
+ heartbeat/gcp-pd-move.in | 63 ++++++++++++++++++++--------------------
+ 1 file changed, 32 insertions(+), 31 deletions(-)
+ mode change 100755 => 100644 heartbeat/gcp-pd-move.in
+
+diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
+old mode 100755
+new mode 100644
+index 7fabc80dc..f82bd25e5
+--- a/heartbeat/gcp-pd-move.in
++++ b/heartbeat/gcp-pd-move.in
+@@ -29,6 +29,7 @@ OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os.
+ sys.path.append(OCF_FUNCTIONS_DIR)
+
+ import ocf
++from ocf import logger
+
+ try:
+ import googleapiclient.discovery
+@@ -48,16 +49,16 @@ else:
+ CONN = None
+ PROJECT = None
+ ZONE = None
+-REGION = None
+ LIST_DISK_ATTACHED_INSTANCES = None
+ INSTANCE_NAME = None
+
+ PARAMETERS = {
+- 'disk_name': None,
+- 'disk_scope': None,
+- 'disk_csek_file': None,
+- 'mode': None,
+- 'device_name': None,
++ 'disk_name': '',
++ 'disk_scope': 'detect',
++ 'disk_csek_file': '',
++ 'mode': "READ_WRITE",
++ 'device_name': '',
++ 'stackdriver_logging': 'no',
+ }
+
+ MANDATORY_PARAMETERS = ['disk_name', 'disk_scope']
+@@ -80,32 +81,32 @@ correctly.
+ + + +-''' ++'''.format(PARAMETERS['disk_name'], PARAMETERS['disk_scope'], ++ PARAMETERS['disk_csek_file'], PARAMETERS['mode'], PARAMETERS['device_name'], ++ PARAMETERS['stackdriver_logging']) + + + def get_metadata(metadata_key, params=None, timeout=None): +@@ -137,7 +140,7 @@ def get_metadata(metadata_key, params=None, timeout=None): + url = '%s?%s' % (metadata_url, params) + request = urlrequest.Request(url, headers=METADATA_HEADERS) + request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open(request, timeout=timeout * 1.1).read() ++ return request_opener.open(request, timeout=timeout * 1.1).read().decode("utf-8") + + + def populate_vars(): +@@ -145,11 +148,8 @@ def populate_vars(): + global INSTANCE_NAME + global PROJECT + global ZONE +- global REGION + global LIST_DISK_ATTACHED_INSTANCES + +- global PARAMETERS +- + # Populate global vars + try: + CONN = googleapiclient.discovery.build('compute', 'v1') +@@ -158,11 +158,12 @@ def populate_vars(): + sys.exit(ocf.OCF_ERR_CONFIGURED) + + for param in PARAMETERS: +- value = os.environ.get('OCF_RESKEY_%s' % param, None) ++ value = os.environ.get('OCF_RESKEY_%s' % param, PARAMETERS[param]) + if not value and param in MANDATORY_PARAMETERS: + logger.error('Missing %s mandatory parameter' % param) + sys.exit(ocf.OCF_ERR_CONFIGURED) +- PARAMETERS[param] = value ++ elif value: ++ PARAMETERS[param] = value + + try: + INSTANCE_NAME = get_metadata('instance/name') +@@ -172,8 +173,10 @@ def populate_vars(): + sys.exit(ocf.OCF_ERR_CONFIGURED) + + PROJECT = get_metadata('project/project-id') +- ZONE = get_metadata('instance/zone').split('/')[-1] +- REGION = ZONE[:-2] ++ if PARAMETERS['disk_scope'] in ['detect', 'regional']: ++ ZONE = get_metadata('instance/zone').split('/')[-1] ++ else: ++ ZONE = PARAMETERS['disk_scope'] + LIST_DISK_ATTACHED_INSTANCES = get_disk_attached_instances( + PARAMETERS['disk_name']) + +@@ -270,8 +273,6 @@ def detach_disk(instance, disk_name): + + def attach_disk(instance, disk_name): + location = 'zones/%s' % ZONE +- if PARAMETERS['disk_scope'] == 'regional': +- location = 'regions/%s' % REGION + prefix = 'https://www.googleapis.com/compute/v1' + body = { + 'source': '%(prefix)s/projects/%(project)s/%(location)s/disks/%(disk)s' % { +@@ -342,7 +343,7 @@ def gcp_pd_move_stop(): + def gcp_pd_move_status(): + fetch_data() + if is_disk_attached(INSTANCE_NAME): +- logger.info("Disk %(disk_name)s is correctly attached to %(instance)s" % { ++ logger.debug("Disk %(disk_name)s is correctly attached to %(instance)s" % { + 'disk_name': PARAMETERS['disk_name'], + 'instance': INSTANCE_NAME, + }) diff --git a/SOURCES/bz1633251-gcp-pd-move-5-bundle.patch b/SOURCES/bz1633251-gcp-pd-move-5-bundle.patch new file mode 100644 index 0000000..6d6b244 --- /dev/null +++ b/SOURCES/bz1633251-gcp-pd-move-5-bundle.patch @@ -0,0 +1,10 @@ +--- ClusterLabs-resource-agents-e711383f/heartbeat/gcp-pd-move.in 2020-05-28 14:46:28.396220588 +0200 ++++ /home/oalbrigt/src/resource-agents/gcp-pd-move.rhel8 2020-05-28 14:16:25.845308597 +0200 +@@ -32,6 +32,7 @@ + from ocf import logger + + try: ++ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp/google-cloud-sdk/lib/third_party') + import googleapiclient.discovery + except ImportError: + pass diff --git a/SOURCES/bz1635785-redis-pidof-basename.patch b/SOURCES/bz1635785-redis-pidof-basename.patch new file mode 100644 index 0000000..32c57eb --- /dev/null +++ b/SOURCES/bz1635785-redis-pidof-basename.patch @@ -0,0 +1,61 @@ +From 2462caf264c487810805c40a546a4dc3f953c340 Mon 
Sep 17 00:00:00 2001
+From: Michele Baldessari
+Date: Wed, 3 Oct 2018 18:07:31 +0200
+Subject: [PATCH] Do not use the absolute path in redis' pidof calls
+
+The reason for this is that newer kernels (we saw this on a 4.18 kernel)
+can limit access to /proc/<pid>/{cwd,exe,root} and so pidof will fail to
+identify the process when using the full path names.
+This access limitation happens even with the root user:
+()[root@ra1 /]$ ls -l /proc/32/ |grep redis-server
+ls: cannot read symbolic link '/proc/32/cwd': Permission denied
+ls: cannot read symbolic link '/proc/32/root': Permission denied
+ls: cannot read symbolic link '/proc/32/exe': Permission denied
+
+For this reason the 'pidof /usr/bin/redis-server' calls will fail
+when running inside containers that have this kernel protection
+mechanism.
+
+We tested this change and successfully obtained a running redis cluster:
+ podman container set: redis-bundle [192.168.222.1:5000/redis:latest]
+ Replica[0]
+ redis-bundle-podman-0 (ocf::heartbeat:podman): Started ra1
+ redis-bundle-0 (ocf::pacemaker:remote): Started ra1
+ redis (ocf::heartbeat:redis): Master redis-bundle-0
+ Replica[1]
+ redis-bundle-podman-1 (ocf::heartbeat:podman): Started ra2
+ redis-bundle-1 (ocf::pacemaker:remote): Started ra2
+ redis (ocf::heartbeat:redis): Slave redis-bundle-1
+ Replica[2]
+ redis-bundle-podman-2 (ocf::heartbeat:podman): Started ra3
+ redis-bundle-2 (ocf::pacemaker:remote): Started ra3
+ redis (ocf::heartbeat:redis): Slave redis-bundle-2
+
+Signed-off-By: Damien Ciabrini
+Signed-off-by: Michele Baldessari
+---
+ heartbeat/redis.in | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/redis.in b/heartbeat/redis.in
+index ddc62d8a7..1dff067e9 100644
+--- a/heartbeat/redis.in
++++ b/heartbeat/redis.in
+@@ -316,7 +316,7 @@ simple_status() {
+ fi
+
+ pid="$(<"$REDIS_PIDFILE")"
+- pidof "$REDIS_SERVER" | grep -q "\<$pid\>" || return $OCF_NOT_RUNNING
++ pidof $(basename "$REDIS_SERVER") | grep -q "\<$pid\>" || return $OCF_NOT_RUNNING
+
+ ocf_log debug "monitor: redis-server running under pid $pid"
+
+@@ -465,7 +465,7 @@ redis_start() {
+ break
+ elif (( info[loading] == 1 )); then
+ sleep "${info[loading_eta_seconds]}"
+- elif pidof "$REDIS_SERVER" >/dev/null; then
++ elif pidof $(basename "$REDIS_SERVER") >/dev/null; then
+ # unknown error, but the process still exists.
+ # This check is mainly because redis daemonizes before it starts listening, causing `redis-cli` to fail
+ # See https://github.com/antirez/redis/issues/2368
diff --git a/SOURCES/bz1640587-pgsql-ignore-masters-re-promote.patch b/SOURCES/bz1640587-pgsql-ignore-masters-re-promote.patch
new file mode 100644
index 0000000..b371857
--- /dev/null
+++ b/SOURCES/bz1640587-pgsql-ignore-masters-re-promote.patch
@@ -0,0 +1,40 @@
+From 355cd29f2dee828bfe0a4ab64f425827aba7dd3b Mon Sep 17 00:00:00 2001
+From: Hideo Yamauchi
+Date: Wed, 17 Oct 2018 09:54:37 +0900
+Subject: [PATCH] Mid: pgsql: Fix to ignore Master's re-promote.
+
+---
+ heartbeat/pgsql | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/heartbeat/pgsql b/heartbeat/pgsql
+index 380866da1..38f6ceeb7 100755
+--- a/heartbeat/pgsql
++++ b/heartbeat/pgsql
+@@ -680,6 +680,7 @@ pgsql_start() {
+
+ #pgsql_promote: Promote PostgreSQL
+ pgsql_promote() {
++ local output
+ local target
+ local rc
+
+@@ -687,6 +688,18 @@ pgsql_promote() {
+ ocf_exit_reason "Not in a replication mode."
+ return $OCF_ERR_CONFIGURED
+ fi
++
++ output=`exec_sql "${CHECK_MS_SQL}"`
++ if [ $? -ne 0 ]; then
++ report_psql_error $rc $loglevel "Can't get PostgreSQL recovery status on promote."
++ return $OCF_ERR_GENERIC
++ fi
++
++ if [ "$output" = "f" ]; then
++ ocf_log info "PostgreSQL is already Master. Don't execute promote."
++ return $OCF_SUCCESS
++ fi
++
+ rm -f ${XLOG_NOTE_FILE}.*
+
+ for target in $NODE_LIST; do
diff --git a/SOURCES/bz1642027-nfsserver-var-lib-nfs-fix.patch b/SOURCES/bz1642027-nfsserver-var-lib-nfs-fix.patch
new file mode 100644
index 0000000..838a8aa
--- /dev/null
+++ b/SOURCES/bz1642027-nfsserver-var-lib-nfs-fix.patch
@@ -0,0 +1,43 @@
+diff -uNr a/heartbeat/nfsserver b/heartbeat/nfsserver
+--- a/heartbeat/nfsserver 2018-10-10 17:02:47.873199077 +0200
++++ b/heartbeat/nfsserver 2018-10-11 15:24:41.782048475 +0200
+@@ -402,7 +402,6 @@
+ return
+ fi
+
+- [ -d "$fp" ] || mkdir -p $fp
+ [ -d "$OCF_RESKEY_rpcpipefs_dir" ] || mkdir -p $OCF_RESKEY_rpcpipefs_dir
+ [ -d "$fp/v4recovery" ] || mkdir -p $fp/v4recovery
+
+@@ -437,10 +436,21 @@
+ return
+ fi
+
++ [ -d "$fp" ] || mkdir -p $fp
++
+ if is_bound /var/lib/nfs; then
+ ocf_log debug "$fp is already bound to /var/lib/nfs"
+ return 0
+ fi
++
++ case $EXEC_MODE in
++ [23]) if nfs_exec status var-lib-nfs-rpc_pipefs.mount > /dev/null 2>&1; then
++ ocf_log debug "/var/lib/nfs/rpc_pipefs already mounted. Unmounting in preparation to bind mount nfs dir"
++ systemctl stop var-lib-nfs-rpc_pipefs.mount
++ fi
++ ;;
++ esac
++
+ mount --bind $fp /var/lib/nfs
+ [ $SELINUX_ENABLED -eq 0 ] && restorecon /var/lib/nfs
+ }
+@@ -612,8 +622,8 @@
+ fi
+
+ is_redhat_based && set_env_args
+- prepare_directory
+ bind_tree
++ prepare_directory
+
+ if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
+ mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
diff --git a/SOURCES/bz1643307-LVM-activate-dont-fail-initial-probe.patch b/SOURCES/bz1643307-LVM-activate-dont-fail-initial-probe.patch
new file mode 100644
index 0000000..0b7d485
--- /dev/null
+++ b/SOURCES/bz1643307-LVM-activate-dont-fail-initial-probe.patch
@@ -0,0 +1,24 @@
+From 848d62c32b355a03c2ad8d246eb3e34b04af07ca Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Wed, 9 Jan 2019 16:49:41 +0100
+Subject: [PATCH] LVM-activate: dont fail initial probe
+
+---
+ heartbeat/LVM-activate | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index f46932c1c..49ab717a3 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -323,6 +323,10 @@ lvmlockd_check()
+
+ # Good: lvmlockd is running, and clvmd is not running
+ if ! pgrep lvmlockd >/dev/null 2>&1 ; then
++ if ocf_is_probe; then
++ exit $OCF_NOT_RUNNING
++ fi
++
+ ocf_exit_reason "lvmlockd daemon is not running!" 
+ exit $OCF_ERR_CONFIGURED + fi diff --git a/SOURCES/bz1658664-LVM-activate-dont-require-locking_type.patch b/SOURCES/bz1658664-LVM-activate-dont-require-locking_type.patch new file mode 100644 index 0000000..5b975a1 --- /dev/null +++ b/SOURCES/bz1658664-LVM-activate-dont-require-locking_type.patch @@ -0,0 +1,27 @@ +From 4f122cd0cf46c1fdc1badb22049607a6abf0c885 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 4 Feb 2019 17:04:59 +0100 +Subject: [PATCH] LVM-activate: only check locking_type when LVM < v2.03 + +--- + heartbeat/LVM-activate | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index c2239d881..3c462c75c 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -311,7 +311,12 @@ config_verify() + lvmlockd_check() + { + config_verify "global/use_lvmlockd" "1" +- config_verify "global/locking_type" "1" ++ ++ # locking_type was removed from config in v2.03 ++ ocf_version_cmp "$(lvmconfig --version | awk '/LVM ver/ {sub(/\(.*/, "", $3); print $3}')" "2.03" ++ if [ "$?" -eq 0 ]; then ++ config_verify "global/locking_type" "1" ++ fi + + # We recommend to activate one LV at a time so that this specific volume + # binds to a proper filesystem to protect the data diff --git a/SOURCES/bz1662466-vdo-vol-fix-monitor-action.patch b/SOURCES/bz1662466-vdo-vol-fix-monitor-action.patch new file mode 100644 index 0000000..58b13ce --- /dev/null +++ b/SOURCES/bz1662466-vdo-vol-fix-monitor-action.patch @@ -0,0 +1,12 @@ +diff -uNr a/heartbeat/vdo-vol b/heartbeat/vdo-vol +--- a/heartbeat/vdo-vol 2018-11-07 09:11:23.037835110 +0100 ++++ b/heartbeat/vdo-vol 2018-11-07 09:12:41.322373901 +0100 +@@ -145,7 +145,7 @@ + + vdo_monitor(){ + status=$(vdo status $OPTIONS 2>&1) +- MODE=$(vdostats vdo_vol --verbose | grep "operating mode" | awk '{print $NF}') ++ MODE=$(vdostats --verbose ${OCF_RESKEY_volume} | grep "operating mode" | awk '{print $NF}') + + case "$status" in + *"Device mapper status: not available"*) diff --git a/SOURCES/bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch b/SOURCES/bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch new file mode 100644 index 0000000..571196b --- /dev/null +++ b/SOURCES/bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch @@ -0,0 +1,59 @@ +From b42ef7555de86cc29d165ae17682c223bfb23b6e Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 5 Nov 2018 16:38:01 +0100 +Subject: [PATCH 1/2] tomcat: use systemd on RHEL when catalina.sh is + unavailable + +--- + heartbeat/tomcat | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/tomcat b/heartbeat/tomcat +index 4812a0133..833870038 100755 +--- a/heartbeat/tomcat ++++ b/heartbeat/tomcat +@@ -613,7 +613,6 @@ TOMCAT_NAME="${OCF_RESKEY_tomcat_name-tomcat}" + TOMCAT_CONSOLE="${OCF_RESKEY_script_log-/var/log/$TOMCAT_NAME.log}" + RESOURCE_TOMCAT_USER="${OCF_RESKEY_tomcat_user-root}" + RESOURCE_STATUSURL="${OCF_RESKEY_statusurl-http://127.0.0.1:8080}" +-OCF_RESKEY_force_systemd_default=0 + + JAVA_HOME="${OCF_RESKEY_java_home}" + JAVA_OPTS="${OCF_RESKEY_java_opts}" +@@ -630,6 +629,13 @@ if [ -z "$CATALINA_PID" ]; then + CATALINA_PID="${HA_RSCTMP}/${TOMCAT_NAME}_tomcatstate/catalina.pid" + fi + ++# Only default to true for RedHat systems without catalina.sh ++if [ -e "$CATALINA_HOME/bin/catalina.sh" ] || ! 
is_redhat_based; then
++	OCF_RESKEY_force_systemd_default=0
++else
++	OCF_RESKEY_force_systemd_default=1
++fi
++
+ MAX_STOP_TIME="${OCF_RESKEY_max_stop_time}"
+
+ : ${OCF_RESKEY_force_systemd=${OCF_RESKEY_force_systemd_default}}
+
+From 9cb2b142a9ecb3a2d5a51cdd51b4005f08b9a97b Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Mon, 5 Nov 2018 17:09:43 +0100
+Subject: [PATCH 2/2] ocf-distro: add regex for RedHat version
+
+---
+ heartbeat/ocf-distro | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/ocf-distro b/heartbeat/ocf-distro
+index 530ee57ed..f69910c98 100644
+--- a/heartbeat/ocf-distro
++++ b/heartbeat/ocf-distro
+@@ -39,7 +39,7 @@ get_os_ver() {
+ VER=$(cat $_DEBIAN_VERSION_FILE)
+ elif [ -f $_REDHAT_RELEASE_FILE ]; then
+ OS=RedHat # redhat or similar
+- VER= # here some complex sed script
++ VER=$(sed "s/.* release \([^ ]\+\).*/\1/" $_REDHAT_RELEASE_FILE)
+ else
+ OS=$(uname -s)
+ VER=$(uname -r)
diff --git a/SOURCES/bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch b/SOURCES/bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch
new file mode 100644
index 0000000..af1974c
--- /dev/null
+++ b/SOURCES/bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch
@@ -0,0 +1,23 @@
+From 13511f843b2b0fa1b8b306beac041e0855be05a6 Mon Sep 17 00:00:00 2001
+From: Valentin Vidic
+Date: Tue, 15 Jan 2019 15:45:03 +0100
+Subject: [PATCH] LVM-activate: make vgname not unique
+
+If activating one lvname at a time, vgname will not be unique.
+---
+ heartbeat/LVM-activate | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index f46932c1c..bc448c9c1 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -102,7 +102,7 @@ because some DLM lockspaces might be in use and cannot be closed automatically.
+ This agent activates/deactivates logical volumes.
+
+
+-<parameter name="vgname" unique="1" required="1">
++<parameter name="vgname" unique="0" required="1">
+
+ The volume group name.
+
diff --git a/SOURCES/bz1667414-2-LVM-activate-only-count-volumes.patch b/SOURCES/bz1667414-2-LVM-activate-only-count-volumes.patch
new file mode 100644
index 0000000..5911e0e
--- /dev/null
+++ b/SOURCES/bz1667414-2-LVM-activate-only-count-volumes.patch
@@ -0,0 +1,29 @@
+From ee9a47f97dd8b0cb51033db7879a79588aab409c Mon Sep 17 00:00:00 2001
+From: Valentin Vidic
+Date: Tue, 15 Jan 2019 15:40:01 +0100
+Subject: [PATCH] LVM-activate: fix dmsetup check
+
+When there are no devices in the system dmsetup outputs one line:
+
+ # dmsetup info -c
+ No devices found
+---
+ heartbeat/LVM-activate | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index f46932c1c..c3225e1cb 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -715,9 +715,9 @@ lvm_status() {
+ if [ -n "${LV}" ]; then
+ # dmsetup ls? It cannot accept device name. It's
+ # too heavy to list all DM devices.
+- dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" | wc -l ) ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" | grep -c -v '^No devices found') + else +- dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" 2>/dev/null | wc -l ) ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" | grep -c -v '^No devices found') + fi + + if [ $dm_count -eq 0 ]; then diff --git a/SOURCES/bz1669140-Route-make-family-parameter-optional.patch b/SOURCES/bz1669140-Route-make-family-parameter-optional.patch new file mode 100644 index 0000000..81ab09d --- /dev/null +++ b/SOURCES/bz1669140-Route-make-family-parameter-optional.patch @@ -0,0 +1,31 @@ +From d95765aba205ea59dcb99378bed4c6d0593ebdb4 Mon Sep 17 00:00:00 2001 +From: fpicot +Date: Fri, 11 Jan 2019 11:38:18 -0500 +Subject: [PATCH] Route: make family parameter optional + +--- + heartbeat/Route | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/Route b/heartbeat/Route +index 67bdf6bfc..2da58bce1 100755 +--- a/heartbeat/Route ++++ b/heartbeat/Route +@@ -124,7 +124,7 @@ The routing table to be configured for the route. + + + +- ++ + + The address family to be used for the route + ip4 IP version 4 +@@ -132,7 +132,7 @@ ip6 IP version 6 + detect Detect from 'destination' address. + + Address Family +- ++ + + + diff --git a/SOURCES/bz1683548-redis-mute-password-warning.patch b/SOURCES/bz1683548-redis-mute-password-warning.patch new file mode 100644 index 0000000..b3b89e0 --- /dev/null +++ b/SOURCES/bz1683548-redis-mute-password-warning.patch @@ -0,0 +1,62 @@ +From 6303448af77d2ed64c7436a84b30cf7fa4941e19 Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Wed, 30 Jan 2019 21:36:17 +0100 +Subject: [PATCH] redis: Filter warning from stderr when calling 'redis-cli -a' + +In some versions of redis (starting with 4.0.10) we have commits [1] and +[2] which add a warning on stderr which will be printed out every single +time a monitor operation takes place: + + foo pacemaker-remoted[57563]: notice: redis_monitor_20000:1930:stderr + [ Warning: Using a password with '-a' option on the command line interface may not be safe. ] + +Later on commit [3] (merged with 5.0rc4) was merged which added the option +'--no-auth-warning' to disable said warning since it broke a bunch of +scripts [4]. I tried to forcibly either try the command twice (first +with --no-auth-warning and then without in case of errors) but it is +impossible to distinguish between error due to missing param and other +errors. + +So instead of inspecting the version of the redis-cli tool and do the following: +- >= 5.0.0 use --no-auth-warning all the time +- >= 4.0.10 & < 5.0.0 filter the problematic line from stderr only +- else do it like before + +We simply filter out from stderr the 'Using a password' message +unconditionally while making sure we keep stdout just the same. + +Tested on a redis 4.0.10 cluster and confirmed that it is working as +intended. + +All this horror and pain is due to the fact that redis does not support +any other means to pass a password (we could in theory first connect to +the server and then issue an AUTH command, but that seems even more +complex and error prone). 
See [5] for more info (or [6] for extra fun) + +[1] https://github.com/antirez/redis/commit/c082221aefbb2a472c7193dbdbb90900256ce1a2 +[2] https://github.com/antirez/redis/commit/ef931ef93e909b4f504e8c6fbed350ed70c1c67c +[3] https://github.com/antirez/redis/commit/a4ef94d2f71a32f73ce4ebf154580307a144b48f +[4] https://github.com/antirez/redis/issues/5073 +[5] https://github.com/antirez/redis/issues/3483 +[6] https://github.com/antirez/redis/pull/2413 + +Signed-off-by: Michele Baldessari +--- + heartbeat/redis.in | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index 1dff067e9..e257bcc5e 100644 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -302,7 +302,9 @@ set_score() + redis_client() { + ocf_log debug "redis_client: '$REDIS_CLIENT' -s '$REDIS_SOCKET' $*" + if [ -n "$clientpasswd" ]; then +- "$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" | sed 's/\r//' ++ # Starting with 4.0.10 there is a warning on stderr when using a pass ++ # Once we stop supporting versions < 5.0.0 we can add --no-auth-warning here ++ ("$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" 2>&1 >&3 3>&- | grep -v "Using a password" >&2 3>&-) 3>&1 | sed 's/\r//' + else + "$REDIS_CLIENT" -s "$REDIS_SOCKET" "$@" | sed 's/\r//' + fi diff --git a/SOURCES/bz1689184-Squid-1-fix-pidfile-issue.patch b/SOURCES/bz1689184-Squid-1-fix-pidfile-issue.patch new file mode 100644 index 0000000..1ebb942 --- /dev/null +++ b/SOURCES/bz1689184-Squid-1-fix-pidfile-issue.patch @@ -0,0 +1,70 @@ +From d228d41c61f57f2576dd87aa7be86f9ca26e3059 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 18 Mar 2019 16:03:14 +0100 +Subject: [PATCH] Squid: fix pid file issue due to new Squid version saving the + PID of the parent process instead of the listener child process + +--- + heartbeat/Squid.in | 21 +++++---------------- + 1 file changed, 5 insertions(+), 16 deletions(-) + +diff --git a/heartbeat/Squid.in b/heartbeat/Squid.in +index a99892d75..0b3c8ea86 100644 +--- a/heartbeat/Squid.in ++++ b/heartbeat/Squid.in +@@ -96,12 +96,9 @@ for a squid instance managed by this RA. + + + +- +- +-This is a required parameter. This parameter specifies a process id file +-for a squid instance managed by this RA. +- +-Pidfile ++ ++Deprecated - do not use anymore ++deprecated - do not use anymore + + + +@@ -175,8 +172,8 @@ get_pids() + # Seek by pattern + SQUID_PIDS[0]=$(pgrep -f "$PROCESS_PATTERN") + +- # Seek by pidfile +- SQUID_PIDS[1]=$(awk '1{print $1}' $SQUID_PIDFILE 2>/dev/null) ++ # Seek by child process ++ SQUID_PIDS[1]=$(pgrep -P ${SQUID_PIDS[0]}) + + if [[ -n "${SQUID_PIDS[1]}" ]]; then + typeset exe +@@ -306,7 +303,6 @@ stop_squid() + while true; do + get_pids + if is_squid_dead; then +- rm -f $SQUID_PIDFILE + return $OCF_SUCCESS + fi + (( lapse_sec = lapse_sec + 1 )) +@@ -326,7 +322,6 @@ stop_squid() + kill -KILL ${SQUID_PIDS[0]} ${SQUID_PIDS[2]} + sleep 1 + if is_squid_dead; then +- rm -f $SQUID_PIDFILE + return $OCF_SUCCESS + fi + done +@@ -389,12 +384,6 @@ if [[ ! 
-x "$SQUID_EXE" ]]; then + exit $OCF_ERR_CONFIGURED + fi + +-SQUID_PIDFILE="${OCF_RESKEY_squid_pidfile}" +-if [[ -z "$SQUID_PIDFILE" ]]; then +- ocf_exit_reason "SQUID_PIDFILE is not defined" +- exit $OCF_ERR_CONFIGURED +-fi +- + SQUID_PORT="${OCF_RESKEY_squid_port}" + if [[ -z "$SQUID_PORT" ]]; then + ocf_exit_reason "SQUID_PORT is not defined" diff --git a/SOURCES/bz1689184-Squid-2-dont-run-pgrep-without-PID.patch b/SOURCES/bz1689184-Squid-2-dont-run-pgrep-without-PID.patch new file mode 100644 index 0000000..bb6a894 --- /dev/null +++ b/SOURCES/bz1689184-Squid-2-dont-run-pgrep-without-PID.patch @@ -0,0 +1,24 @@ +From e370845f41d39d93f76fa34502d62e2513d5eb73 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 29 May 2019 14:07:46 +0200 +Subject: [PATCH] Squid: dont run pgrep -P without PID + +--- + heartbeat/Squid.in | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/Squid.in b/heartbeat/Squid.in +index 0b3c8ea86..e62e7ee66 100644 +--- a/heartbeat/Squid.in ++++ b/heartbeat/Squid.in +@@ -173,7 +173,9 @@ get_pids() + SQUID_PIDS[0]=$(pgrep -f "$PROCESS_PATTERN") + + # Seek by child process +- SQUID_PIDS[1]=$(pgrep -P ${SQUID_PIDS[0]}) ++ if [[ -n "${SQUID_PIDS[0]}" ]]; then ++ SQUID_PIDS[1]=$(pgrep -P ${SQUID_PIDS[0]}) ++ fi + + if [[ -n "${SQUID_PIDS[1]}" ]]; then + typeset exe diff --git a/SOURCES/bz1691456-gcloud-dont-detect-python2.patch b/SOURCES/bz1691456-gcloud-dont-detect-python2.patch new file mode 100644 index 0000000..9abbd09 --- /dev/null +++ b/SOURCES/bz1691456-gcloud-dont-detect-python2.patch @@ -0,0 +1,29 @@ +diff -uNr a/bundled/gcp/google-cloud-sdk/bin/gcloud b/bundled/gcp/google-cloud-sdk/bin/gcloud +--- a/bundled/gcp/google-cloud-sdk/bin/gcloud 2019-04-04 12:01:28.838027640 +0200 ++++ b/bundled/gcp/google-cloud-sdk/bin/gcloud 2019-04-04 12:03:21.577089065 +0200 +@@ -74,24 +74,7 @@ + + # if CLOUDSDK_PYTHON is empty + if [ -z "$CLOUDSDK_PYTHON" ]; then +- # if python2 exists then plain python may point to a version != 2 +- if _cloudsdk_which python2 >/dev/null; then +- CLOUDSDK_PYTHON=python2 +- elif _cloudsdk_which python2.7 >/dev/null; then +- # this is what some OS X versions call their built-in Python +- CLOUDSDK_PYTHON=python2.7 +- elif _cloudsdk_which python >/dev/null; then +- # Use unversioned python if it exists. +- CLOUDSDK_PYTHON=python +- elif _cloudsdk_which python3 >/dev/null; then +- # We support python3, but only want to default to it if nothing else is +- # found. +- CLOUDSDK_PYTHON=python3 +- else +- # This won't work because it wasn't found above, but at this point this +- # is our best guess for the error message. +- CLOUDSDK_PYTHON=python +- fi ++ CLOUDSDK_PYTHON="/usr/libexec/platform-python" + fi + + # $PYTHONHOME can interfere with gcloud. 
Users should use
diff --git a/SOURCES/bz1692413-1-iSCSITarget-create-iqn-when-it-doesnt-exist.patch b/SOURCES/bz1692413-1-iSCSITarget-create-iqn-when-it-doesnt-exist.patch
new file mode 100644
index 0000000..d50b231
--- /dev/null
+++ b/SOURCES/bz1692413-1-iSCSITarget-create-iqn-when-it-doesnt-exist.patch
@@ -0,0 +1,31 @@
+From 9273b83edf6ee72a59511f307e168813ca3d31fd Mon Sep 17 00:00:00 2001
+From: colttt
+Date: Fri, 12 Oct 2018 15:29:48 +0200
+Subject: [PATCH] possible fix for #1026
+
+add an if-condition and remove a useless 'targetcli create'
+---
+ heartbeat/iSCSITarget.in | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/iSCSITarget.in b/heartbeat/iSCSITarget.in
+index e49a79016..9128fdc55 100644
+--- a/heartbeat/iSCSITarget.in
++++ b/heartbeat/iSCSITarget.in
+@@ -340,13 +340,13 @@ iSCSITarget_start() {
+ ocf_take_lock $TARGETLOCKFILE
+ ocf_release_lock_on_exit $TARGETLOCKFILE
+ ocf_run targetcli /iscsi set global auto_add_default_portal=false || exit $OCF_ERR_GENERIC
+- ocf_run targetcli /iscsi create ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC
++ if ! [ -d /sys/kernel/config/target/iscsi/${OCF_RESKEY_iqn} ] ; then
++ ocf_run targetcli /iscsi create ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC
++ fi
+ for portal in ${OCF_RESKEY_portals}; do
+ if [ $portal != ${OCF_RESKEY_portals_default} ] ; then
+ IFS=':' read -a sep_portal <<< "$portal"
+ ocf_run targetcli /iscsi/${OCF_RESKEY_iqn}/tpg1/portals create "${sep_portal[0]}" "${sep_portal[1]}" || exit $OCF_ERR_GENERIC
+- else
+- ocf_run targetcli /iscsi create ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC
+ fi
+ done
+ # in lio, we can set target parameters by manipulating
diff --git a/SOURCES/bz1692413-2-iSCSILogicalUnit-create-acls-fix.patch b/SOURCES/bz1692413-2-iSCSILogicalUnit-create-acls-fix.patch
new file mode 100644
index 0000000..a349e46
--- /dev/null
+++ b/SOURCES/bz1692413-2-iSCSILogicalUnit-create-acls-fix.patch
@@ -0,0 +1,24 @@
+From 0d53e80957a00016418080967892337b1b13f99d Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Tue, 30 Jul 2019 11:23:07 +0200
+Subject: [PATCH] iSCSILogicalUnit: only create acls if it doesnt exist
+
+---
+ heartbeat/iSCSILogicalUnit.in | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/iSCSILogicalUnit.in b/heartbeat/iSCSILogicalUnit.in
+index 0fe85b593..02045d754 100644
+--- a/heartbeat/iSCSILogicalUnit.in
++++ b/heartbeat/iSCSILogicalUnit.in
+@@ -420,8 +420,8 @@ iSCSILogicalUnit_start() {
+
+ if [ -n "${OCF_RESKEY_allowed_initiators}" ]; then
+ for initiator in ${OCF_RESKEY_allowed_initiators}; do
+- ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls create ${initiator} add_mapped_luns=False || exit $OCF_ERR_GENERIC
+- ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls/${initiator} create ${OCF_RESKEY_lun} ${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC
++ [ -d "/sys/kernel/config/target/iscsi/${OCF_RESKEY_target_iqn}/tpgt_1/acls" ] || ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls create ${initiator} add_mapped_luns=False || exit $OCF_ERR_GENERIC
++ [ -d "/sys/kernel/config/target/iscsi/${OCF_RESKEY_target_iqn}/tpgt_1/acls/${initiator}" ] || ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls/${initiator} create ${OCF_RESKEY_lun} ${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC
+ done
+ fi
+
diff --git a/SOURCES/bz1692960-mysql-galera-runuser-su-to-avoid-dac_override.patch b/SOURCES/bz1692960-mysql-galera-runuser-su-to-avoid-dac_override.patch
new file mode 100644
index 0000000..16f6caa
---
/dev/null +++ b/SOURCES/bz1692960-mysql-galera-runuser-su-to-avoid-dac_override.patch @@ -0,0 +1,93 @@ +From db6d12f4b7b10e214526512abe35307270f81c03 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 8 Aug 2019 14:48:13 +0200 +Subject: [PATCH] mysql/mariadb/galera: use runuser/su to avoid using SELinux + DAC_OVERRIDE + +--- + heartbeat/galera | 11 ++++++----- + heartbeat/mysql-common.sh | 16 ++++++++++++---- + 2 files changed, 18 insertions(+), 9 deletions(-) + +diff --git a/heartbeat/galera b/heartbeat/galera +index 9b9fe5569..056281fb8 100755 +--- a/heartbeat/galera ++++ b/heartbeat/galera +@@ -624,8 +624,7 @@ detect_last_commit() + local recover_args="--defaults-file=$OCF_RESKEY_config \ + --pid-file=$OCF_RESKEY_pid \ + --socket=$OCF_RESKEY_socket \ +- --datadir=$OCF_RESKEY_datadir \ +- --user=$OCF_RESKEY_user" ++ --datadir=$OCF_RESKEY_datadir" + local recovery_file_regex='s/.*WSREP\:.*position\s*recovery.*--log_error='\''\([^'\'']*\)'\''.*/\1/p' + local recovered_position_regex='s/.*WSREP\:\s*[R|r]ecovered\s*position.*\:\(.*\)\s*$/\1/p' + +@@ -654,7 +653,8 @@ detect_last_commit() + + ocf_log info "now attempting to detect last commit version using 'mysqld_safe --wsrep-recover'" + +- ${OCF_RESKEY_binary} $recover_args --wsrep-recover --log-error=$tmp 2>/dev/null ++ $SU - $OCF_RESKEY_user -s /bin/sh -c \ ++ "${OCF_RESKEY_binary} $recover_args --wsrep-recover --log-error=$tmp 2>/dev/null" + + last_commit="$(cat $tmp | sed -n $recovered_position_regex | tail -1)" + if [ -z "$last_commit" ]; then +@@ -670,8 +670,9 @@ detect_last_commit() + # we can only rollback the transaction, but that's OK + # since the DB will get resynchronized anyway + ocf_log warn "local node <${NODENAME}> was not shutdown properly. Rollback stuck transaction with --tc-heuristic-recover" +- ${OCF_RESKEY_binary} $recover_args --wsrep-recover \ +- --tc-heuristic-recover=rollback --log-error=$tmp 2>/dev/null ++ $SU - $OCF_RESKEY_user -s /bin/sh -c \ ++ "${OCF_RESKEY_binary} $recover_args --wsrep-recover \ ++ --tc-heuristic-recover=rollback --log-error=$tmp 2>/dev/null" + + last_commit="$(cat $tmp | sed -n $recovered_position_regex | tail -1)" + if [ ! -z "$last_commit" ]; then +diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh +index d5ac972cd..65db9bf85 100755 +--- a/heartbeat/mysql-common.sh ++++ b/heartbeat/mysql-common.sh +@@ -2,6 +2,13 @@ + + ####################################################################### + ++# Use runuser if available for SELinux. ++if [ -x /sbin/runuser ]; then ++ SU=runuser ++else ++ SU=su ++fi ++ + # Attempt to detect a default binary + OCF_RESKEY_binary_default=$(which mysqld_safe 2> /dev/null) + if [ "$OCF_RESKEY_binary_default" = "" ]; then +@@ -207,7 +214,7 @@ mysql_common_prepare_dirs() + # already existed, check whether it is writable by the configured + # user + for dir in $pid_dir $socket_dir; do +- if ! su -s /bin/sh - $OCF_RESKEY_user -c "test -w $dir"; then ++ if ! 
$SU -s /bin/sh - $OCF_RESKEY_user -c "test -w $dir"; then + ocf_exit_reason "Directory $dir is not writable by $OCF_RESKEY_user" + exit $OCF_ERR_PERM; + fi +@@ -219,14 +226,15 @@ mysql_common_start() + local mysql_extra_params="$1" + local pid + +- ${OCF_RESKEY_binary} --defaults-file=$OCF_RESKEY_config \ ++ $SU - $OCF_RESKEY_user -s /bin/sh -c \ ++ "${OCF_RESKEY_binary} --defaults-file=$OCF_RESKEY_config \ + --pid-file=$OCF_RESKEY_pid \ + --socket=$OCF_RESKEY_socket \ + --datadir=$OCF_RESKEY_datadir \ + --log-error=$OCF_RESKEY_log \ +- --user=$OCF_RESKEY_user $OCF_RESKEY_additional_parameters \ ++ $OCF_RESKEY_additional_parameters \ + $mysql_extra_params >/dev/null 2>&1 & +- pid=$! ++ pid=$!" + + # Spin waiting for the server to come up. + # Let the CRM/LRM time us out if required. diff --git a/SOURCES/bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch b/SOURCES/bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch new file mode 100644 index 0000000..8899055 --- /dev/null +++ b/SOURCES/bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch @@ -0,0 +1,104 @@ +From 57f695d336cab33c61e754e463654ad6400f7b58 Mon Sep 17 00:00:00 2001 +From: gguifelixamz +Date: Tue, 27 Nov 2018 17:06:05 +0000 +Subject: [PATCH 1/4] Enable --query flag in DescribeRouteTable API call to + avoid race condition with grep + +--- + heartbeat/aws-vpc-move-ip | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 9b2043aca..d2aed7490 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,9 +167,10 @@ ec2ip_validate() { + ec2ip_monitor() { + if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then + ocf_log info "monitor: check routing table (API call)" +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table" ++ cmd=''$OCF_RESKEY_awscli' --profile '$OCF_RESKEY_profile' --output text ec2 describe-route-tables --route-table-ids '$OCF_RESKEY_routing_table' --query 'RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId'' + ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_ip | awk '{ print $3 }')" ++ ROUTE_TO_INSTANCE=$($cmd) ++ ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" + if [ -z "$ROUTE_TO_INSTANCE" ]; then + ROUTE_TO_INSTANCE="" + fi + +From 4d6371aca5dca35b902a480e07a08c1dc3373ca5 Mon Sep 17 00:00:00 2001 +From: gguifelixamz +Date: Thu, 29 Nov 2018 11:39:26 +0000 +Subject: [PATCH 2/4] aws-vpc-move-ip: Fixed outer quotes and removed inner + quotes + +--- + heartbeat/aws-vpc-move-ip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index d2aed7490..ced69bd13 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,7 +167,7 @@ ec2ip_validate() { + ec2ip_monitor() { + if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then + ocf_log info "monitor: check routing table (API call)" +- cmd=''$OCF_RESKEY_awscli' --profile '$OCF_RESKEY_profile' --output text ec2 describe-route-tables --route-table-ids '$OCF_RESKEY_routing_table' --query 'RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId'' ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids 
$OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId" + ocf_log debug "executing command: $cmd" + ROUTE_TO_INSTANCE=$($cmd) + ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" + +From 09f4b061690a0e681aaf7314f1fc3e6f4e597cc8 Mon Sep 17 00:00:00 2001 +From: gguifelixamz +Date: Thu, 29 Nov 2018 11:55:05 +0000 +Subject: [PATCH 3/4] aws-vpc-move-ip: Replaced indentation spaces with tabs + for consistency with the rest of the code + +--- + heartbeat/aws-vpc-move-ip | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index ced69bd13..3e827283e 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,10 +167,10 @@ ec2ip_validate() { + ec2ip_monitor() { + if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then + ocf_log info "monitor: check routing table (API call)" +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId" + ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE=$($cmd) +- ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" ++ ROUTE_TO_INSTANCE=$($cmd) ++ ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" + if [ -z "$ROUTE_TO_INSTANCE" ]; then + ROUTE_TO_INSTANCE="" + fi + +From fcf85551ce70cb4fb7ce24e21c361fdbe6fcce6b Mon Sep 17 00:00:00 2001 +From: gguifelixamz +Date: Thu, 29 Nov 2018 13:07:32 +0000 +Subject: [PATCH 4/4] aws-vpc-move-ip: In cmd variable on ec2ip_monitor(): + replaced _address with _ip and modified to use single quotes + +--- + heartbeat/aws-vpc-move-ip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 3e827283e..331ee184f 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -167,7 +167,7 @@ ec2ip_validate() { + ec2ip_monitor() { + if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then + ocf_log info "monitor: check routing table (API call)" +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock==\`$OCF_RESKEY_address/32\`].InstanceId" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" + ocf_log debug "executing command: $cmd" + ROUTE_TO_INSTANCE=$($cmd) + ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" diff --git a/SOURCES/bz1695039-LVM-activate-return-NOT_RUNNING-rejoin-cluster.patch b/SOURCES/bz1695039-LVM-activate-return-NOT_RUNNING-rejoin-cluster.patch new file mode 100644 index 0000000..05e00fb --- /dev/null +++ b/SOURCES/bz1695039-LVM-activate-return-NOT_RUNNING-rejoin-cluster.patch @@ -0,0 +1,82 @@ +From 4ee9a7026d7ed15b0b5cd26f06a21d04fc05d14e Mon Sep 17 00:00:00 2001 +From: Roger Zhou +Date: Mon, 1 Apr 2019 22:57:26 
+0800
+Subject: [PATCH 1/2] LVM-activate: return OCF_NOT_RUNNING on initial probe
+
+In the use case of LVM on top of cluster md/raid, when the fenced node
+rejoins the cluster, Pacemaker will run the monitor action for the
+probe operation. At that time, LVM PV and VG won't exist before cluster
+md/raid get assembled, and the probe should return $OCF_NOT_RUNNING
+instead of $OCF_ERR_CONFIGURED.
+
+Signed-off-by: Roger Zhou
+---
+ heartbeat/LVM-activate | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index 3c462c75c..91ac05c34 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -329,6 +329,7 @@ lvmlockd_check()
+ # Good: lvmlockd is running, and clvmd is not running
+ if ! pgrep lvmlockd >/dev/null 2>&1 ; then
+ if ocf_is_probe; then
++ ocf_log info "initial probe: lvmlockd is not running yet."
+ exit $OCF_NOT_RUNNING
+ fi
+
+@@ -481,6 +482,11 @@ lvm_validate() {
+ exit $OCF_SUCCESS
+ fi
+
++ if ocf_is_probe; then
++ ocf_log info "initial probe: VG [${VG}] is not found on any block device yet."
++ exit $OCF_NOT_RUNNING
++ fi
++
+ ocf_exit_reason "Volume group[${VG}] doesn't exist, or not visible on this node!"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+From df2f58c400b1f6f239f9e1c1fdf6ce0875639b43 Mon Sep 17 00:00:00 2001
+From: Roger Zhou
+Date: Mon, 1 Apr 2019 23:02:54 +0800
+Subject: [PATCH 2/2] LVM-activate: align dmsetup report command to standard
+
+Namely to change 'vgname/lvname' to 'vg_name/lv_name'. The dmsetup
+report command follows lvm2 selection criteria field name standard.
+- dmsetup v1.02.86 (lvm2 v2_02_107) - 23rd June 2014
+ "Add dmsetup -S/--select to define selection criteria"
+- dmsetup info -c -S help
+
+Signed-off-by: Roger Zhou
+---
+ heartbeat/LVM-activate | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index 91ac05c34..730d9a09d 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -707,7 +707,7 @@ tagging_deactivate() {
+ # method:
+ #
+ # lv_count=$(vgs --foreign -o lv_count --noheadings ${VG} 2>/dev/null | tr -d '[:blank:]')
+-# dm_count=$(dmsetup --noheadings info -c -S "vgname=${VG}" 2>/dev/null | grep -c "${VG}-")
++# dm_count=$(dmsetup --noheadings info -c -S "vg_name=${VG}" 2>/dev/null | grep -c "${VG}-")
+ # test $lv_count -eq $dm_count
+ #
+ # It works, but we cannot afford to use LVM command in lvm_status. LVM command is expensive
+@@ -730,9 +730,9 @@ lvm_status() {
+ if [ -n "${LV}" ]; then
+ # dmsetup ls? It cannot accept device name. It's
+ # too heavy to list all DM devices.
+- dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" | grep -c -v '^No devices found') ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vg_name=${VG} && lv_name=${LV}" | grep -c -v '^No devices found') + else +- dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" | grep -c -v '^No devices found') ++ dm_count=$(dmsetup info --noheadings --noflush -c -S "vg_name=${VG}" | grep -c -v '^No devices found') + fi + + if [ $dm_count -eq 0 ]; then diff --git a/SOURCES/bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch b/SOURCES/bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch new file mode 100644 index 0000000..9ad4c1d --- /dev/null +++ b/SOURCES/bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch @@ -0,0 +1,46 @@ +From 17fe1dfeef1534b270e4765277cb8d7b42c4a9c4 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 5 Apr 2019 09:15:40 +0200 +Subject: [PATCH] gcp-vpc-move-route/gcp-vpc-move-vip: fix Python 3 encoding + issue + +--- + heartbeat/gcp-vpc-move-route.in | 2 +- + heartbeat/gcp-vpc-move-vip.in | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 591b97b1c..7dd47150d 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -193,7 +193,7 @@ def get_metadata(metadata_key, params=None, timeout=None): + url = '%s?%s' % (metadata_url, params) + request = urlrequest.Request(url, headers=METADATA_HEADERS) + request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open(request, timeout=timeout * 1.1).read() ++ return request_opener.open(request, timeout=timeout * 1.1).read().decode("utf-8") + + + def validate(ctx): +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index bd6cf86cd..953d61ed7 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -106,7 +106,7 @@ def get_metadata(metadata_key, params=None, timeout=None): + url = '%s?%s' % (metadata_url, params) + request = urlrequest.Request(url, headers=METADATA_HEADERS) + request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open(request, timeout=timeout * 1.1).read() ++ return request_opener.open(request, timeout=timeout * 1.1).read().decode("utf-8") + + + def get_instance(project, zone, instance): +@@ -162,7 +162,7 @@ def get_alias(project, zone, instance): + + def get_localhost_alias(): + net_iface = get_metadata('instance/network-interfaces', {'recursive': True}) +- net_iface = json.loads(net_iface.decode('utf-8')) ++ net_iface = json.loads(net_iface) + try: + return net_iface[0]['ipAliases'][0] + except (KeyError, IndexError): diff --git a/SOURCES/bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch b/SOURCES/bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch new file mode 100644 index 0000000..b724aa3 --- /dev/null +++ b/SOURCES/bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch @@ -0,0 +1,122 @@ +--- a/heartbeat/aws-vpc-move-ip 2019-05-20 10:54:01.527329668 +0200 ++++ b/heartbeat/aws-vpc-move-ip 2019-05-20 11:33:35.386089091 +0200 +@@ -93,11 +93,19 @@ + + + ++ ++ ++Deprecated IP address param. Use the ip param instead. ++ ++Deprecated VPC private IP Address ++ ++ ++ + + +-Name of the routing table, where the route for the IP address should be changed, i.e. rtb-... ++Name of the routing table(s), where the route for the IP address should be changed. 
If declaring multiple routing tables they should be separated by comma. Example: rtb-XXXXXXXX,rtb-YYYYYYYYY + +-routing table name ++routing table name(s) + + + +@@ -129,6 +137,13 @@ + END + } + ++ec2ip_set_address_param_compat(){ ++ # Include backward compatibility for the deprecated address parameter ++ if [ -z "$OCF_RESKEY_ip" ] && [ -n "$OCF_RESKEY_address" ]; then ++ OCF_RESKEY_ip="$OCF_RESKEY_address" ++ fi ++} ++ + ec2ip_validate() { + for cmd in aws ip curl; do + check_binary "$cmd" +@@ -150,20 +165,29 @@ + } + + ec2ip_monitor() { +- if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ]; then +- ocf_log info "monitor: check routing table (API call)" +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" +- ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE=$($cmd) +- ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" +- if [ -z "$ROUTE_TO_INSTANCE" ]; then +- ROUTE_TO_INSTANCE="" +- fi ++ MON_RES="" ++ if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then ++ for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do ++ ocf_log info "monitor: check routing table (API call) - $rtb" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" ++ ocf_log debug "executing command: $cmd" ++ ROUTE_TO_INSTANCE="$($cmd)" ++ ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" ++ if [ -z "$ROUTE_TO_INSTANCE" ]; then ++ ROUTE_TO_INSTANCE="" ++ fi ++ ++ if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then ++ ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE on $rtb" ++ MON_RES="$MON_RES $rtb" ++ fi ++ sleep 1 ++ done + +- if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ];then +- ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" ++ if [ ! -z "$MON_RES" ]; then + return $OCF_NOT_RUNNING + fi ++ + else + ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call" + fi +@@ -195,19 +219,23 @@ + } + + ec2ip_get_and_configure() { +- # Adjusting the routing table +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile ec2 replace-route --route-table-id $OCF_RESKEY_routing_table --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID" +- ocf_log debug "executing command: $cmd" +- $cmd +- rc=$? +- if [ "$rc" != 0 ]; then +- ocf_log warn "command failed, rc: $rc" +- return $OCF_ERR_GENERIC +- fi ++ for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID" ++ ocf_log debug "executing command: $cmd" ++ $cmd ++ rc=$? ++ if [ "$rc" != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done + + # Reconfigure the local ip address + ec2ip_drop +- ip addr add "${OCF_RESKEY_ip}/32" dev $OCF_RESKEY_interface ++ cmd="ip addr add ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface" ++ ocf_log debug "executing command: $cmd" ++ $cmd + rc=$? 
+ if [ $rc != 0 ]; then + ocf_log warn "command failed, rc: $rc" +@@ -289,6 +317,8 @@ + exit $OCF_ERR_PERM + fi + ++ec2ip_set_address_param_compat ++ + ec2ip_validate + + case $__OCF_ACTION in diff --git a/SOURCES/bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch b/SOURCES/bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch new file mode 100644 index 0000000..c283801 --- /dev/null +++ b/SOURCES/bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch @@ -0,0 +1,221 @@ +From 9f2b9cc09f7e2df163ff95585374f860f3dc58eb Mon Sep 17 00:00:00 2001 +From: Tomas Krojzl +Date: Tue, 16 Apr 2019 18:40:29 +0200 +Subject: [PATCH 1/6] Fix for VM having multiple network interfaces + +--- + heartbeat/aws-vpc-move-ip | 22 +++++++++++++++++++++- + 1 file changed, 21 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 090956434..a91c2dd11 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -219,8 +219,28 @@ ec2ip_drop() { + } + + ec2ip_get_and_configure() { ++ cmd="ip -br link show dev $OCF_RESKEY_interface | tr -s ' ' | cut -d' ' -f3" ++ ocf_log debug "executing command: $cmd" ++ MAC_ADDR="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" ++ ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-instances --instance-ids $EC2_INSTANCE_ID --query 'Reservations[*].Instances[*].NetworkInterfaces[*].[NetworkInterfaceId,MacAddress]' | grep ${MAC_ADDR} | cut -f1" ++ ocf_log debug "executing command: $cmd" ++ EC2_NETWORK_INTERFACE_ID="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}" ++ + for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" + ocf_log debug "executing command: $cmd" + $cmd + rc=$? 
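As an editorial aside between the commits in this series: the comma-separated routing_table handling introduced above reduces to a small loop. Below is a minimal standalone sketch of that shape; the profile name, table IDs, CIDR, and instance ID are placeholder assumptions, not values taken from these patches.

# Sketch only: update every routing table in a comma-separated list,
# mirroring the loop added by the multi-route-table patch above.
# All identifiers here are illustrative placeholders.
routing_table="rtb-XXXXXXXX,rtb-YYYYYYYY"
for rtb in $(echo $routing_table | sed -e 's/,/ /g'); do
	aws --profile cluster --output text ec2 replace-route \
		--route-table-id "$rtb" \
		--destination-cidr-block "192.0.2.10/32" \
		--instance-id "i-0123456789abcdef0" || exit 1
	sleep 1
done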
+ +From a871a463134ebb2456b5f37a343bf9034f5f4074 Mon Sep 17 00:00:00 2001 +From: krojzl +Date: Tue, 16 Apr 2019 18:49:32 +0200 +Subject: [PATCH 2/6] Fixing indentation + +--- + heartbeat/aws-vpc-move-ip | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index a91c2dd11..a46d10d30 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -227,7 +227,7 @@ ec2ip_get_and_configure() { + ocf_log warn "command failed, rc: $rc" + return $OCF_ERR_GENERIC + fi +- ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + + cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-instances --instance-ids $EC2_INSTANCE_ID --query 'Reservations[*].Instances[*].NetworkInterfaces[*].[NetworkInterfaceId,MacAddress]' | grep ${MAC_ADDR} | cut -f1" + ocf_log debug "executing command: $cmd" +@@ -237,7 +237,7 @@ ec2ip_get_and_configure() { + ocf_log warn "command failed, rc: $rc" + return $OCF_ERR_GENERIC + fi +- ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}" ++ ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}" + + for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do + cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" + +From 068680427dff620a948ae25f090bc154b02f17b9 Mon Sep 17 00:00:00 2001 +From: krojzl +Date: Wed, 17 Apr 2019 14:22:31 +0200 +Subject: [PATCH 3/6] Requested fix to avoid using AWS API + +--- + heartbeat/aws-vpc-move-ip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index a46d10d30..2910552f2 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -229,7 +229,7 @@ ec2ip_get_and_configure() { + fi + ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-instances --instance-ids $EC2_INSTANCE_ID --query 'Reservations[*].Instances[*].NetworkInterfaces[*].[NetworkInterfaceId,MacAddress]' | grep ${MAC_ADDR} | cut -f1" ++ cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id" + ocf_log debug "executing command: $cmd" + EC2_NETWORK_INTERFACE_ID="$(eval $cmd)" + rc=$? + +From 207a2ba66ba7196180d27674aa204980fcd25de2 Mon Sep 17 00:00:00 2001 +From: krojzl +Date: Fri, 19 Apr 2019 11:14:21 +0200 +Subject: [PATCH 4/6] More robust approach of getting MAC address + +--- + heartbeat/aws-vpc-move-ip | 29 +++++++++++++++++++++-------- + 1 file changed, 21 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 2910552f2..3a848b7e3 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -219,15 +219,28 @@ ec2ip_drop() { + } + + ec2ip_get_and_configure() { +- cmd="ip -br link show dev $OCF_RESKEY_interface | tr -s ' ' | cut -d' ' -f3" +- ocf_log debug "executing command: $cmd" +- MAC_ADDR="$(eval $cmd)" +- rc=$? 
+- if [ $rc != 0 ]; then +- ocf_log warn "command failed, rc: $rc" +- return $OCF_ERR_GENERIC ++ MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address" ++ if [ -f $MAC_FILE ]; then ++ cmd="cat ${MAC_FILE}" ++ ocf_log debug "executing command: $cmd" ++ MAC_ADDR="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" ++ else ++ cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3" ++ ocf_log debug "executing command: $cmd" ++ MAC_ADDR="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + fi +- ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + + cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id" + ocf_log debug "executing command: $cmd" + +From cdcc12a9c1431125b0d5298176e5242bfc9fbe29 Mon Sep 17 00:00:00 2001 +From: krojzl +Date: Fri, 19 Apr 2019 11:20:09 +0200 +Subject: [PATCH 5/6] Moving shared part outside if + +--- + heartbeat/aws-vpc-move-ip | 25 +++++++++---------------- + 1 file changed, 9 insertions(+), 16 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 3a848b7e3..bfe23e5bf 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -222,26 +222,19 @@ ec2ip_get_and_configure() { + MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address" + if [ -f $MAC_FILE ]; then + cmd="cat ${MAC_FILE}" +- ocf_log debug "executing command: $cmd" +- MAC_ADDR="$(eval $cmd)" +- rc=$? +- if [ $rc != 0 ]; then +- ocf_log warn "command failed, rc: $rc" +- return $OCF_ERR_GENERIC +- fi +- ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + else + cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3" +- ocf_log debug "executing command: $cmd" +- MAC_ADDR="$(eval $cmd)" +- rc=$? +- if [ $rc != 0 ]; then +- ocf_log warn "command failed, rc: $rc" +- return $OCF_ERR_GENERIC +- fi +- ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + fi + ++ ocf_log debug "executing command: $cmd" ++ MAC_ADDR="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" ++ + cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id" + ocf_log debug "executing command: $cmd" + EC2_NETWORK_INTERFACE_ID="$(eval $cmd)" + +From c3fc114fc64f6feb015c5342923fd2afc367ae28 Mon Sep 17 00:00:00 2001 +From: krojzl +Date: Fri, 19 Apr 2019 11:22:55 +0200 +Subject: [PATCH 6/6] Linting adjustment + +--- + heartbeat/aws-vpc-move-ip | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index bfe23e5bf..2757c27d0 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -225,7 +225,6 @@ ec2ip_get_and_configure() { + else + cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3" + fi +- + ocf_log debug "executing command: $cmd" + MAC_ADDR="$(eval $cmd)" + rc=$? 
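For orientation before the next patch file: the MAC-to-ENI resolution that the six commits above converge on can be sketched in isolation as follows. The interface name is a placeholder assumption; 169.254.169.254 is the standard EC2 instance metadata endpoint used by the agent itself.

# Sketch only: resolve the ENI ID of a NIC by reading its MAC address
# locally (sysfs first, "ip -br link" as fallback), then querying
# instance metadata - the same steps as the final ec2ip_get_and_configure().
iface="eth0"
if [ -f "/sys/class/net/${iface}/address" ]; then
	mac=$(cat "/sys/class/net/${iface}/address")
else
	mac=$(ip -br link show dev "$iface" | tr -s ' ' | cut -d' ' -f3)
fi
eni=$(curl -s "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${mac}/interface-id")
echo "ENI for ${iface} (${mac}): ${eni}"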
diff --git a/SOURCES/bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch b/SOURCES/bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch new file mode 100644 index 0000000..4de33f1 --- /dev/null +++ b/SOURCES/bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch @@ -0,0 +1,32 @@ +From aae26ca70ef910e83485778c1fb450941fe79e8a Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Mon, 3 Dec 2018 16:48:14 +0100 +Subject: [PATCH] Do not log at debug log level when HA_debug is unset + +There might be situations (e.g. bundles) where the HA_debug variable +is unset. It makes little sense to enable debug logging when the HA_debug env +variable is unset. +So let's skip debug logs when HA_debug is set to 0 or is unset. + +Tested inside a bundle and observed that previously seen 'ocf_log debug' +calls are now correctly suppressed (w/ HA_debug being unset inside the +container) + +Signed-off-by: Michele Baldessari +--- + heartbeat/ocf-shellfuncs.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 043ab9bf2..b17297e1a 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -257,7 +257,7 @@ ha_log() + + ha_debug() { + +- if [ "x${HA_debug}" = "x0" ] ; then ++ if [ "x${HA_debug}" = "x0" ] || [ -z "${HA_debug}" ] ; then + return 0 + fi + if tty >/dev/null; then diff --git a/SOURCES/bz1707969-2-ocf_is_true-add-True-to-regexp.patch b/SOURCES/bz1707969-2-ocf_is_true-add-True-to-regexp.patch new file mode 100644 index 0000000..00e34b8 --- /dev/null +++ b/SOURCES/bz1707969-2-ocf_is_true-add-True-to-regexp.patch @@ -0,0 +1,22 @@ +From 73b35b74b743403aeebab43205475be6f2938cd5 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 12 Jun 2019 10:11:07 +0200 +Subject: [PATCH] ocf_is_true: add True to regexp + +--- + heartbeat/ocf-shellfuncs.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index b17297e1a..7a97558a5 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -101,7 +101,7 @@ ocf_is_decimal() { + + ocf_is_true() { + case "$1" in +- yes|true|1|YES|TRUE|ja|on|ON) true ;; ++ yes|true|1|YES|TRUE|True|ja|on|ON) true ;; + *) false ;; + esac + } diff --git a/SOURCES/bz1717759-Filesystem-remove-notify-action-from-metadata.patch b/SOURCES/bz1717759-Filesystem-remove-notify-action-from-metadata.patch new file mode 100644 index 0000000..9cf643e --- /dev/null +++ b/SOURCES/bz1717759-Filesystem-remove-notify-action-from-metadata.patch @@ -0,0 +1,21 @@ +From d1fc6920718284431a2c2cc28562498d6c8ea792 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 19 Jun 2019 11:12:33 +0200 +Subject: [PATCH] Filesystem: remove removed notify-action from metadata + +--- + heartbeat/Filesystem | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 780ba63a4..c46ec3cca 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -221,7 +221,6 @@ block if unresponsive nfs mounts are in use on the system. 
+
+
+
+-<action name="notify" timeout="60s" />
+
+
+
diff --git a/SOURCES/bz1718219-podman-1-avoid-double-inspect-call.patch b/SOURCES/bz1718219-podman-1-avoid-double-inspect-call.patch
new file mode 100644
index 0000000..5aeada6
--- /dev/null
+++ b/SOURCES/bz1718219-podman-1-avoid-double-inspect-call.patch
@@ -0,0 +1,46 @@
+From d8400a30604229d349f36855c30a6a438204023b Mon Sep 17 00:00:00 2001
+From: Michele Baldessari
+Date: Wed, 12 Jun 2019 11:29:17 +0200
+Subject: [PATCH] Avoid double call to podman inspect in podman_simple_status()
+
+Right now podman_simple_status() does the following:
+- It calls container_exists() which then calls "podman inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1"
+- Then it calls "podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null"
+
+This duplication is unnecessary and we can rely on the second podman inspect
+call. We need to do this because podman inspect calls are very expensive as
+soon as moderate I/O kicks in.
+
+Tested as follows:
+1) Injected the change on an existing bundle-based cluster
+2) Observed that monitoring operations kept working okay
+3) Verified by adding set -x that only a single podman inspect per monitor
+ operation was called (as opposed to two before)
+4) Restarted a bundle with an OCF resource inside correctly
+5) Did a podman stop of a bundle and correctly observed that:
+5.a) It was detected as non running:
+* haproxy-bundle-podman-1_monitor_60000 on controller-0 'not running' (7): call=192, status=complete, exitreason='',
+ last-rc-change='Wed Jun 12 09:22:18 2019', queued=0ms, exec=0ms
+5.b) It was correctly started afterwards
+
+Signed-off-by: Michele Baldessari
+---
+ heartbeat/podman | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/heartbeat/podman b/heartbeat/podman
+index 34e11da6b..b2b3081f9 100755
+--- a/heartbeat/podman
++++ b/heartbeat/podman
+@@ -238,11 +238,6 @@ podman_simple_status()
+ {
+ local val
+
+- container_exists
+- if [ $? -ne 0 ]; then
+- return $OCF_NOT_RUNNING
+- fi
+-
+ # retrieve the 'Running' attribute for the container
+ val=$(podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null)
+ if [ $? -ne 0 ]; then
diff --git a/SOURCES/bz1718219-podman-2-improve-monitor-action.patch b/SOURCES/bz1718219-podman-2-improve-monitor-action.patch
new file mode 100644
index 0000000..1537139
--- /dev/null
+++ b/SOURCES/bz1718219-podman-2-improve-monitor-action.patch
@@ -0,0 +1,63 @@
+From 9685e8e6bf2896377a9cf0e07a85de5dd5fcf2df Mon Sep 17 00:00:00 2001
+From: Michele Baldessari
+Date: Wed, 12 Jun 2019 12:00:31 +0200
+Subject: [PATCH] Simplify podman_monitor()
+
+Before this change podman_monitor() does two things:
+\-> podman_simple_status()
+ \-> podman inspect {{.State.Running}}
+\-> if podman_simple_status == 0 then monitor_cmd_exec()
+ \-> if [ -z "$OCF_RESKEY_monitor_cmd" ]; then # so if OCF_RESKEY_monitor_cmd is empty we just return SUCCESS
+ return $rc
+ fi
+ # if OCF_RESKEY_monitor_cmd is set to something we execute it
+ podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd
+
+Let's actually only rely on podman exec as invoked inside monitor_cmd_exec
+when $OCF_RESKEY_monitor_cmd is non empty (which is the default as it is set to "/bin/true").
+When there is no monitor_cmd command defined then it makes sense to rely on the
+podman inspect call in podman_simple_status().
+ +Tested as follows: +1) Injected the change on an existing bundle-based cluster +2) Observed that monitoring operations kept working okay +3) Restarted rabbitmq-bundle and galera-bundle successfully +4) Killed a container and we correctly detected the monitor failure +Jun 12 09:52:12 controller-0 pacemaker-controld[25747]: notice: controller-0-haproxy-bundle-podman-1_monitor_60000:230 [ ocf-exit-reason:monitor cmd failed (rc=125), output: cannot exec into container that is not running\n ] +5) Container correctly got restarted after the monitor failure: + haproxy-bundle-podman-1 (ocf::heartbeat:podman): Started controller-0 +6) Stopped and removed a container and pcmk detected it correctly: +Jun 12 09:55:15 controller-0 podman(haproxy-bundle-podman-1)[841411]: ERROR: monitor cmd failed (rc=125), output: unable to exec into haproxy-bundle-podman-1: no container with name or ID haproxy-bundle-podman-1 found: no such container +Jun 12 09:55:15 controller-0 pacemaker-execd[25744]: notice: haproxy-bundle-podman-1_monitor_60000:841411:stderr [ ocf-exit-reason:monitor cmd failed (rc=125), output: unable to exec into haproxy-bundle-podman-1: no container with name or ID haproxy-bundle-podman-1 found: no such container ] +7) pcmk was able to start the container that was stopped and removed: +Jun 12 09:55:16 controller-0 pacemaker-controld[25747]: notice: Result of start operation for haproxy-bundle-podman-1 on controller-0: 0 (ok) +8) Added 'set -x' to the RA and correctly observed that no 'podman inspect' has been invoked during monitoring operations + +Signed-off-by: Michele Baldessari +--- + heartbeat/podman | 11 +++-------- + 1 file changed, 3 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index b2b3081f9..a9bd57dea 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -255,15 +255,10 @@ podman_simple_status() + + podman_monitor() + { +- local rc=0 +- +- podman_simple_status +- rc=$? +- +- if [ $rc -ne 0 ]; then +- return $rc ++ if [ -z "$OCF_RESKEY_monitor_cmd" ]; then ++ podman_simple_status ++ return $? + fi +- + monitor_cmd_exec + } + diff --git a/SOURCES/bz1718219-podman-3-remove-docker-remnant.patch b/SOURCES/bz1718219-podman-3-remove-docker-remnant.patch new file mode 100644 index 0000000..56f7302 --- /dev/null +++ b/SOURCES/bz1718219-podman-3-remove-docker-remnant.patch @@ -0,0 +1,34 @@ +From 69c5d35a7a5421d4728db824558007bbb91a9d4a Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Wed, 12 Jun 2019 12:02:06 +0200 +Subject: [PATCH] Remove unneeded podman exec --help call + +There are no podman releases that do not have the exec argument, so +let's just drop this remnant that came from the docker RA. + +Signed-off-by: Michele Baldessari +--- + heartbeat/podman | 10 ++-------- + 1 file changed, 2 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index a9bd57dea..858023555 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -190,14 +190,8 @@ monitor_cmd_exec() + return $rc + fi + +- if podman exec --help >/dev/null 2>&1; then +- out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) +- rc=$? +- else +- out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(podman inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1) +- rc=$? +- fi +- ++ out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) ++ rc=$? 
+ if [ $rc -eq 127 ]; then
+ ocf_log err "monitor cmd failed (rc=$rc), output: $out"
+ ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container."
diff --git a/SOURCES/bz1718219-podman-4-use-exec-to-avoid-performance-issues.patch b/SOURCES/bz1718219-podman-4-use-exec-to-avoid-performance-issues.patch
new file mode 100644
index 0000000..351207f
--- /dev/null
+++ b/SOURCES/bz1718219-podman-4-use-exec-to-avoid-performance-issues.patch
@@ -0,0 +1,161 @@
+From 6016283dfdcb45bf750f96715fc653a4c0904bca Mon Sep 17 00:00:00 2001
+From: Damien Ciabrini
+Date: Fri, 28 Jun 2019 13:34:40 +0200
+Subject: [PATCH] podman: only use exec to manage container's lifecycle
+
+Under heavy IO load, podman may be impacted and take a long time
+to execute some actions. If that takes more than the default
+20s container monitoring timeout, containers will restart unexpectedly.
+
+Replace all IO-sensitive podman calls (inspect, exists...) with
+equivalent "podman exec" calls, because the latter command seems
+less prone to performance degradation under IO load.
+
+With this commit, the resource agent now requires podman 1.0.2+,
+because it relies on two different patches [1,2] that improve
+IO performance and make it possible to distinguish between
+"container stopped" and "container doesn't exist" error codes.
+
+Tested on an OpenStack environment with podman 1.0.2, with the
+following scenario:
+ . regular start/stop/monitor operations
+ . probe operations (pcs resource cleanup/refresh)
+ . unmanage/manage operations
+ . reboot
+
+[1] https://github.com/containers/libpod/commit/90b835db69d589de559462d988cb3fae5cf1ef49
+[2] https://github.com/containers/libpod/commit/a19975f96d2ee7efe186d9aa0be42285cfafa3f4
+---
+ heartbeat/podman | 75 ++++++++++++++++++++++++------------------------
+ 1 file changed, 37 insertions(+), 38 deletions(-)
+
+diff --git a/heartbeat/podman b/heartbeat/podman
+index 51f6ba883..8fc2c4695 100755
+--- a/heartbeat/podman
++++ b/heartbeat/podman
+@@ -129,9 +129,6 @@ the health of the container. This command must return 0 to indicate that
+ the container is healthy. A non-zero return code will indicate that the
+ container has failed and should be recovered.
+
+-If 'podman exec' is supported, it is used to execute the command. If not,
+-nsenter is used.
+-
+ Note: Using this method for monitoring processes inside a container
+ is not recommended, as containerd tries to track processes running
+ inside the container and does not deal well with many short-lived
+@@ -192,17 +189,13 @@ monitor_cmd_exec()
+ local rc=$OCF_SUCCESS
+ local out
+
+- if [ -z "$OCF_RESKEY_monitor_cmd" ]; then
+- return $rc
+- fi
+-
+ out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1)
+ rc=$?
+- if [ $rc -eq 127 ]; then
+- ocf_log err "monitor cmd failed (rc=$rc), output: $out"
+- ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container."
+- # there is no recovering from this, exit immediately +- exit $OCF_ERR_ARGS ++ # 125: no container with name or ID ${CONTAINER} found ++ # 126: container state improper (not running) ++ # 127: any other error ++ if [ $rc -eq 125 ] || [ $rc -eq 126 ]; then ++ rc=$OCF_NOT_RUNNING + elif [ $rc -ne 0 ]; then + ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out" + rc=$OCF_ERR_GENERIC +@@ -215,7 +208,16 @@ monitor_cmd_exec() + + container_exists() + { +- podman inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1 ++ local rc ++ local out ++ ++ out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) ++ rc=$? ++ # 125: no container with name or ID ${CONTAINER} found ++ if [ $rc -ne 125 ]; then ++ return 0 ++ fi ++ return 1 + } + + remove_container() +@@ -236,30 +238,30 @@ remove_container() + + podman_simple_status() + { +- local val +- +- # retrieve the 'Running' attribute for the container +- val=$(podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null) +- if [ $? -ne 0 ]; then +- #not running as a result of container not being found +- return $OCF_NOT_RUNNING +- fi ++ local rc + +- if ocf_is_true "$val"; then +- # container exists and is running +- return $OCF_SUCCESS ++ # simple status is implemented via podman exec ++ # everything besides success is considered "not running" ++ monitor_cmd_exec ++ rc=$? ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ rc=$OCF_NOT_RUNNING; + fi +- +- return $OCF_NOT_RUNNING ++ return $rc + } + + podman_monitor() + { +- if [ -z "$OCF_RESKEY_monitor_cmd" ]; then +- podman_simple_status +- return $? +- fi ++ # We rely on running podman exec to monitor the container ++ # state because that command seems to be less prone to ++ # performance issue under IO load. ++ # ++ # For probes to work, we expect cmd_exec to be able to report ++ # when a container is not running. Here, we're not interested ++ # in distinguishing whether it's stopped or non existing ++ # (there's function container_exists for that) + monitor_cmd_exec ++ return $? + } + + podman_create_mounts() { +@@ -416,14 +418,6 @@ podman_validate() + exit $OCF_ERR_CONFIGURED + fi + +- if [ -n "$OCF_RESKEY_monitor_cmd" ]; then +- podman exec --help >/dev/null 2>&1 +- if [ ! $? ]; then +- ocf_log info "checking for nsenter, which is required when 'monitor_cmd' is specified" +- check_binary nsenter +- fi +- fi +- + image_exists + if [ $? -ne 0 ]; then + ocf_exit_reason "base image, ${OCF_RESKEY_image}, could not be found." 
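A quick illustration of the exit-code convention the hunks above rely on (a sketch only, assuming podman 1.0.2+ as required by the commit message; the container name is an example, and /bin/true mirrors the default monitor_cmd enforced at the end of this patch):

    podman exec haproxy-bundle-podman-1 /bin/true
    case $? in
        0)       echo "container running, monitor command succeeded" ;;           # OCF_SUCCESS
        125|126) echo "no such container, or container not running" ;;            # mapped to OCF_NOT_RUNNING
        *)       echo "the monitor command itself failed inside the container" ;; # mapped to OCF_ERR_GENERIC
    esac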
+@@ -457,6 +451,11 @@ fi
+
+ CONTAINER=$OCF_RESKEY_name
+
++# Note: we currently monitor podman containers with the "podman exec"
++# command, so make sure that invocation is always valid by enforcing the
++# exec command to be non-empty
++: ${OCF_RESKEY_monitor_cmd:=/bin/true}
++
+ case $__OCF_ACTION in
+ meta-data) meta_data
+ 		exit $OCF_SUCCESS;;
diff --git a/SOURCES/bz1719684-dhcpd-keep-SELinux-context-chroot.patch b/SOURCES/bz1719684-dhcpd-keep-SELinux-context-chroot.patch
new file mode 100644
index 0000000..82a46c1
--- /dev/null
+++ b/SOURCES/bz1719684-dhcpd-keep-SELinux-context-chroot.patch
@@ -0,0 +1,28 @@
+From c8c073ed81884128b0b3955fb0b0bd23661044a2 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Wed, 12 Jun 2019 12:45:08 +0200
+Subject: [PATCH] dhcpd: keep SELinux context
+
+---
+ heartbeat/dhcpd | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/dhcpd b/heartbeat/dhcpd
+index 8b2d8b695..46027b39b 100755
+--- a/heartbeat/dhcpd
++++ b/heartbeat/dhcpd
+@@ -337,12 +337,12 @@ dhcpd_initialize_chroot() {
+ 	done | sort -u`
+ 	for i in $cplibs ; do
+ 		if [ -s "$i" ]; then
+-			cp -pL "$i" "${OCF_RESKEY_chrooted_path}/$libdir/" ||
++			cp -aL "$i" "${OCF_RESKEY_chrooted_path}/$libdir/" ||
+ 			  { ocf_exit_reason "could not copy $i to chroot jail"; return $OCF_ERR_GENERIC; }
+ 		fi
+ 	done
+
+-	return $OCF_SUCCESS
++	return $OCF_SUCCESS
+ }
+
+ # Initialize a non-chroot environment
diff --git a/SOURCES/bz1730455-LVM-activate-fix-monitor-hang.patch b/SOURCES/bz1730455-LVM-activate-fix-monitor-hang.patch
new file mode 100644
index 0000000..fe1bcd8
--- /dev/null
+++ b/SOURCES/bz1730455-LVM-activate-fix-monitor-hang.patch
@@ -0,0 +1,22 @@
+From ef37f8a2461b5763f4510d51e08d27d8b1f76937 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Tue, 23 Jul 2019 15:47:17 +0200
+Subject: [PATCH] LVM-activate: fix monitor hang caused by lvm_validate,
+ which was added by accident
+
+---
+ heartbeat/LVM-activate | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
+index 9c7c721bf..3df40c894 100755
+--- a/heartbeat/LVM-activate
++++ b/heartbeat/LVM-activate
+@@ -852,7 +852,6 @@ stop)
+ 	lvm_stop
+ 	;;
+ monitor)
+-	lvm_validate
+ 	lvm_status
+ 	;;
+ validate-all)
diff --git a/SOURCES/bz1732867-CTDB-1-explicitly-use-bash-shell.patch b/SOURCES/bz1732867-CTDB-1-explicitly-use-bash-shell.patch
new file mode 100644
index 0000000..cb13c0a
--- /dev/null
+++ b/SOURCES/bz1732867-CTDB-1-explicitly-use-bash-shell.patch
@@ -0,0 +1,39 @@
+From 1ff4ce7cbe58b5309f00ac1bbe124c562b6dcaf6 Mon Sep 17 00:00:00 2001
+From: David Disseldorp
+Date: Fri, 27 Jul 2018 16:02:26 +0200
+Subject: [PATCH] CTDB: explicitly use bash shell
+
+Upcoming recovery lock substring processing is bash specific.
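For reference, the bash-only expansions that this shebang change prepares for can be sketched as follows; the helper path and value are hypothetical, while the parameter name and the parsing idiom come from the CTDB changes later in this series:

    #!/bin/bash
    # a '!' prefix means the recovery lock is handled by a helper binary
    OCF_RESKEY_ctdb_recovery_lock='!/usr/libexec/ctdb/reclock-helper --verbose'  # hypothetical value
    if [ "${OCF_RESKEY_ctdb_recovery_lock:0:1}" = '!' ]; then  # ${var:offset:length} is bash, not POSIX sh
        binary="${OCF_RESKEY_ctdb_recovery_lock:1}"            # strip the leading '!'
        binary="${binary%% *}"                                 # trim any parameters
        echo "recovery lock helper: $binary"
    fi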
+ +Signed-off-by: David Disseldorp +--- + configure.ac | 1 + + heartbeat/{CTDB => CTDB.in} | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + rename heartbeat/{CTDB => CTDB.in} (99%) + +diff --git a/configure.ac b/configure.ac +index 039b4942c..10f5314da 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -978,6 +978,7 @@ AC_CONFIG_FILES([heartbeat/slapd], [chmod +x heartbeat/slapd]) + AC_CONFIG_FILES([heartbeat/sybaseASE], [chmod +x heartbeat/sybaseASE]) + AC_CONFIG_FILES([heartbeat/syslog-ng], [chmod +x heartbeat/syslog-ng]) + AC_CONFIG_FILES([heartbeat/vsftpd], [chmod +x heartbeat/vsftpd]) ++AC_CONFIG_FILES([heartbeat/CTDB], [chmod +x heartbeat/CTDB]) + AC_CONFIG_FILES([rgmanager/src/resources/ASEHAagent.sh], [chmod +x rgmanager/src/resources/ASEHAagent.sh]) + AC_CONFIG_FILES([rgmanager/src/resources/apache.sh], [chmod +x rgmanager/src/resources/apache.sh]) + AC_CONFIG_FILES([rgmanager/src/resources/bind-mount.sh], [chmod +x rgmanager/src/resources/bind-mount.sh]) +diff --git a/heartbeat/CTDB b/heartbeat/CTDB.in +similarity index 99% +rename from heartbeat/CTDB +rename to heartbeat/CTDB.in +index 28e58cea0..7d87a4ef7 100755 +--- a/heartbeat/CTDB ++++ b/heartbeat/CTDB.in +@@ -1,4 +1,4 @@ +-#!/bin/sh ++#!@BASH_SHELL@ + # + # OCF Resource Agent for managing CTDB + # diff --git a/SOURCES/bz1732867-CTDB-2-add-ctdb_max_open_files-parameter.patch b/SOURCES/bz1732867-CTDB-2-add-ctdb_max_open_files-parameter.patch new file mode 100644 index 0000000..c30bfee --- /dev/null +++ b/SOURCES/bz1732867-CTDB-2-add-ctdb_max_open_files-parameter.patch @@ -0,0 +1,40 @@ +From 61f7cb5954d1727f58fab6d642a124ef342c8641 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 20 Feb 2019 11:24:28 +0100 +Subject: [PATCH] CTDB: add ctdb_max_open_files parameter + +--- + heartbeat/CTDB.in | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 0d58c850a..bbf8ef627 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -288,6 +288,14 @@ What debug level to run at (0-10). Higher means more verbose. + + + ++ ++ ++Maximum number of open files (for ulimit -n) ++ ++Max open files ++ ++ ++ + + + Path to default samba config file. Only necessary if CTDB +@@ -611,6 +619,11 @@ ctdb_start() { + start_as_disabled="--start-as-disabled" + ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" || start_as_disabled="" + ++ # set nofile ulimit for ctdbd process ++ if [ -n "$OCF_RESKEY_ctdb_max_open_files" ]; then ++ ulimit -n "$OCF_RESKEY_ctdb_max_open_files" ++ fi ++ + # Start her up + "$OCF_RESKEY_ctdbd_binary" \ + --reclock="$OCF_RESKEY_ctdb_recovery_lock" \ diff --git a/SOURCES/bz1732867-CTDB-3-fixes.patch b/SOURCES/bz1732867-CTDB-3-fixes.patch new file mode 100644 index 0000000..813bf81 --- /dev/null +++ b/SOURCES/bz1732867-CTDB-3-fixes.patch @@ -0,0 +1,131 @@ +From 8c61f2019d11781b737251b5cf839437b25fc53f Mon Sep 17 00:00:00 2001 +From: David Disseldorp +Date: Wed, 25 Jul 2018 23:15:10 +0200 +Subject: [PATCH 1/3] CTDB: fix incorrect db corruption reports (bsc#1101668) + +If a database was disconnected during an active transaction, then +tdbdump may fail with e.g.: +> /usr/bin/tdbdump /var/lib/ctdb/persistent/secrets.tdb.1 +Failed to open /var/lib/ctdb/persistent/secrets.tdb.1 +tdb(/var/lib/ctdb/persistent/secrets.tdb.1): FATAL: +tdb_transaction_recover: attempt to recover read only database + +This does *not* indicate corruption, only that tdbdump, which opens the +database readonly, isn't able to perform recovery. 
+ +Using tdbtool check, instead of tdbdump, passes: +> tdbtool /var/lib/ctdb/persistent/secrets.tdb.1 check +tdb_transaction_recover: recovered 2146304 byte database +Database integrity is OK and has 2 records. + +Drop the tdbdump checks, and instead rely on the core ctdb event script, +which performs the same checks with tdbtool. + +Signed-off-by: David Disseldorp +--- + heartbeat/CTDB.in | 18 ++++-------------- + 1 file changed, 4 insertions(+), 14 deletions(-) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 1456ea32b..28e58cea0 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -392,6 +392,8 @@ enable_event_scripts() { + local event_dir + event_dir=$OCF_RESKEY_ctdb_config_dir/events.d + ++ chmod u+x "$event_dir/00.ctdb" # core database health check ++ + if [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ]; then + chmod u+x "$event_dir/10.interface" + else +@@ -563,17 +565,6 @@ ctdb_start() { + rv=$? + [ $rv -ne 0 ] && return $rv + +- # Die if databases are corrupted +- persistent_db_dir="${OCF_RESKEY_ctdb_dbdir}/persistent" +- mkdir -p $persistent_db_dir 2>/dev/null +- for pdbase in $persistent_db_dir/*.tdb.[0-9]; do +- [ -f "$pdbase" ] || break +- /usr/bin/tdbdump "$pdbase" >/dev/null 2>/dev/null || { +- ocf_exit_reason "Persistent database $pdbase is corrupted! CTDB will not start." +- return $OCF_ERR_GENERIC +- } +- done +- + # Add necessary configuration to smb.conf + init_smb_conf + if [ $? -ne 0 ]; then +@@ -737,9 +728,8 @@ ctdb_monitor() { + + + ctdb_validate() { +- # Required binaries (full path to tdbdump is intentional, as that's +- # what's used in ctdb_start, which was lifted from the init script) +- for binary in pkill /usr/bin/tdbdump; do ++ # Required binaries ++ for binary in pkill; do + check_binary $binary + done + + +From 1ff4ce7cbe58b5309f00ac1bbe124c562b6dcaf6 Mon Sep 17 00:00:00 2001 +From: David Disseldorp +Date: Fri, 27 Jul 2018 16:02:26 +0200 +Subject: [PATCH 2/3] CTDB: explicitly use bash shell + +Upcoming recovery lock substring processing is bash specific. + +Signed-off-by: David Disseldorp +--- + configure.ac | 1 + + heartbeat/CTDB.in | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 7d87a4ef7..f9b5c564f 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -134,8 +134,8 @@ For more information see http://linux-ha.org/wiki/CTDB_(resource_agent) + + + +-The location of a shared lock file, common across all nodes. +-This must be on shared storage, e.g.: /shared-fs/samba/ctdb.lock ++The location of a shared lock file or helper binary, common across all nodes. ++See CTDB documentation for details. + + CTDB shared lock file + +@@ -757,13 +757,24 @@ ctdb_validate() { + return $OCF_ERR_CONFIGURED + fi + +- lock_dir=$(dirname "$OCF_RESKEY_ctdb_recovery_lock") +- touch "$lock_dir/$$" 2>/dev/null +- if [ $? != 0 ]; then +- ocf_exit_reason "Directory for lock file '$OCF_RESKEY_ctdb_recovery_lock' does not exist, or is not writable." +- return $OCF_ERR_ARGS ++ if [ "${OCF_RESKEY_ctdb_recovery_lock:0:1}" == '!' ]; then ++ # '!' prefix means recovery lock is handled via a helper binary ++ binary="${OCF_RESKEY_ctdb_recovery_lock:1}" ++ binary="${binary%% *}" # trim any parameters ++ if [ -z "$binary" ]; then ++ ocf_exit_reason "ctdb_recovery_lock invalid helper" ++ return $OCF_ERR_CONFIGURED ++ fi ++ check_binary "${binary}" ++ else ++ lock_dir=$(dirname "$OCF_RESKEY_ctdb_recovery_lock") ++ touch "$lock_dir/$$" 2>/dev/null ++ if [ $? 
!= 0 ]; then ++ ocf_exit_reason "Directory for lock file '$OCF_RESKEY_ctdb_recovery_lock' does not exist, or is not writable." ++ return $OCF_ERR_ARGS ++ fi ++ rm "$lock_dir/$$" + fi +- rm "$lock_dir/$$" + + return $OCF_SUCCESS + } diff --git a/SOURCES/bz1732867-CTDB-4-add-v4.9-support.patch b/SOURCES/bz1732867-CTDB-4-add-v4.9-support.patch new file mode 100644 index 0000000..a3332ef --- /dev/null +++ b/SOURCES/bz1732867-CTDB-4-add-v4.9-support.patch @@ -0,0 +1,452 @@ +From 30b9f55325d2acfba27aa6859c7360e10b7201d7 Mon Sep 17 00:00:00 2001 +From: David Disseldorp +Date: Wed, 5 Jun 2019 00:41:13 +0200 +Subject: [PATCH 1/3] CTDB: support Samba 4.9+ + +With Samba 4.9+, all ctdbd parameters have moved to config files. +Generate a new /etc/ctdb/ctdb.conf file during ctdb startup, based on RA +configuration. + +Event scripts in Samba 4.9+ are also no longer enabled/disabled based on +file mode. Use the "ctdb event script enable/disable" helpers, which now +work without a running ctdbd. + +Fixes: https://github.com/ClusterLabs/resource-agents/issues/1196 +Signed-off-by: David Disseldorp +Signed-off-by: Noel Power +Signed-off-by: Amitay Isaacs +--- + heartbeat/CTDB.in | 214 ++++++++++++++++++++++++++++++++++++---------- + 1 file changed, 167 insertions(+), 47 deletions(-) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 4dd646896..79a2f97e7 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -143,6 +143,10 @@ OCF_RESKEY_smb_fileid_algorithm_default="" + + ####################################################################### + ++ctdb_version() { ++ $OCF_RESKEY_ctdb_binary version | awk '{print $NF}' | sed "s/[-\.]\?[[:alpha:]].*//" ++} ++ + meta_data() { + cat < +@@ -256,7 +260,7 @@ host any public ip addresses. + + The directory containing various CTDB configuration files. + The "nodes" and "notify.sh" scripts are expected to be +-in this directory, as is the "events.d" subdirectory. ++in this directory. + + CTDB config file directory + +@@ -282,8 +286,10 @@ Full path to the CTDB cluster daemon binary. + + Full path to the domain socket that ctdbd will create, used for + local clients to attach and communicate with the ctdb daemon. ++With CTDB 4.9.0 and later the socket path is hardcoded at build ++time, so this parameter is ignored. + +-CTDB socket location ++CTDB socket location (ignored with CTDB 4.9+) + + + +@@ -421,16 +427,28 @@ invoke_ctdb() { + timeout=$((OCF_RESKEY_CRM_meta_timeout/1000)) + timelimit=$((OCF_RESKEY_CRM_meta_timeout/1000)) + fi +- $OCF_RESKEY_ctdb_binary --socket="$OCF_RESKEY_ctdb_socket" \ +- -t $timeout -T $timelimit \ +- "$@" ++ ++ local vers=$(ctdb_version) ++ ocf_version_cmp "$vers" "4.9.0" ++ ++ # if version < 4.9.0 specify '--socket' otherwise it's ++ # a compiled option ++ if [ "$?" -eq "0" ]; then ++ $OCF_RESKEY_ctdb_binary --socket="$OCF_RESKEY_ctdb_socket" \ ++ -t $timeout -T $timelimit \ ++ "$@" ++ else ++ $OCF_RESKEY_ctdb_binary \ ++ -t $timeout -T $timelimit \ ++ "$@" ++ fi + } + + # Enable any event scripts that are explicitly required. + # Any others will ultimately be invoked or not based on how they ship + # with CTDB, but will generally have no effect, beacuase the relevant + # CTDB_MANAGES_* options won't be set in /etc/sysconfig/ctdb. +-enable_event_scripts() { ++enable_event_scripts_chmod() { + local event_dir + event_dir=$OCF_RESKEY_ctdb_config_dir/events.d + +@@ -454,6 +472,36 @@ enable_event_scripts() { + fi + } + ++enable_event_scripts_symlink() { ++ # event scripts are symlinked once enabled, with the link source in... 
++ mkdir -p "$OCF_RESKEY_ctdb_config_dir/events/legacy" 2>/dev/null ++ ++ invoke_ctdb event script enable legacy 00.ctdb ++ ++ if [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ]; then ++ invoke_ctdb event script enable legacy 10.interface ++ else ++ invoke_ctdb event script disable legacy 10.interface ++ fi ++ if [ -f "${OCF_RESKEY_ctdb_config_dir}/static-routes" ]; then ++ invoke_ctdb event script enable legacy 11.routing ++ else ++ invoke_ctdb event script disable legacy 11.routing ++ fi ++ ++ if ocf_is_true "$OCF_RESKEY_ctdb_manages_winbind"; then ++ invoke_ctdb event script enable legacy 49.winbind ++ else ++ invoke_ctdb event script disable legacy 49.winbind ++ fi ++ ++ if ocf_is_true "$OCF_RESKEY_ctdb_manages_samba"; then ++ invoke_ctdb event script enable legacy 50.samba ++ else ++ invoke_ctdb event script disable legacy 50.samba ++ fi ++} ++ + # This function has no effect (currently no way to set CTDB_SET_*) + # but remains here in case we need it in future. + set_ctdb_variables() { +@@ -556,6 +604,46 @@ append_ctdb_sysconfig() { + [ -n "$2" ] && echo "$1=$2" >> "$CTDB_SYSCONFIG" + } + ++generate_ctdb_config() { ++ local ctdb_config="$OCF_RESKEY_ctdb_config_dir/ctdb.conf" ++ ++ # Backup existing config if we're not already using an auto-generated one ++ grep -qa '# CTDB-RA: Auto-generated' $ctdb_config || cp -p $ctdb_config ${ctdb_config}.ctdb-ra-orig ++ if [ $? -ne 0 ]; then ++ ocf_log warn "Unable to backup $ctdb_config to ${ctdb_config}.ctdb-ra-orig" ++ fi ++ ++ local log_option="file:$OCF_RESKEY_ctdb_logfile" ++ if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then ++ log_option="syslog" ++ fi ++ ++ local start_as_disabled="false" ++ ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" && start_as_disabled="true" ++ ++ local dbdir_volatile="$OCF_RESKEY_ctdb_dbdir/volatile" ++ [ -d "$dbdir_volatile" ] || mkdir -p "$dbdir_volatile" 2>/dev/null ++ local dbdir_persistent="$OCF_RESKEY_ctdb_dbdir/persistent" ++ [ -d "$dbdir_persistent" ] || mkdir -p "$dbdir_persistent" 2>/dev/null ++ local dbdir_state="$OCF_RESKEY_ctdb_dbdir/state" ++ [ -d "$dbdir_state" ] || mkdir -p "$dbdir_state" 2>/dev/null ++ ++cat >$ctdb_config </dev/null + +- # public addresses file (should not be present, but need to set for correctness if it is) +- local pub_addr_option +- pub_addr_option="" +- [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ] && \ +- pub_addr_option="--public-addresses=${OCF_RESKEY_ctdb_config_dir}/public_addresses" +- # start as disabled +- local start_as_disabled +- start_as_disabled="--start-as-disabled" +- ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" || start_as_disabled="" +- + # set nofile ulimit for ctdbd process + if [ -n "$OCF_RESKEY_ctdb_max_open_files" ]; then + ulimit -n "$OCF_RESKEY_ctdb_max_open_files" + fi + + # Start her up +- "$OCF_RESKEY_ctdbd_binary" \ +- --reclock="$OCF_RESKEY_ctdb_recovery_lock" \ +- --nlist="$OCF_RESKEY_ctdb_config_dir/nodes" \ +- --socket="$OCF_RESKEY_ctdb_socket" \ +- --dbdir="$OCF_RESKEY_ctdb_dbdir" \ +- --dbdir-persistent="$OCF_RESKEY_ctdb_dbdir/persistent" \ +- --event-script-dir="$OCF_RESKEY_ctdb_config_dir/events.d" \ +- --notification-script="$OCF_RESKEY_ctdb_config_dir/notify.sh" \ +- --transport=tcp \ +- $start_as_disabled $log_option $pub_addr_option \ +- -d "$OCF_RESKEY_ctdb_debuglevel" ++ invoke_ctdbd "$version" ++ + if [ $? -ne 0 ]; then + # cleanup smb.conf + cleanup_smb_conf +@@ -688,7 +808,7 @@ ctdb_start() { + if [ $? 
-ne 0 ]; then + # CTDB will be running, kill it before returning + ctdb_stop +- ocf_exit_reason "Can't invoke $OCF_RESKEY_ctdb_binary --socket=$OCF_RESKEY_ctdb_socket status" ++ ocf_exit_reason "Can't invoke $OCF_RESKEY_ctdb_binary status" + return $OCF_ERR_GENERIC + fi + if ! echo "$status" | grep -qs 'UNHEALTHY (THIS'; then +@@ -725,7 +845,7 @@ ctdb_stop() { + [ $count -gt 10 ] && { + ocf_log info "killing ctdbd " + pkill -9 -f "$OCF_RESKEY_ctdbd_binary" +- pkill -9 -f "${OCF_RESKEY_ctdb_config_dir}/events.d/" ++ pkill -9 -f "${OCF_RESKEY_ctdb_config_dir}/events" + } + done + + +From b4753b7cb46045bb9e7ed5e3a0a20f6104264b12 Mon Sep 17 00:00:00 2001 +From: David Disseldorp +Date: Wed, 10 Jul 2019 17:11:50 +0200 +Subject: [PATCH 2/3] CTDB: generate script.options file for 4.9+ + +Event scripts in CTDB 4.9+ ignore sysconfig configuration and instead +parse parameters in ctdb_config_dir/script.options . + +Signed-off-by: David Disseldorp +--- + heartbeat/CTDB.in | 35 ++++++++++++++++++++++++++++++----- + 1 file changed, 30 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 79a2f97e7..0906f3da9 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -242,6 +242,7 @@ If the amount of free memory drops below this value the node will + become unhealthy and ctdb and all managed services will be shutdown. + Once this occurs, the administrator needs to find the reason for the + OOM situation, rectify it and restart ctdb with "service ctdb start". ++With CTDB 4.4.0 and later this parameter is ignored. + + Minimum amount of free memory (MB) + +@@ -600,8 +601,10 @@ cleanup_smb_conf() { + mv "$OCF_RESKEY_smb_conf.$$" "$OCF_RESKEY_smb_conf" + } + +-append_ctdb_sysconfig() { +- [ -n "$2" ] && echo "$1=$2" >> "$CTDB_SYSCONFIG" ++append_conf() { ++ local file_path="$1" ++ shift ++ [ -n "$2" ] && echo "$1=$2" >> "$file_path" + } + + generate_ctdb_config() { +@@ -644,6 +647,25 @@ cat >$ctdb_config <$script_options < +Date: Wed, 10 Jul 2019 17:54:01 +0200 +Subject: [PATCH 3/3] CTDB: drop sysconfig presence check during validate + +There are two reasons to avoid this check: +- for ctdb versions prior to 4.9.0, the sysconfig file is generated by + the resource agent start hook *after* ctdb_validate() is called. +- post 4.9.0 versions don't use the sysconfig file. + +Signed-off-by: David Disseldorp +--- + heartbeat/CTDB.in | 5 ----- + 1 file changed, 5 deletions(-) + +diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in +index 0906f3da9..15d78902e 100755 +--- a/heartbeat/CTDB.in ++++ b/heartbeat/CTDB.in +@@ -925,11 +925,6 @@ ctdb_validate() { + check_binary $binary + done + +- if [ -z "$CTDB_SYSCONFIG" ]; then +- ocf_exit_reason "Can't find CTDB config file (expecting /etc/sysconfig/ctdb, /etc/default/ctdb or similar)" +- return $OCF_ERR_INSTALLED +- fi +- + if ocf_is_true "$OCF_RESKEY_ctdb_manages_samba" && [ ! -f "$OCF_RESKEY_smb_conf" ]; then + ocf_exit_reason "Samba config file '$OCF_RESKEY_smb_conf' does not exist." 
+ 		return $OCF_ERR_INSTALLED
diff --git a/SOURCES/bz1736746-podman-drop-in-support.patch b/SOURCES/bz1736746-podman-drop-in-support.patch
new file mode 100644
index 0000000..8c4be1a
--- /dev/null
+++ b/SOURCES/bz1736746-podman-drop-in-support.patch
@@ -0,0 +1,193 @@
+From 462ada6164cb77c81f5291d88287d68506d38056 Mon Sep 17 00:00:00 2001
+From: Damien Ciabrini
+Date: Tue, 9 Jul 2019 23:14:21 +0200
+Subject: [PATCH] Generate additional drop-in dependencies for podman containers
+
+When podman creates a container, it creates two additional systemd
+scope files dynamically:
+
+  - libpod-conmon-.scope - runs a conmon process that
+    tracks a container's pid1 into a dedicated pidfile.
+  - libpod-.scope - created dynamically by runc,
+    for cgroups accounting
+
+On shutdown, it can happen that systemd stops those scopes early,
+which in turn sends a SIGTERM to pacemaker-managed containers
+before pacemaker has scheduled any stop operation. That
+confuses the cluster and may break shutdown.
+
+Add a new option in the resource-agent to inject additional
+dependencies into the dynamically created scope files, so that
+systemd is not allowed to stop scopes before the pacemaker
+service itself is stopped.
+
+When that option is enabled, the scopes look like:
+
+    # podman ps | grep galera
+    c329819a1227 192.168.122.8:8787/rhosp15/openstack-mariadb:latest dumb-init -- /bin... About an hour ago Up About an hour ago galera-bundle-podman-0
+
+    # systemctl cat libpod*c329819a1227*
+    # /run/systemd/transient/libpod-conmon-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope
+    # This is a transient unit file, created programmatically via the systemd API. Do not edit.
+    [Scope]
+    Slice=machine.slice
+    Delegate=yes
+
+    [Unit]
+    DefaultDependencies=no
+
+    # /run/systemd/transient/libpod-conmon-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope.d/dep.conf
+    [Unit]
+    Before=pacemaker.service
+
+    # /run/systemd/transient/libpod-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope
+    # This is a transient unit file, created programmatically via the systemd API. Do not edit.
+    [Unit]
+    Description=libcontainer container c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b
+
+    [Scope]
+    Slice=machine.slice
+    Delegate=yes
+    MemoryAccounting=yes
+    CPUAccounting=yes
+    BlockIOAccounting=yes
+
+    [Unit]
+    DefaultDependencies=no
+
+    # /run/systemd/transient/libpod-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope.d/dep.conf
+    [Unit]
+    Before=pacemaker.service
+
+Effectively, this prevents systemd from managing the shutdown of any
+pacemaker-managed podman container.
+
+Related: rhbz#1726442
+---
+ heartbeat/podman | 82 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 81 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/podman b/heartbeat/podman
+index 8fc2c4695..8a916eb8c 100755
+--- a/heartbeat/podman
++++ b/heartbeat/podman
+@@ -158,6 +158,16 @@ to have the particular one persist when this happens.
+ reuse container
+
+
++
++
++Use transient drop-in files to add extra dependencies to the systemd
++scopes associated with the container. During reboot, this prevents systemd
++from stopping the container before pacemaker.
++
++drop-in dependency
++
++
+
+
+
+@@ -273,8 +283,57 @@ podman_create_mounts() {
+ 	IFS="$oldIFS"
+ }
+
++podman_container_id()
++{
++	# Retrieve the container ID by doing a "podman ps" rather than
++	# a "podman inspect", because the latter has performance issues
++	# under IO load.
++ # We could have run "podman start $CONTAINER" to get the ID back ++ # but if the container is stopped, the command will return a ++ # name instead of a container ID. This would break us. ++ podman ps --no-trunc --format '{{.ID}} {{.Names}}' | grep -F -w -m1 "$CONTAINER" | cut -d' ' -f1 ++} ++ ++ ++create_transient_drop_in_dependency() ++{ ++ local cid=$1 ++ local rc=$OCF_SUCCESS ++ ++ if [ -z "$cid" ]; then ++ ocf_log error "Container ID not found for \"$CONTAINER\". Not creating drop-in dependency" ++ return $OCF_ERR_GENERIC ++ fi ++ ++ ocf_log info "Creating drop-in dependency for \"$CONTAINER\" ($cid)" ++ for scope in "libpod-$cid.scope.d" "libpod-conmon-$cid.scope.d"; do ++ if [ $rc -eq $OCF_SUCCESS ] && [ ! -d /run/systemd/transient/"$scope" ]; then ++ mkdir -p /run/systemd/transient/"$scope" && \ ++ echo -e "[Unit]\nBefore=pacemaker.service" > /run/systemd/transient/"$scope"/dep.conf && \ ++ chmod ago+r /run/systemd/transient/"$scope" /run/systemd/transient/"$scope"/dep.conf ++ rc=$? ++ fi ++ done ++ ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ ocf_log error "Could not create drop-in dependency for \"$CONTAINER\" ($cid)" ++ else ++ systemctl daemon-reload ++ rc=$? ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ ocf_log error "Could not refresh service definition after creating drop-in for \"$CONTAINER\"" ++ fi ++ fi ++ ++ return $rc ++} ++ ++ + podman_start() + { ++ local cid ++ local rc ++ + podman_create_mounts + local run_opts="-d --name=${CONTAINER}" + # check to see if the container has already started +@@ -306,8 +365,17 @@ podman_start() + ocf_log info "running container $CONTAINER for the first time" + ocf_run podman run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd + fi ++ rc=$? + +- if [ $? -ne 0 ]; then ++ # if the container was stopped or didn't exist before, systemd ++ # removed the libpod* scopes. So always try to recreate the drop-ins ++ if [ $rc -eq 0 ] && ocf_is_true "$OCF_RESKEY_drop_in_dependency"; then ++ cid=$(podman_container_id) ++ create_transient_drop_in_dependency "$cid" ++ rc=$? ++ fi ++ ++ if [ $rc -ne 0 ]; then + ocf_exit_reason "podman failed to launch container" + return $OCF_ERR_GENERIC + fi +@@ -353,6 +421,8 @@ podman_stop() + else + ocf_log debug "waiting $timeout second[s] before killing container" + ocf_run podman stop -t=$timeout $CONTAINER ++ # on stop, systemd will automatically delete any transient ++ # drop-in conf that has been created earlier + fi + + if [ $? -ne 0 ]; then +@@ -456,6 +526,16 @@ CONTAINER=$OCF_RESKEY_name + # exec command to be non-empty + : ${OCF_RESKEY_monitor_cmd:=/bin/true} + ++# When OCF_RESKEY_drop_in_dependency is not populated, we ++# look at another file-based way of enabling the option. ++# Otherwise, consider it disabled. 
++if [ -z "$OCF_RESKEY_drop_in_dependency" ]; then ++ if [ -f "/etc/sysconfig/podman_drop_in" ] || \ ++ [ -f "/etc/default/podman_drop_in" ]; then ++ OCF_RESKEY_drop_in_dependency=yes ++ fi ++fi ++ + case $__OCF_ACTION in + meta-data) meta_data + exit $OCF_SUCCESS;; diff --git a/SOURCES/bz1738428-LVM-activate-detect-volume-without-reboot.patch b/SOURCES/bz1738428-LVM-activate-detect-volume-without-reboot.patch new file mode 100644 index 0000000..4725d8e --- /dev/null +++ b/SOURCES/bz1738428-LVM-activate-detect-volume-without-reboot.patch @@ -0,0 +1,48 @@ +From 6c24147ebe0e979c48db93a5f8ec6094b8707591 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 26 Sep 2019 12:52:39 +0200 +Subject: [PATCH] LVM-activate: move pvscan --cache to validate + +It needs to be called before validate attempts to look at the VG. +--- + configure.ac | 2 +- + heartbeat/LVM-activate | 6 +++++- + 2 files changed, 6 insertions(+), 2 deletions(-) + +diff --git a/configure.ac b/configure.ac +index 97dac7cf8..1eb65cf34 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -21,7 +21,7 @@ dnl checks for system services + + AC_INIT([resource-agents], + m4_esyscmd([make/git-version-gen .tarball-version]), +- [to_be_defined@foobar.org]) ++ [developers@clusterlabs.org]) + + AC_USE_SYSTEM_EXTENSIONS + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 3df40c894..9b7c0aa7f 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -489,6 +489,11 @@ lvm_validate() { + check_binary lvm + check_binary dmsetup + ++ # This is necessary when using system ID to update lvm hints, ++ # or in older versions of lvm, this is necessary to update the ++ # lvmetad cache. ++ pvscan --cache ++ + if ! vgs --foreign ${VG} >/dev/null 2>&1 ; then + # stop action exits successfully if the VG cannot be accessed... 
+ if [ $__OCF_ACTION = "stop" ]; then +@@ -627,7 +632,6 @@ clvmd_activate() { + systemid_activate() { + local cur_systemid + +- pvscan --cache + cur_systemid=$(vgs --foreign --noheadings -o systemid ${VG} | tr -d '[:blank:]') + + # Put our system ID on the VG diff --git a/SOURCES/bz1741042-IPaddr2-add-noprefixroute-parameter.patch b/SOURCES/bz1741042-IPaddr2-add-noprefixroute-parameter.patch new file mode 100644 index 0000000..f713613 --- /dev/null +++ b/SOURCES/bz1741042-IPaddr2-add-noprefixroute-parameter.patch @@ -0,0 +1,66 @@ +From 34b46b172857babbb2bca5e012c7827ed6a26b01 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 6 Nov 2019 10:00:31 +0100 +Subject: [PATCH] IPaddr2: add noprefixroute parameter + +--- + heartbeat/IPaddr2 | 17 ++++++++++++++++- + 1 file changed, 16 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index 1d39ae514..6f8e8c734 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -88,6 +88,7 @@ OCF_RESKEY_arp_sender_default="" + OCF_RESKEY_send_arp_opts_default="" + OCF_RESKEY_flush_routes_default="false" + OCF_RESKEY_run_arping_default=false ++OCF_RESKEY_noprefixroute_default="false" + OCF_RESKEY_preferred_lft_default="forever" + OCF_RESKEY_network_namespace_default="" + +@@ -109,6 +110,7 @@ OCF_RESKEY_network_namespace_default="" + : ${OCF_RESKEY_send_arp_opts=${OCF_RESKEY_send_arp_opts_default}} + : ${OCF_RESKEY_flush_routes=${OCF_RESKEY_flush_routes_default}} + : ${OCF_RESKEY_run_arping=${OCF_RESKEY_run_arping_default}} ++: ${OCF_RESKEY_noprefixroute=${OCF_RESKEY_noprefixroute_default}} + : ${OCF_RESKEY_preferred_lft=${OCF_RESKEY_preferred_lft_default}} + : ${OCF_RESKEY_network_namespace=${OCF_RESKEY_network_namespace_default}} + +@@ -377,6 +379,14 @@ Whether or not to run arping for IPv4 collision detection check. + + + ++ ++ ++Use noprefixroute flag (see 'man ip-address'). ++ ++Use noprefixroute flag ++ ++ ++ + + + For IPv6, set the preferred lifetime of the IP address. +@@ -397,8 +407,8 @@ the namespace. + Network namespace to use + + +- + ++ + + + +@@ -640,6 +650,11 @@ add_interface () { + msg="Adding $FAMILY address $ipaddr/$netmask with broadcast address $broadcast to device $iface" + fi + ++ if ocf_is_true "${OCF_RESKEY_noprefixroute}"; then ++ cmd="$cmd noprefixroute" ++ msg="${msg} (with noprefixroute)" ++ fi ++ + if [ ! -z "$label" ]; then + cmd="$cmd label $label" + msg="${msg} (with label $label)" diff --git a/SOURCES/bz1741843-LVM-activate-partial-activation.patch b/SOURCES/bz1741843-LVM-activate-partial-activation.patch new file mode 100644 index 0000000..1eec112 --- /dev/null +++ b/SOURCES/bz1741843-LVM-activate-partial-activation.patch @@ -0,0 +1,69 @@ +diff -uNr a/heartbeat/LVM-activate b/heartbeat/LVM-activate +--- a/heartbeat/LVM-activate 2019-10-08 12:10:11.755991580 +0200 ++++ b/heartbeat/LVM-activate 2019-10-08 12:14:38.388288176 +0200 +@@ -42,6 +42,11 @@ + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ++# Parameter defaults ++OCF_RESKEY_partial_activation_default="false" ++ ++: ${OCF_RESKEY_partial_activation=${OCF_RESKEY_partial_activation_default}} ++ + # If LV is given, only activate this named LV; otherwise, activate all + # LVs in the named VG. + VG=${OCF_RESKEY_vgname} +@@ -150,6 +155,16 @@ + + + ++ ++ ++If set, the volume group will be activated partially even with some ++physical volumes missing. It helps to set to true when using mirrored ++logical volumes. 
++ ++Activate VG partially when missing PVs ++ ++ ++ + + + +@@ -486,6 +501,25 @@ + exit $OCF_ERR_CONFIGURED + fi + ++ # Inconsistency might be due to missing physical volumes, which doesn't ++ # automatically mean we should fail. If partial_activation=true then ++ # we should let start try to handle it, or if no PVs are listed as ++ # "unknown device" then another node may have marked a device missing ++ # where we have access to all of them and can start without issue. ++ case $(vgs -o attr --noheadings $VG | tr -d ' ') in ++ ???p??*) ++ if ! ocf_is_true "$OCF_RESKEY_partial_activation" ; then ++ # We are missing devices and cannot activate partially ++ ocf_exit_reason "Volume group [$VG] has devices missing. Consider partial_activation=true to attempt to activate partially" ++ exit $OCF_ERR_GENERIC ++ else ++ # We are missing devices but are allowed to activate partially. ++ # Assume that caused the vgck failure and carry on ++ ocf_log warn "Volume group inconsistency detected with missing device(s) and partial_activation enabled. Proceeding with requested action." ++ fi ++ ;; ++ esac ++ + # Get the access mode from VG metadata and check if it matches the input + # value. Skip to check "tagging" mode because there's no reliable way to + # automatically check if "tagging" mode is being used. +@@ -545,6 +579,10 @@ + do_activate() { + local activate_opt=$1 + ++ if ocf_is_true "$OCF_RESKEY_partial_activation" ; then ++ activate_opt="${activate_opt} --partial" ++ fi ++ + # Only activate the specific LV if it's given + if [ -n "$LV" ]; then + ocf_run lvchange $activate_opt ${VG}/${LV} diff --git a/SOURCES/bz1744103-Filesystem-1-monitor-symlink-support.patch b/SOURCES/bz1744103-Filesystem-1-monitor-symlink-support.patch new file mode 100644 index 0000000..a57e5b0 --- /dev/null +++ b/SOURCES/bz1744103-Filesystem-1-monitor-symlink-support.patch @@ -0,0 +1,39 @@ +From 2aa8015bc4ff0bd61eca13eceb59aaa672335b76 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Thu, 30 Aug 2018 18:36:11 -0700 +Subject: [PATCH] Filesystem: Support symlink as mountpoint directory + +Filesystem monitor operation fails when the `directory` attribute is a +symlink. + +The monitor operation calls the `list_mounts` function, which cats +`/proc/mounts` if it exists, else cats `/etc/mtab` if it exists, else +runs the `mount` command. It then greps for `" $MOUNTPOINT "` in the +output, where `$MOUNTPOINT` is the value of the `directory` attribute. + +`/proc/mounts`, `/etc/mtab`, and the `mount` command resolve symlinks +to their canonical targets. So while the monitor operation greps for +the symlink path (surrounded by spaces) as defined in the directory +attribute, the symlink will not be present in the `list_mounts` output. +Only the symlink's target will be present. + +This patch uses `readlink -f $MOUNTPOINT` to resolve the symlink to its +canonical name before using it as a grep pattern in the +`Filesystem_status` function. 
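A minimal sketch of the fix (the paths are hypothetical; /proc/mounts always shows the canonical target of a symlinked mount point):

    MOUNTPOINT=/cluster/data                              # hypothetical symlink to /srv/data
    grep -q " $MOUNTPOINT " /proc/mounts                  # misses: the kernel lists /srv/data
    grep -q " $(readlink -f $MOUNTPOINT) " /proc/mounts   # matches: both sides are canonical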
+--- + heartbeat/Filesystem | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 7c73b0b97..fc4b8fcd5 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -580,7 +580,7 @@ Filesystem_stop() + # + Filesystem_status() + { +- if list_mounts | grep -q " $MOUNTPOINT " >/dev/null 2>&1; then ++ if list_mounts | grep -q " $(readlink -f $MOUNTPOINT) " >/dev/null 2>&1; then + rc=$OCF_SUCCESS + msg="$MOUNTPOINT is mounted (running)" + else diff --git a/SOURCES/bz1744103-Filesystem-2-add-symlink-support.patch b/SOURCES/bz1744103-Filesystem-2-add-symlink-support.patch new file mode 100644 index 0000000..d5cf49f --- /dev/null +++ b/SOURCES/bz1744103-Filesystem-2-add-symlink-support.patch @@ -0,0 +1,43 @@ +From e2c3ec91cdd123b8afc6010f45ecd22ee6d8ecf7 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Mon, 3 Sep 2018 00:30:01 -0700 +Subject: [PATCH] Filesystem: Canonicalize mountpoint symlinks + +Commit 2aa8015 added support to `Filesystem_status()` for mountpoints +that are symlinks. However, it missed two other places where `readlink` +calls should have been added to canonicalize symlinks. +--- + heartbeat/Filesystem | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index fc4b8fcd5..2a43d1daa 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -278,7 +278,7 @@ determine_blockdevice() { + nfs4|nfs|smbfs|cifs|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|none) + : ;; + *) +- DEVICE=`list_mounts | grep " $MOUNTPOINT " | cut -d' ' -f1` ++ DEVICE=`list_mounts | grep " $(readlink -f "$MOUNTPOINT" ) " | cut -d' ' -f1` + if [ -b "$DEVICE" ]; then + blockdevice=yes + fi +@@ -396,7 +396,7 @@ fstype_supported() + Filesystem_start() + { + # Check if there are any mounts mounted under the mountpoint +- if list_mounts | grep -q -E " $MOUNTPOINT/\w+" >/dev/null 2>&1; then ++ if list_mounts | grep -q -E " $(readlink -f "$MOUNTPOINT" )/\w+" >/dev/null 2>&1; then + ocf_log err "There is one or more mounts mounted under $MOUNTPOINT." 
+ 		return $OCF_ERR_CONFIGURED
+@@ -580,7 +580,7 @@ Filesystem_stop()
+ #
+ Filesystem_status()
+ {
+-	if list_mounts | grep -q " $(readlink -f $MOUNTPOINT) " >/dev/null 2>&1; then
++	if list_mounts | grep -q " $(readlink -f "$MOUNTPOINT" ) " >/dev/null 2>&1; then
+ 		rc=$OCF_SUCCESS
+ 		msg="$MOUNTPOINT is mounted (running)"
+ 	else
diff --git a/SOURCES/bz1744103-Filesystem-3-fix-umount-disk-failure.patch b/SOURCES/bz1744103-Filesystem-3-fix-umount-disk-failure.patch
new file mode 100644
index 0000000..1a53120
--- /dev/null
+++ b/SOURCES/bz1744103-Filesystem-3-fix-umount-disk-failure.patch
@@ -0,0 +1,53 @@
+From 69d607dc7568168e874f99d5a8b6bdb66b579d8b Mon Sep 17 00:00:00 2001
+From: "yusk.iida"
+Date: Tue, 7 May 2019 19:37:26 +0900
+Subject: [PATCH] Low: Filesystem: Fix a problem where umount is not executed
+ in the event of a disk failure
+
+---
+ heartbeat/Filesystem | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
+index 2a43d1daa..bd974f8f3 100755
+--- a/heartbeat/Filesystem
++++ b/heartbeat/Filesystem
+@@ -278,7 +278,7 @@ determine_blockdevice() {
+ 	nfs4|nfs|smbfs|cifs|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|none)
+ 		: ;;
+ 	*)
+-		DEVICE=`list_mounts | grep " $(readlink -f "$MOUNTPOINT" ) " | cut -d' ' -f1`
++		DEVICE=`list_mounts | grep " $CANONICALIZED_MOUNTPOINT " | cut -d' ' -f1`
+ 		if [ -b "$DEVICE" ]; then
+ 			blockdevice=yes
+ 		fi
+@@ -396,7 +396,7 @@ fstype_supported()
+ Filesystem_start()
+ {
+ 	# Check if there are any mounts mounted under the mountpoint
+-	if list_mounts | grep -q -E " $(readlink -f "$MOUNTPOINT" )/\w+" >/dev/null 2>&1; then
++	if list_mounts | grep -q -E " $CANONICALIZED_MOUNTPOINT/\w+" >/dev/null 2>&1; then
+ 		ocf_log err "There is one or more mounts mounted under $MOUNTPOINT."
+ 		return $OCF_ERR_CONFIGURED
+ 	fi
+@@ -580,7 +580,7 @@ Filesystem_stop()
+ #
+ Filesystem_status()
+ {
+-	if list_mounts | grep -q " $(readlink -f "$MOUNTPOINT" ) " >/dev/null 2>&1; then
++	if list_mounts | grep -q " $CANONICALIZED_MOUNTPOINT " >/dev/null 2>&1; then
+ 		rc=$OCF_SUCCESS
+ 		msg="$MOUNTPOINT is mounted (running)"
+ 	else
+@@ -804,6 +804,11 @@ if [ -z "$OCF_RESKEY_directory" ]; then
+ else
+ 	MOUNTPOINT=$(echo $OCF_RESKEY_directory | sed 's/\/*$//')
+ 	: ${MOUNTPOINT:=/}
++	CANONICALIZED_MOUNTPOINT=$(readlink -f "$MOUNTPOINT")
++	if [ $? -ne 0 ]; then
++		ocf_exit_reason "Could not canonicalize $MOUNTPOINT because readlink failed"
++		exit $OCF_ERR_GENERIC
++	fi
+ 	# At this stage, $MOUNTPOINT does not contain trailing "/" unless it is "/"
+ 	# TODO: / mounted via Filesystem sounds dangerous. On stop, we'll
+ 	# kill the whole system. Is that a good idea?
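Taken together, the net effect of the Filesystem patches above reduces to a short sketch (hypothetical paths): canonicalize the mount point once at startup and reuse the cached value, so that monitor and stop never depend on readlink succeeding against a failing disk:

    MOUNTPOINT=/cluster/data                                          # hypothetical
    CANONICALIZED_MOUNTPOINT=$(readlink -f "$MOUNTPOINT") || exit 1   # resolved once, up front
    # later, even while the underlying device is failing:
    grep -q " $CANONICALIZED_MOUNTPOINT " /proc/mounts && umount "$MOUNTPOINT"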
diff --git a/SOURCES/bz1744103-Filesystem-4-fix-readlink-issue.patch b/SOURCES/bz1744103-Filesystem-4-fix-readlink-issue.patch
new file mode 100644
index 0000000..68b9313
--- /dev/null
+++ b/SOURCES/bz1744103-Filesystem-4-fix-readlink-issue.patch
@@ -0,0 +1,32 @@
+From 48a7ebcea5ce0522021cf3079b62107a06b530b9 Mon Sep 17 00:00:00 2001
+From: James Oakley
+Date: Thu, 8 Aug 2019 05:56:14 -0700
+Subject: [PATCH] Don't call readlink on path if it does not exist
+
+---
+ heartbeat/Filesystem | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
+index 4bbbc06d3..738e3c08e 100755
+--- a/heartbeat/Filesystem
++++ b/heartbeat/Filesystem
+@@ -858,10 +858,14 @@ if [ -z "$OCF_RESKEY_directory" ]; then
+ else
+ 	MOUNTPOINT=$(echo $OCF_RESKEY_directory | sed 's/\/*$//')
+ 	: ${MOUNTPOINT:=/}
+-	CANONICALIZED_MOUNTPOINT=$(readlink -f "$MOUNTPOINT")
+-	if [ $? -ne 0 ]; then
+-		ocf_exit_reason "Could not canonicalize $MOUNTPOINT because readlink failed"
+-		exit $OCF_ERR_GENERIC
++	if [ -e "$MOUNTPOINT" ] ; then
++		CANONICALIZED_MOUNTPOINT=$(readlink -f "$MOUNTPOINT")
++		if [ $? -ne 0 ]; then
++			ocf_exit_reason "Could not canonicalize $MOUNTPOINT because readlink failed"
++			exit $OCF_ERR_GENERIC
++		fi
++	else
++		CANONICALIZED_MOUNTPOINT="$MOUNTPOINT"
+ 	fi
+ 	# At this stage, $MOUNTPOINT does not contain trailing "/" unless it is "/"
+ 	# TODO: / mounted via Filesystem sounds dangerous. On stop, we'll
diff --git a/SOURCES/bz1744140-Filesystem-1-avoid-corrupt-mount-list.patch b/SOURCES/bz1744140-Filesystem-1-avoid-corrupt-mount-list.patch
new file mode 100644
index 0000000..1184817
--- /dev/null
+++ b/SOURCES/bz1744140-Filesystem-1-avoid-corrupt-mount-list.patch
@@ -0,0 +1,46 @@
+From b67278bc92cfb0b9947ff5fff65f46f420a42c2c Mon Sep 17 00:00:00 2001
+From: Kazutomo Nakahira
+Date: Fri, 10 May 2019 14:30:51 +0900
+Subject: [PATCH] Low: Filesystem: Fix missing mount point due to corrupted
+ mount list
+
+---
+ heartbeat/Filesystem | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
+index 2a43d1daa..c38ae12d4 100755
+--- a/heartbeat/Filesystem
++++ b/heartbeat/Filesystem
+@@ -255,16 +255,26 @@ is_bind_mount() {
+ }
+ list_mounts() {
+ 	local inpf=""
++	local mount_list=""
++	local check_list="x"
++
+ 	if [ -e "/proc/mounts" ] && ! is_bind_mount; then
+ 		inpf=/proc/mounts
+ 	elif [ -f "/etc/mtab" -a -r "/etc/mtab" ]; then
+ 		inpf=/etc/mtab
+ 	fi
+-	if [ "$inpf" ]; then
+-		cut -d' ' -f1,2,3 < $inpf
+-	else
+-		$MOUNT | cut -d' ' -f1,3,5
+-	fi
++
++	# Make sure that the mount list has not been changed while reading.
++	while [ "$mount_list" != "$check_list" ]; do
++		check_list=$mount_list
++		if [ "$inpf" ]; then
++			mount_list=$(cut -d' ' -f1,2,3 < $inpf)
++		else
++			mount_list=$($MOUNT | cut -d' ' -f1,3,5)
++		fi
++	done
++
++	echo "$mount_list"
+ }
+
+ determine_blockdevice() {
diff --git a/SOURCES/bz1744140-Filesystem-2-prevent-killing-bind-mount.patch b/SOURCES/bz1744140-Filesystem-2-prevent-killing-bind-mount.patch
new file mode 100644
index 0000000..f642548
--- /dev/null
+++ b/SOURCES/bz1744140-Filesystem-2-prevent-killing-bind-mount.patch
@@ -0,0 +1,52 @@
+From bfbc99003ebd96d79bbf8ad50be0b5e714a92fd7 Mon Sep 17 00:00:00 2001
+From: ytakeshita
+Date: Fri, 7 Jun 2019 15:20:52 +0900
+Subject: [PATCH] Medium: Filesystem: Prevent all root user processes from
+ being killed when bind mounting a directory on rootfs.
+
+If a directory is bind-mounted on rootfs and "force_umount" is not set to "safe", change "force_umount" to "safe".
+---
+ heartbeat/Filesystem | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
+index c46ec3cca..1b29a08b3 100755
+--- a/heartbeat/Filesystem
++++ b/heartbeat/Filesystem
+@@ -314,6 +314,24 @@ bind_kernel_check() {
+ 	[ $? -ne 0 ] &&
+ 		ocf_log warn "kernel `uname -r` cannot handle read only bind mounts"
+ }
++
++bind_rootfs_check() {
++	local SOURCE
++	local TARGET
++	local ROOTFS
++
++	SOURCE=$1
++	TARGET=$(df --output=target $SOURCE | tail -n 1)
++
++	ROOTFS=$(list_mounts | grep -w rootfs | cut -d' ' -f 2)
++
++	if [ "${TARGET}" = "${ROOTFS}" ]; then
++		return 1
++	else
++		return 0
++	fi
++}
++
+ bind_mount() {
+ 	if is_bind_mount && [ "$options" != "-o bind" ]
+ 	then
+@@ -476,6 +494,11 @@ get_pids()
+ 	local procs
+ 	local mmap_procs
+
++	if is_bind_mount && ocf_is_true "$FORCE_UNMOUNT" && ! bind_rootfs_check "$DEVICE"; then
++		ocf_log debug "Change force_umount from '$FORCE_UNMOUNT' to 'safe'"
++		FORCE_UNMOUNT=safe
++	fi
++
+ 	if ocf_is_true "$FORCE_UNMOUNT"; then
+ 		if [ "X${HOSTOS}" = "XOpenBSD" ];then
+ 			fstat | grep $dir | awk '{print $3}'
diff --git a/SOURCES/bz1744140-Filesystem-3-improved-bind-mount-check.patch b/SOURCES/bz1744140-Filesystem-3-improved-bind-mount-check.patch
new file mode 100644
index 0000000..9bb2f11
--- /dev/null
+++ b/SOURCES/bz1744140-Filesystem-3-improved-bind-mount-check.patch
@@ -0,0 +1,42 @@
+From f8e5d2afc5b9bbf676ac20894f0f26e6ec998557 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Tue, 10 Sep 2019 15:40:12 +0200
+Subject: [PATCH] Filesystem: improve "/" check for bind mounts
+
+---
+ heartbeat/Filesystem | 15 +++------------
+ 1 file changed, 3 insertions(+), 12 deletions(-)
+
+diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
+index 738e3c08e..e66ddc77f 100755
+--- a/heartbeat/Filesystem
++++ b/heartbeat/Filesystem
+@@ -337,17 +337,8 @@ bind_kernel_check() {
+ 		ocf_log warn "kernel `uname -r` cannot handle read only bind mounts"
+ }
+
+-bind_rootfs_check() {
+-	local SOURCE
+-	local TARGET
+-	local ROOTFS
+-
+-	SOURCE=$1
+-	TARGET=$(df --output=target $SOURCE | tail -n 1)
+-
+-	ROOTFS=$(list_mounts | grep -w rootfs | cut -d' ' -f 2)
+-
+-	if [ "${TARGET}" = "${ROOTFS}" ]; then
++bind_root_mount_check() {
++	if [ "$(df -P "$1" | awk 'END{print $6}')" = "/" ]; then
+ 		return 1
+ 	else
+ 		return 0
+@@ -516,7 +507,7 @@ get_pids()
+ 	local procs
+ 	local mmap_procs
+
+-	if is_bind_mount && ocf_is_true "$FORCE_UNMOUNT" && ! bind_rootfs_check "$DEVICE"; then
++	if is_bind_mount && ocf_is_true "$FORCE_UNMOUNT" && ! bind_root_mount_check "$DEVICE"; then
+ 		ocf_log debug "Change force_umount from '$FORCE_UNMOUNT' to 'safe'"
+ 		FORCE_UNMOUNT=safe
+ 	fi
diff --git a/SOURCES/bz1744190-pgsql-1-set-primary-standby-initial-score.patch b/SOURCES/bz1744190-pgsql-1-set-primary-standby-initial-score.patch
new file mode 100644
index 0000000..d11f12d
--- /dev/null
+++ b/SOURCES/bz1744190-pgsql-1-set-primary-standby-initial-score.patch
@@ -0,0 +1,34 @@
+From f8e1b1407b613657ebd90381d53e6a567b92b241 Mon Sep 17 00:00:00 2001
+From: Kazutomo Nakahira
+Date: Mon, 17 Dec 2018 14:15:24 +0900
+Subject: [PATCH] Medium: pgsql: Set initial score for primary and hot standby
+ in the probe.
+ +--- + heartbeat/pgsql | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/heartbeat/pgsql b/heartbeat/pgsql +index 842dc0ac4..8ef84dd3e 100755 +--- a/heartbeat/pgsql ++++ b/heartbeat/pgsql +@@ -974,11 +974,19 @@ pgsql_real_monitor() { + case "$output" in + f) ocf_log debug "PostgreSQL is running as a primary." + if [ "$OCF_RESKEY_monitor_sql" = "$OCF_RESKEY_monitor_sql_default" ]; then ++ if ocf_is_probe; then ++ # Set initial score for primary. ++ exec_with_retry 0 $CRM_MASTER -v $PROMOTE_ME ++ fi + return $OCF_RUNNING_MASTER + fi + ;; + + t) ocf_log debug "PostgreSQL is running as a hot standby." ++ if ocf_is_probe; then ++ # Set initial score for hot standby. ++ exec_with_retry 0 $CRM_MASTER -v $CAN_NOT_PROMOTE ++ fi + return $OCF_SUCCESS;; + + *) ocf_exit_reason "$CHECK_MS_SQL output is $output" diff --git a/SOURCES/bz1744190-pgsql-2-improve-start-checks.patch b/SOURCES/bz1744190-pgsql-2-improve-start-checks.patch new file mode 100644 index 0000000..daca241 --- /dev/null +++ b/SOURCES/bz1744190-pgsql-2-improve-start-checks.patch @@ -0,0 +1,34 @@ +From ac430f79c333d73e6cd59ae59178c7040e7dbfda Mon Sep 17 00:00:00 2001 +From: Kazunori INOUE +Date: Wed, 8 May 2019 18:23:59 +0900 +Subject: [PATCH] pgsql: enhance checks in pgsql_real_start to prevent + incorrect status gets + +--- + heartbeat/pgsql | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/pgsql b/heartbeat/pgsql +index 842dc0ac4..5d04618e6 100755 +--- a/heartbeat/pgsql ++++ b/heartbeat/pgsql +@@ -483,7 +483,7 @@ runasowner() { + "-q") + quietrun="-q" + shift 1;; +- "warn"|"err") ++ "info"|"warn"|"err") + loglevel="-$1" + shift 1;; + *) +@@ -544,7 +544,9 @@ pgsql_real_start() { + local postgres_options + local rc + +- if pgsql_status; then ++ pgsql_real_monitor info ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS -o $rc -eq $OCF_RUNNING_MASTER ]; then + ocf_log info "PostgreSQL is already running. PID=`cat $PIDFILE`" + if is_replication; then + return $OCF_ERR_GENERIC diff --git a/SOURCES/bz1744224-IPsrcaddr-1-add-destination-and-table-parameters.patch b/SOURCES/bz1744224-IPsrcaddr-1-add-destination-and-table-parameters.patch new file mode 100644 index 0000000..cb4fde4 --- /dev/null +++ b/SOURCES/bz1744224-IPsrcaddr-1-add-destination-and-table-parameters.patch @@ -0,0 +1,202 @@ +--- ClusterLabs-resource-agents-e711383f/heartbeat/IPsrcaddr 2019-08-15 16:02:10.055827624 +0200 ++++ /home/oalbrigt/src/resource-agents/heartbeat/IPsrcaddr 2019-08-15 15:45:50.690757838 +0200 +@@ -1,6 +1,6 @@ + #!/bin/sh + # +-# Description: IPsrcaddr - Preferred source address modification ++# Description: IPsrcaddr - Preferred source(/dest) address modification + # + # Author: John Sutton + # Support: users@clusterlabs.org +@@ -11,7 +11,7 @@ + # + # This script manages the preferred source address associated with + # packets which originate on the localhost and are routed through the +-# default route. By default, i.e. without the use of this script or ++# matching route. By default, i.e. without the use of this script or + # similar, these packets will carry the IP of the primary i.e. the + # non-aliased interface. This can be a nuisance if you need to ensure + # that such packets carry the same IP irrespective of which host in +@@ -27,7 +27,7 @@ + # + # NOTES: + # +-# 1) There must be one and not more than 1 default route! Mainly because ++# 1) There must be one and not more than 1 matching route! Mainly because + # I can't see why you should have more than one. 
And if there is more + # than one, we would have to box clever to find out which one is to be + # modified, or we would have to pass its identity as an argument. +@@ -54,16 +54,25 @@ + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + + # Defaults ++OCF_RESKEY_ipaddress_default="" ++OCF_RESKEY_cidr_netmask_default="" ++OCF_RESKEY_destination_default="0.0.0.0/0" + OCF_RESKEY_proto_default="" ++OCF_RESKEY_table_default="" + ++: ${OCF_RESKEY_ipaddress=${OCF_RESKEY_ipaddress_default}} ++: ${OCF_RESKEY_cidr_netmask=${OCF_RESKEY_cidr_netmask_default}} ++: ${OCF_RESKEY_destination=${OCF_RESKEY_destination_default}} + : ${OCF_RESKEY_proto=${OCF_RESKEY_proto_default}} ++: ${OCF_RESKEY_table=${OCF_RESKEY_table_default}} + ####################################################################### + + [ -z "$OCF_RESKEY_proto" ] && PROTO="" || PROTO="proto $OCF_RESKEY_proto" ++[ -z "$OCF_RESKEY_table" ] && TABLE="" || TABLE="table $OCF_RESKEY_table" + + USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}"; + +- CMDSHOW="$IP2UTIL route show to exact 0.0.0.0/0" ++ CMDSHOW="$IP2UTIL route show $TABLE to exact $OCF_RESKEY_destination" + CMDCHANGE="$IP2UTIL route change to " + + SYSTYPE="`uname -s`" +@@ -91,7 +100,7 @@ + The IP address. + + IP address +- ++ + + + +@@ -100,7 +109,15 @@ + dotted quad notation 255.255.255.0). + + Netmask +- ++ ++ ++ ++ ++ ++The destination IP/subnet for the route (default: $OCF_RESKEY_destination_default) ++ ++Destination IP/subnet ++ + + + +@@ -108,7 +125,17 @@ + Proto to match when finding network. E.g. "kernel". + + Proto +- ++ ++ ++ ++ ++ ++Table to modify. E.g. "local". ++ ++The table has to have a route matching the "destination" parameter. ++ ++Table ++ + + + +@@ -151,21 +178,22 @@ + export OCF_RESKEY_ip=$OCF_RESKEY_ipaddress + + srca_read() { +- # Capture the default route - doublequotes prevent word splitting... +- DEFROUTE="`$CMDSHOW`" || errorexit "command '$CMDSHOW' failed" +- +- # ... so we can make sure there is only 1 default route +- [ 1 -eq `echo "$DEFROUTE" | wc -l` ] || \ +- errorexit "more than 1 default route exists" ++ # Capture matching route - doublequotes prevent word splitting... ++ ROUTE="`$CMDSHOW`" || errorexit "command '$CMDSHOW' failed" + +- # But there might still be no default route +- [ -z "$DEFROUTE" ] && errorexit "no default route exists" ++ # ... so we can make sure there is only 1 matching route ++ [ 1 -eq `echo "$ROUTE" | wc -l` ] || \ ++ errorexit "more than 1 matching route exists" ++ ++ # But there might still be no matching route ++ [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] && [ -z "$ROUTE" ] && \ ++ ! 
ocf_is_probe && errorexit "no matching route exists" + + # Sed out the source ip address if it exists +- SRCIP=`echo $DEFROUTE | sed -n "s/$MATCHROUTE/\3/p"` ++ SRCIP=`echo $ROUTE | sed -n "s/$MATCHROUTE/\3/p"` + + # and what remains after stripping out the source ip address clause +- ROUTE_WO_SRC=`echo $DEFROUTE | sed "s/$MATCHROUTE/\1\5/"` ++ ROUTE_WO_SRC=`echo $ROUTE | sed "s/$MATCHROUTE/\1\5/"` + + [ -z "$SRCIP" ] && return 1 + [ $SRCIP = $1 ] && return 0 +@@ -185,11 +213,13 @@ + rc=$OCF_SUCCESS + ocf_log info "The ip route has been already set.($NETWORK, $INTERFACE, $ROUTE_WO_SRC)" + else +- $IP2UTIL route replace $NETWORK dev $INTERFACE src $1 || \ +- errorexit "command 'ip route replace $NETWORK dev $INTERFACE src $1' failed" ++ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE src $1 || \ ++ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE src $1' failed" + +- $CMDCHANGE $ROUTE_WO_SRC src $1 || \ +- errorexit "command '$CMDCHANGE $ROUTE_WO_SRC src $1' failed" ++ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then ++ $CMDCHANGE $ROUTE_WO_SRC src $1 || \ ++ errorexit "command '$CMDCHANGE $ROUTE_WO_SRC src $1' failed" ++ fi + rc=$? + fi + +@@ -201,7 +231,7 @@ + # If one exists but it's not the same as the one specified, that's + # an error. Maybe that's the wrong behaviour because if this fails + # then when IPaddr releases the associated interface (if there is one) +-# your default route will also get dropped ;-( ++# your matching route will also get dropped ;-( + # The exit code should conform to LSB exit codes. + # + +@@ -217,11 +247,13 @@ + + [ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address" + +- $IP2UTIL route replace $NETWORK dev $INTERFACE || \ +- errorexit "command 'ip route replace $NETWORK dev $INTERFACE' failed" ++ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE || \ ++ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE' failed" + +- $CMDCHANGE $ROUTE_WO_SRC || \ +- errorexit "command '$CMDCHANGE $ROUTE_WO_SRC' failed" ++ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then ++ $CMDCHANGE $ROUTE_WO_SRC || \ ++ errorexit "command '$CMDCHANGE $ROUTE_WO_SRC' failed" ++ fi + + return $? + } +@@ -406,6 +438,10 @@ + return $OCF_ERR_CONFIGURED + fi + ++ if ! echo "$OCF_RESKEY_destination" | grep -q "/"; then ++ return $OCF_ERR_CONFIGURED ++ fi ++ + + if ! [ "x$SYSTYPE" = "xLinux" ]; then + # checks after this point are only relevant for linux. 
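Stepping back, the route manipulation this agent performs (CMDSHOW, CMDCHANGE and the replace calls in the hunks above) boils down to ip(8) invocations along these lines; the addresses, table and device are hypothetical:

    ip route show table main to exact 0.0.0.0/0                          # CMDSHOW: find the single matching route
    ip route replace 192.168.1.0/24 dev eth0 src 192.168.1.10            # pin the preferred source on the interface route
    ip route change default via 192.168.1.1 dev eth0 src 192.168.1.10    # CMDCHANGE: and on the matching (default) route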
+@@ -486,7 +522,11 @@ + } + + INTERFACE=`echo $findif_out | awk '{print $1}'` +-NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` ++if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then ++ NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` ++else ++ NETWORK="$OCF_RESKEY_destination" ++fi + + case $1 in + start) srca_start $ipaddress diff --git a/SOURCES/bz1744224-IPsrcaddr-2-local-rule-destination-fixes.patch b/SOURCES/bz1744224-IPsrcaddr-2-local-rule-destination-fixes.patch new file mode 100644 index 0000000..cca64cb --- /dev/null +++ b/SOURCES/bz1744224-IPsrcaddr-2-local-rule-destination-fixes.patch @@ -0,0 +1,42 @@ +From 0e73d3f474d08779b64ed99fb3f80c1e806ff1b7 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 28 Nov 2019 16:11:51 +0100 +Subject: [PATCH] IPsrcaddr: fixes to replace local rule if using local table, + and set src back to primary for device on stop + +--- + heartbeat/IPsrcaddr | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index d80b72165..f9085f082 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -75,6 +75,10 @@ USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}"; + CMDSHOW="$IP2UTIL route show $TABLE to exact $OCF_RESKEY_destination" + CMDCHANGE="$IP2UTIL route change to " + ++if [ "$OCF_RESKEY_table" = "local" ]; then ++ TABLE="$TABLE local" ++fi ++ + SYSTYPE="`uname -s`" + + usage() { +@@ -247,8 +251,14 @@ srca_stop() { + + [ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address" + +- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE || \ +- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE' failed" ++ OPTS="" ++ if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] ;then ++ PRIMARY_IP="$($IP2UTIL -4 -o addr show dev eth0 primary | awk '{split($4,a,"/");print a[1]}')" ++ OPTS="proto kernel scope host src $PRIMARY_IP" ++ fi ++ ++ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS || \ ++ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS' failed" + + if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then + $CMDCHANGE $ROUTE_WO_SRC || \ diff --git a/SOURCES/bz1744224-IPsrcaddr-3-fix-probe-issues.patch b/SOURCES/bz1744224-IPsrcaddr-3-fix-probe-issues.patch new file mode 100644 index 0000000..b9f8e7e --- /dev/null +++ b/SOURCES/bz1744224-IPsrcaddr-3-fix-probe-issues.patch @@ -0,0 +1,45 @@ +From 7afc581f6cd8fc37c3e14ece12fb16d31f1886f9 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 10 Jan 2020 14:35:56 +0100 +Subject: [PATCH] IPsrcaddr: fixes to avoid failing during probe + +--- + heartbeat/IPsrcaddr | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index f9085f082..0ef8b391f 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -75,6 +75,10 @@ USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}"; + CMDSHOW="$IP2UTIL route show $TABLE to exact $OCF_RESKEY_destination" + CMDCHANGE="$IP2UTIL route change to " + ++if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ]; then ++ CMDSHOW="$CMDSHOW src $OCF_RESKEY_ipaddress" ++fi ++ + if [ "$OCF_RESKEY_table" = "local" ]; then + TABLE="$TABLE local" + fi +@@ -183,7 +187,7 @@ export OCF_RESKEY_ip=$OCF_RESKEY_ipaddress + + srca_read() { + # Capture matching route - doublequotes prevent word splitting... 
+- ROUTE="`$CMDSHOW`" || errorexit "command '$CMDSHOW' failed" ++ ROUTE="`$CMDSHOW 2> /dev/null`" || errorexit "command '$CMDSHOW' failed" + + # ... so we can make sure there is only 1 matching route + [ 1 -eq `echo "$ROUTE" | wc -l` ] || \ +@@ -199,6 +203,11 @@ srca_read() { + # and what remains after stripping out the source ip address clause + ROUTE_WO_SRC=`echo $ROUTE | sed "s/$MATCHROUTE/\1\5/"` + ++ # using "src " only returns output if there's a match ++ if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ]; then ++ [ -z "$ROUTE" ] && return 1 || return 0 ++ fi ++ + [ -z "$SRCIP" ] && return 1 + [ $SRCIP = $1 ] && return 0 + return 2 diff --git a/SOURCES/bz1744224-IPsrcaddr-4-fix-hardcoded-device.patch b/SOURCES/bz1744224-IPsrcaddr-4-fix-hardcoded-device.patch new file mode 100644 index 0000000..e0e1d04 --- /dev/null +++ b/SOURCES/bz1744224-IPsrcaddr-4-fix-hardcoded-device.patch @@ -0,0 +1,23 @@ +From 5f0d15ad70098510a3782d6fd18d6eacfb51b0cf Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 16 Jan 2020 14:59:26 +0100 +Subject: [PATCH] IPsrcaddr: remove hardcoded device when using destination + parameter + +--- + heartbeat/IPsrcaddr | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index 0ef8b391f..7cdc3a9fe 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -262,7 +262,7 @@ srca_stop() { + + OPTS="" + if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] ;then +- PRIMARY_IP="$($IP2UTIL -4 -o addr show dev eth0 primary | awk '{split($4,a,"/");print a[1]}')" ++ PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')" + OPTS="proto kernel scope host src $PRIMARY_IP" + fi + diff --git a/SOURCES/bz1745713-rabbitmq-cluster-1-monitor-mnesia-status.patch b/SOURCES/bz1745713-rabbitmq-cluster-1-monitor-mnesia-status.patch new file mode 100644 index 0000000..fab8bfd --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-1-monitor-mnesia-status.patch @@ -0,0 +1,57 @@ +From fcaa52bb98a8686d993550c6f4ab7867625c8059 Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Wed, 29 Aug 2018 16:18:55 -0400 +Subject: [PATCH] rabbitmq-cluster: get cluster status from mnesia during + monitor + +If mnesia is not running (for example if `rabbitmqctl stop_app` has +been called, or the service has paused during partition due to the +pause_minority strategy) then the cluster_status command to +rabbitmqctl will read the cached cluster status from disk and the +command returns 0 even though the service isn't really running at all. + +Instead, force the cluster status to be read from mnesia. If mnesia +is not running due to the above or similar circumstances, the command +will catch that and properly fail the monitor action. + +Resolves: RHBZ#1595753 +--- + heartbeat/rabbitmq-cluster | 20 +++++--------------- + 1 file changed, 5 insertions(+), 15 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index a7d2db614..204917475 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -181,26 +181,16 @@ remove_pid () { + rmq_monitor() { + local rc + +- $RMQ_CTL cluster_status > /dev/null 2>&1 +- rc=$? +- case "$rc" in +- 0) ++ if $RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' 
| grep -q '^{ok'; then + ocf_log debug "RabbitMQ server is running normally" + rmq_write_nodename +- ++ + return $OCF_SUCCESS +- ;; +- 2|68|69|70|75|78) +- ocf_log info "RabbitMQ server is not running" ++ else ++ ocf_log info "RabbitMQ server could not get cluster status from mnesia" + rmq_delete_nodename + return $OCF_NOT_RUNNING +- ;; +- *) +- ocf_log err "Unexpected return code from '$RMQ_CTL cluster_status' exit code: $rc" +- rmq_delete_nodename +- return $OCF_ERR_GENERIC +- ;; +- esac ++ fi + } + + rmq_init_and_wait() diff --git a/SOURCES/bz1745713-rabbitmq-cluster-2-fail-when-in-minority-partition.patch b/SOURCES/bz1745713-rabbitmq-cluster-2-fail-when-in-minority-partition.patch new file mode 100644 index 0000000..72f5ff6 --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-2-fail-when-in-minority-partition.patch @@ -0,0 +1,96 @@ +From cc23c5523a0185fa557a5ab9056d50a60300d12a Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Tue, 16 Oct 2018 16:21:25 -0400 +Subject: [PATCH] rabbitmq-cluster: fail monitor when node is in minority + partition + +It's possible for mnesia to still be running, but for mnesia to be +partitioned. And it's also possible to get into this state without +pacemaker seeing the node go down so no corrective action is taken. + +When monitoring, check the number of nodes that pacemaker thinks is +running, and compare to the number of nodes that mnesia thinks is +running. If mnesia only sees a minority of the total nodes, fail it +so corrective action can be taken to rejoin the cluster. + +This also adds a new function, rmq_app_running, which simply checks +whether the app is running or not and does not care about the +partition status. This is now used instead of the full monitor in a +few places where we don't care about partition state. + +Resolves: RHBZ#1639826 +--- + heartbeat/rabbitmq-cluster | 28 +++++++++++++++++++++++++--- + 1 file changed, 25 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 204917475..78b2bbadf 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -178,10 +178,31 @@ remove_pid () { + rm -f ${RMQ_PID_FILE} > /dev/null 2>&1 + } + ++rmq_app_running() { ++ if $RMQ_CTL eval 'application:which_applications().' | grep -q '{rabbit,'; then ++ ocf_log debug "RabbitMQ application is running" ++ return $OCF_SUCCESS ++ else ++ ocf_log debug "RabbitMQ application is stopped" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ + rmq_monitor() { + local rc + + if $RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' | grep -q '^{ok'; then ++ pcs_running=$(rmq_join_list | wc -w) ++ ocf_log debug "Pacemaker thinks ${pcs_running} RabbitMQ nodes are running" ++ rmq_running=$($RMQ_CTL eval 'length(mnesia:system_info(running_db_nodes)).') ++ ocf_log debug "RabbitMQ thinks ${rmq_running} RabbitMQ nodes are running" ++ ++ if [ $(( $rmq_running * 2 )) -lt $pcs_running ]; then ++ ocf_log info "RabbitMQ is a minority partition, failing monitor" ++ rmq_delete_nodename ++ return $OCF_ERR_GENERIC ++ fi ++ + ocf_log debug "RabbitMQ server is running normally" + rmq_write_nodename + +@@ -215,7 +236,7 @@ rmq_init_and_wait() + return $OCF_ERR_GENERIC + fi + +- rmq_monitor ++ rmq_app_running + return $? + } + +@@ -236,6 +257,7 @@ rmq_start_first() + if [ $rc -eq 0 ]; then + rc=$OCF_SUCCESS + ocf_log info "cluster bootstrapped" ++ rmq_write_nodename + + if [ -n "$OCF_RESKEY_set_policy" ]; then + # do not quote set_policy, we are passing in arguments +@@ -492,7 +514,7 @@ rmq_stop() { + end. 
+ " + +- rmq_monitor ++ rmq_app_running + if [ $? -eq $OCF_NOT_RUNNING ]; then + return $OCF_SUCCESS + fi +@@ -508,7 +530,7 @@ rmq_stop() { + #TODO add kill logic + stop_wait=1 + while [ $stop_wait = 1 ]; do +- rmq_monitor ++ rmq_app_running + rc=$? + if [ "$rc" -eq $OCF_NOT_RUNNING ]; then + stop_wait=0 diff --git a/SOURCES/bz1745713-rabbitmq-cluster-3-fix-stop-regression.patch b/SOURCES/bz1745713-rabbitmq-cluster-3-fix-stop-regression.patch new file mode 100644 index 0000000..8b422eb --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-3-fix-stop-regression.patch @@ -0,0 +1,63 @@ +From 19ee29342f8bb573722991b8cbe4503309ad0bf9 Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Fri, 2 Nov 2018 13:12:53 -0400 +Subject: [PATCH] rabbitmq-cluster: fix regression in rmq_stop + +This regression was introduced in PR#1249 (cc23c55). The stop action +was modified to use rmq_app_running in order to check the service +status, which allows for the following sequence of events: + +- service is started, unclustered +- stop_app is called +- cluster_join is attempted and fails +- stop is called + +Because stop_app was called, rmq_app_running returns $OCF_NOT_RUNNING +and the stop action is a no-op. This means the erlang VM continues +running. + +When the start action is attempted again, a new erlang VM is launched, +but this VM fails to boot because the old one is still running and is +registered with the same name (rabbit@nodename). + +This adds a new function, rmq_node_alive, which does a simple eval to +test whether the erlang VM is up, independent of the rabbit app. The +stop action now uses rmq_node_alive to check the service status, so +even if stop_app was previously called, the erlang VM will be stopped +properly. + +Resolves: RHBZ#1639826 +--- + heartbeat/rabbitmq-cluster | 12 +++++++++++- + 1 file changed, 11 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 78b2bbadf..a2de9dc20 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -188,6 +188,16 @@ rmq_app_running() { + fi + } + ++rmq_node_alive() { ++ if $RMQ_CTL eval 'ok.'; then ++ ocf_log debug "RabbitMQ node is alive" ++ return $OCF_SUCCESS ++ else ++ ocf_log debug "RabbitMQ node is down" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ + rmq_monitor() { + local rc + +@@ -514,7 +524,7 @@ rmq_stop() { + end. + " + +- rmq_app_running ++ rmq_node_alive + if [ $? -eq $OCF_NOT_RUNNING ]; then + return $OCF_SUCCESS + fi diff --git a/SOURCES/bz1745713-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch b/SOURCES/bz1745713-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch new file mode 100644 index 0000000..80fe18b --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch @@ -0,0 +1,83 @@ +From 63c9449bfa9a7fecbc0f00394699a475a384671d Mon Sep 17 00:00:00 2001 +From: Damien Ciabrini +Date: Thu, 9 Aug 2018 16:33:26 +0200 +Subject: [PATCH] rabbitmq-cluster: retry start when cluster join fails + +When a node tries to join an existing cluster, it fetches a node +list to try to connect from any of those running nodes. + +If the nodes from this list become unavailable while we're joining +the cluster, the rabbitmq server will fail to get clustered and +make the start operation fail. + +Give the resource a chance to start anyway by retrying the entire +start actions until it succeeds or until the start timeout is +reached and pacemaker stops the start operation. 
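Conceptually, the change reduces to looping on a sentinel exit code, a compressed paraphrase of the rmq_start wrapper added below:

    # retry rmq_try_start until it returns anything but the sentinel;
    # pacemaker's start timeout is the only upper bound on this loop
    rc=$RMQ_TRY_RESTART_ERROR_CODE
    while [ $rc -eq $RMQ_TRY_RESTART_ERROR_CODE ]; do
        rmq_try_start
        rc=$?
    done
    return $rc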
+ +Co-Authored-by: +Suggested-by: +--- + heartbeat/rabbitmq-cluster | 29 ++++++++++++++++++++++++++--- + 1 file changed, 26 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 9ff49e075..84f383460 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -31,6 +31,12 @@ + + ####################################################################### + ++# This arbitrary value here is used by the rmq_start action to ++# signify that the resource agent must retry the start process ++# It might potentially conflict with OCF assigned error code ++# in the future. ++RMQ_TRY_RESTART_ERROR_CODE=126 ++ + RMQ_SERVER=/usr/sbin/rabbitmq-server + RMQ_CTL=/usr/sbin/rabbitmqctl + RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia" +@@ -354,7 +360,7 @@ rmq_notify() { + return $OCF_SUCCESS + } + +-rmq_start() { ++rmq_try_start() { + local join_list="" + local rc + +@@ -384,8 +390,16 @@ rmq_start() { + rc=$? + + if [ $rc -ne 0 ]; then +- ocf_log info "node failed to join even after reseting local data. Check SELINUX policy" +- return $OCF_ERR_GENERIC ++ # we could not join the rabbitmq cluster from any of the running nodes ++ # this might be due to a unexpected reset of those nodes. Give ourself ++ # a chance to start by retrying the entire start sequence. ++ ++ ocf_log warn "Failed to join the RabbitMQ cluster from nodes ${join_list}. Stopping local unclustered rabbitmq" ++ rmq_stop ++ ++ ocf_log warn "Re-detect available rabbitmq nodes and try to start again" ++ # return an unused OCF value to signify a "retry" condition ++ return $RMQ_TRY_RESTART_ERROR_CODE + fi + + # Restore users, user permissions, and policies (if any) +@@ -443,6 +457,15 @@ rmq_start() { + return $OCF_SUCCESS + } + ++rmq_start() { ++ local rc=$RMQ_TRY_RESTART_ERROR_CODE ++ while [ $rc -eq $RMQ_TRY_RESTART_ERROR_CODE ]; do ++ rmq_try_start ++ rc=$? ++ done ++ return $rc ++} ++ + rmq_stop() { + # Backup users, user permissions, and policies + BaseDataDir=`dirname $RMQ_DATA_DIR` diff --git a/SOURCES/bz1745713-rabbitmq-cluster-5-ensure-node-attribures-removed.patch b/SOURCES/bz1745713-rabbitmq-cluster-5-ensure-node-attribures-removed.patch new file mode 100644 index 0000000..0a25333 --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-5-ensure-node-attribures-removed.patch @@ -0,0 +1,42 @@ +From 8ed87936e9ad06318cc49ea767885a405dfde11e Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Wed, 5 Dec 2018 11:45:43 -0500 +Subject: [PATCH] rabbitmq-cluster: better ensure node attributes are removed + +Ensure that the attribute is removed at the end of the stop action. +Also if rmq_app_running or rmq_node_alive shows the service as down, +ensure the attribute is deleted as well. 
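rmq_delete_nodename itself is defined elsewhere in the agent; a minimal sketch of what such a helper typically does, assuming pacemaker's crm_attribute tool and an attribute-name variable (both assumptions, neither is shown in this hunk):

    # hypothetical sketch: drop the transient (reboot-lifetime) node attribute
    rmq_delete_nodename() {
        crm_attribute -N "$NODENAME" -l reboot --name "$RMQ_CRM_ATTR_COOKIE" -D
    }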
+ +Resolves: RHBZ#1656368 +--- + heartbeat/rabbitmq-cluster | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 1643dd1e7..2dca3e216 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -184,6 +184,7 @@ rmq_app_running() { + return $OCF_SUCCESS + else + ocf_log debug "RabbitMQ application is stopped" ++ rmq_delete_nodename + return $OCF_NOT_RUNNING + fi + } +@@ -194,6 +195,7 @@ rmq_node_alive() { + return $OCF_SUCCESS + else + ocf_log debug "RabbitMQ node is down" ++ rmq_delete_nodename + return $OCF_NOT_RUNNING + fi + } +@@ -554,6 +556,7 @@ rmq_stop() { + sleep 1 + done + ++ rmq_delete_nodename + remove_pid + return $OCF_SUCCESS + } diff --git a/SOURCES/bz1745713-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch b/SOURCES/bz1745713-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch new file mode 100644 index 0000000..b39150a --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch @@ -0,0 +1,32 @@ +From 2b6e4a94c847129dd014a1efa733cd1b4a2448e6 Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Fri, 2 Nov 2018 10:11:41 -0400 +Subject: [PATCH] rabbitmq-cluster: debug log detailed output when mnesia query + fails + +--- + heartbeat/rabbitmq-cluster | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 78b2bbadf..fabfeedfb 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -191,7 +191,8 @@ rmq_app_running() { + rmq_monitor() { + local rc + +- if $RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' | grep -q '^{ok'; then ++ status=$($RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' 2>&1) ++ if echo "${status}" | grep -q '^{ok'; then + pcs_running=$(rmq_join_list | wc -w) + ocf_log debug "Pacemaker thinks ${pcs_running} RabbitMQ nodes are running" + rmq_running=$($RMQ_CTL eval 'length(mnesia:system_info(running_db_nodes)).') +@@ -209,6 +210,7 @@ rmq_monitor() { + return $OCF_SUCCESS + else + ocf_log info "RabbitMQ server could not get cluster status from mnesia" ++ ocf_log debug "${status}" + rmq_delete_nodename + return $OCF_NOT_RUNNING + fi diff --git a/SOURCES/bz1745713-rabbitmq-cluster-7-suppress-additional-output.patch b/SOURCES/bz1745713-rabbitmq-cluster-7-suppress-additional-output.patch new file mode 100644 index 0000000..8b58191 --- /dev/null +++ b/SOURCES/bz1745713-rabbitmq-cluster-7-suppress-additional-output.patch @@ -0,0 +1,87 @@ +From 5a33171b2c40e2e1587e82aad0cb7e39abcf615d Mon Sep 17 00:00:00 2001 +From: John Eckersberg +Date: Thu, 13 Dec 2018 12:58:43 -0500 +Subject: [PATCH] rabbitmq-cluster: always use quiet flag for eval calls + +On older rabbitmq versions, rabbitmqctl appends "...done." at the end +of the output. However we expect eval without this extra output so it +can be used for further processing. The -q option to rabbitmqctl +suppresses the extra output, so ensure we always pass that when +calling eval. 
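For illustration, the quiet flag matters wherever eval output is captured for further processing; a short sketch using lines from the monitor hunk below (older brokers would otherwise append the "...done." trailer to the captured value):

    RMQ_EVAL="${RMQ_CTL} eval -q"   # as defined in the hunk below
    rmq_running=$($RMQ_EVAL 'length(mnesia:system_info(running_db_nodes)).')
    # without -q, $rmq_running could contain the trailer and break the
    # arithmetic comparison against the pacemaker node count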
+ +Resolves: RHBZ#1659072 +--- + heartbeat/rabbitmq-cluster | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 2dca3e216..e82ac2399 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -39,6 +39,7 @@ RMQ_TRY_RESTART_ERROR_CODE=126 + + RMQ_SERVER=/usr/sbin/rabbitmq-server + RMQ_CTL=/usr/sbin/rabbitmqctl ++RMQ_EVAL="${RMQ_CTL} eval -q" + RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia" + RMQ_PID_DIR="/var/run/rabbitmq" + RMQ_PID_FILE="/var/run/rabbitmq/rmq.pid" +@@ -179,7 +180,7 @@ remove_pid () { + } + + rmq_app_running() { +- if $RMQ_CTL eval 'application:which_applications().' | grep -q '{rabbit,'; then ++ if $RMQ_EVAL 'application:which_applications().' | grep -q '{rabbit,'; then + ocf_log debug "RabbitMQ application is running" + return $OCF_SUCCESS + else +@@ -190,7 +191,7 @@ rmq_app_running() { + } + + rmq_node_alive() { +- if $RMQ_CTL eval 'ok.'; then ++ if $RMQ_EVAL 'ok.'; then + ocf_log debug "RabbitMQ node is alive" + return $OCF_SUCCESS + else +@@ -203,11 +204,11 @@ rmq_node_alive() { + rmq_monitor() { + local rc + +- status=$($RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' 2>&1) ++ status=$($RMQ_EVAL 'rabbit_mnesia:cluster_status_from_mnesia().' 2>&1) + if echo "${status}" | grep -q '^{ok'; then + pcs_running=$(rmq_join_list | wc -w) + ocf_log debug "Pacemaker thinks ${pcs_running} RabbitMQ nodes are running" +- rmq_running=$($RMQ_CTL eval 'length(mnesia:system_info(running_db_nodes)).') ++ rmq_running=$($RMQ_EVAL 'length(mnesia:system_info(running_db_nodes)).') + ocf_log debug "RabbitMQ thinks ${rmq_running} RabbitMQ nodes are running" + + if [ $(( $rmq_running * 2 )) -lt $pcs_running ]; then +@@ -294,7 +295,7 @@ rmq_start_first() + + rmq_is_clustered() + { +- $RMQ_CTL eval 'rabbit_mnesia:is_clustered().' | grep -q true ++ $RMQ_EVAL 'rabbit_mnesia:is_clustered().' | grep -q true + } + + rmq_join_existing() +@@ -432,7 +433,7 @@ rmq_try_start() { + + # Restore users, user permissions, and policies (if any) + BaseDataDir=`dirname $RMQ_DATA_DIR` +- $RMQ_CTL eval " ++ $RMQ_EVAL " + %% Run only if Mnesia is ready. + lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso + begin +@@ -497,7 +498,7 @@ rmq_start() { + rmq_stop() { + # Backup users, user permissions, and policies + BaseDataDir=`dirname $RMQ_DATA_DIR` +- $RMQ_CTL eval " ++ $RMQ_EVAL " + %% Run only if Mnesia is still available. + lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso + begin diff --git a/SOURCES/bz1748768-docker-fix-stop-issues.patch b/SOURCES/bz1748768-docker-fix-stop-issues.patch new file mode 100644 index 0000000..d4e6f16 --- /dev/null +++ b/SOURCES/bz1748768-docker-fix-stop-issues.patch @@ -0,0 +1,88 @@ +From 5949405d0031a4aba91c81cb28c24821ad2d439a Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Thu, 3 Jan 2019 15:05:20 -0800 +Subject: [PATCH] docker: Fix issues with stop operation + +The docker RA's stop operation doesn't behave properly in some cases. + 1. It returns a false success code in case of an error response from + the daemon. + 2. It fails at `remove_container()` if the container does not exist + but another docker object of the same name does exist. + +In case #1, the `container_exists()` function returns the same exit code +(1) if the container is not found (an expected error) or if there is an +error response from the docker daemon (an unexpected error). 
These types +of errors should be handled differently. + +In case #2, the `docker inspect` calls do not limit their search to +containers. So if a non-container object is found with a matching name, +the RA attempts to remove a container by that name. Such a container may +not exist. + +This patch fixes these issues as follows: + 1. Match an error response in `container_exists()` against the string + "No such container". + 2. Add `--type=container` to the `docker inspect` calls to restrict + the match. +--- + heartbeat/docker | 26 ++++++++++++++++++++++---- + 1 file changed, 22 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/docker b/heartbeat/docker +index f5ba83ff2..c206344ad 100755 +--- a/heartbeat/docker ++++ b/heartbeat/docker +@@ -215,7 +215,7 @@ monitor_cmd_exec() + out=$(docker exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1) + rc=$? + else +- out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(docker inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1) ++ out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(docker inspect --type=container --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1) + rc=$? + fi + +@@ -236,7 +236,25 @@ monitor_cmd_exec() + + container_exists() + { +- docker inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1 ++ local err ++ ++ err=$(docker inspect --type=container $CONTAINER 2>&1 >/dev/null) ++ ++ if [ $? -ne $OCF_SUCCESS ]; then ++ case $err in ++ *"No such container"*) ++ # Return failure instead of exiting if container does not exist ++ return 1 ++ ;; ++ *) ++ # Exit if error running command ++ ocf_exit_reason "$err" ++ exit $OCF_ERR_GENERIC ++ ;; ++ esac ++ fi ++ ++ return $OCF_SUCCESS + } + + remove_container() +@@ -265,7 +283,7 @@ docker_simple_status() + fi + + # retrieve the 'Running' attribute for the container +- val=$(docker inspect --format {{.State.Running}} $CONTAINER 2>/dev/null) ++ val=$(docker inspect --type=container --format {{.State.Running}} $CONTAINER 2>/dev/null) + if [ $? -ne 0 ]; then + #not running as a result of container not being found + return $OCF_NOT_RUNNING +@@ -295,7 +313,7 @@ docker_health_status() + # if starting takes longer than monitor timeout then upstream will make this fail. + while + +- val=$(docker inspect --format {{.State.Health.Status}} $CONTAINER 2>/dev/null) ++ val=$(docker inspect --type=container --format {{.State.Health.Status}} $CONTAINER 2>/dev/null) + if [ $? -ne 0 ]; then + #not healthy as a result of container not being found + return $OCF_NOT_RUNNING diff --git a/SOURCES/bz1750261-Route-1-dont-fence-when-parameters-not-set.patch b/SOURCES/bz1750261-Route-1-dont-fence-when-parameters-not-set.patch new file mode 100644 index 0000000..6c39248 --- /dev/null +++ b/SOURCES/bz1750261-Route-1-dont-fence-when-parameters-not-set.patch @@ -0,0 +1,35 @@ +From 1286636b768bb635e9a6b1f1fbf6267c9c3f4b03 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 19 Aug 2019 13:31:06 +0200 +Subject: [PATCH] Route: dont fence node when parameters arent set + +--- + heartbeat/Route | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/Route b/heartbeat/Route +index b4011e37d..9f92eff3a 100755 +--- a/heartbeat/Route ++++ b/heartbeat/Route +@@ -249,18 +249,18 @@ route_validate() { + if [ "${OCF_RESKEY_CRM_meta_clone}" ]; then + if [ "${OCF_RESKEY_CRM_meta_clone_node_max}" != 1 ]; then + ocf_exit_reason "Misconfigured clone parameters. 
Must set meta attribute \"clone_node_max\" to 1, got ${OCF_RESKEY_CRM_meta_clone_node_max}." +- return $OCF_ERR_ARGS ++ return $OCF_ERR_CONFIGURED + fi + fi + # Did we get a destination? + if [ -z "${OCF_RESKEY_destination}" ]; then + ocf_exit_reason "Missing required parameter \"destination\"." +- return $OCF_ERR_ARGS ++ return $OCF_ERR_CONFIGURED + fi + # Did we get either a device or a gateway address? + if [ -z "${OCF_RESKEY_device}" -a -z "${OCF_RESKEY_gateway}" ]; then + ocf_exit_reason "Must specify either \"device\", or \"gateway\", or both." +- return $OCF_ERR_ARGS ++ return $OCF_ERR_CONFIGURED + fi + # If a device has been configured, is it available on this system? + if [ -n "${OCF_RESKEY_device}" ]; then diff --git a/SOURCES/bz1750261-Route-2-validate-start-validate-all.patch b/SOURCES/bz1750261-Route-2-validate-start-validate-all.patch new file mode 100644 index 0000000..e2d012e --- /dev/null +++ b/SOURCES/bz1750261-Route-2-validate-start-validate-all.patch @@ -0,0 +1,40 @@ +From 444bdc44fc47c65f848efc0c39c1e8e6620ce10d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 11 Oct 2019 12:12:52 +0200 +Subject: [PATCH] Route: only validate for start and validate-all actions + +--- + heartbeat/Route | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/Route b/heartbeat/Route +index 9f92eff3a..8898e1afd 100755 +--- a/heartbeat/Route ++++ b/heartbeat/Route +@@ -187,6 +187,8 @@ END + } + + route_start() { ++ route_validate || exit $? ++ + route_status + status=$? + if [ $status -eq $OCF_SUCCESS ]; then +@@ -313,8 +315,6 @@ for binary in ip grep; do + check_binary $binary + done + +-route_validate || exit $? +- + case $OCF_RESKEY_family in + ip4) addr_family="-4" ;; + ip6) addr_family="-6" ;; +@@ -334,7 +334,7 @@ status|monitor) route_status;; + reload) ocf_log info "Reloading..." + route_start + ;; +-validate-all) ;; ++validate-all) route_validate;; + *) route_usage + exit $OCF_ERR_UNIMPLEMENTED + ;; diff --git a/SOURCES/bz1750352-rabbitmq-cluster-restore-users-single-node-mode.patch b/SOURCES/bz1750352-rabbitmq-cluster-restore-users-single-node-mode.patch new file mode 100644 index 0000000..1399a58 --- /dev/null +++ b/SOURCES/bz1750352-rabbitmq-cluster-restore-users-single-node-mode.patch @@ -0,0 +1,148 @@ +From c0b6356bbf5b9a1fb76b011486dfce258d395ef8 Mon Sep 17 00:00:00 2001 +From: Peter Lemenkov +Date: Fri, 6 Sep 2019 14:22:46 +0200 +Subject: [PATCH] Restore users/perms/policies even if starting in a single + node mode + +See https://bugzilla.redhat.com/1744467#c1 + +Signed-off-by: Peter Lemenkov +--- + heartbeat/rabbitmq-cluster | 109 ++++++++++++++++++++----------------- + 1 file changed, 58 insertions(+), 51 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index cf8ca21a6..7837e9e3c 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -114,6 +114,62 @@ rmq_wipe_data() + rm -rf $RMQ_DATA_DIR > /dev/null 2>&1 + } + ++rmq_restore_users_perms_policies() ++{ ++ # Restore users, user permissions, and policies (if any) ++ BaseDataDir=`dirname $RMQ_DATA_DIR` ++ $RMQ_EVAL " ++ %% Run only if Mnesia is ready. 
++ lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso ++ begin ++ Restore = fun(Table, PostprocessFun, Filename) -> ++ case file:consult(Filename) of ++ {error, _} -> ++ ok; ++ {ok, [Result]} -> ++ lists:foreach(fun(X) -> mnesia:dirty_write(Table, PostprocessFun(X)) end, Result), ++ file:delete(Filename) ++ end ++ end, ++ ++ %% Restore users ++ ++ Upgrade = fun ++ ({internal_user, A, B, C}) -> {internal_user, A, B, C, rabbit_password_hashing_md5}; ++ ({internal_user, A, B, C, D}) -> {internal_user, A, B, C, D} ++ end, ++ ++ Downgrade = fun ++ ({internal_user, A, B, C}) -> {internal_user, A, B, C}; ++ ({internal_user, A, B, C, rabbit_password_hashing_md5}) -> {internal_user, A, B, C}; ++ %% Incompatible scheme, so we will loose user's password ('B' value) during conversion. ++ %% Unfortunately, this case will require manual intervention - user have to run: ++ %% rabbitmqctl change_password ++ ({internal_user, A, B, C, _}) -> {internal_user, A, B, C} ++ end, ++ ++ %% Check db scheme first ++ [WildPattern] = ets:select(mnesia_gvar, [ { {{rabbit_user, wild_pattern}, '\\\$1'}, [], ['\\\$1'] } ]), ++ case WildPattern of ++ %% Version < 3.6.0 ++ {internal_user,'_','_','_'} -> ++ Restore(rabbit_user, Downgrade, \"$BaseDataDir/users.erl\"); ++ %% Version >= 3.6.0 ++ {internal_user,'_','_','_','_'} -> ++ Restore(rabbit_user, Upgrade, \"$BaseDataDir/users.erl\") ++ end, ++ ++ NoOp = fun(X) -> X end, ++ ++ %% Restore user permissions ++ Restore(rabbit_user_permission, NoOp, \"$BaseDataDir/users_perms.erl\"), ++ ++ %% Restore policies ++ Restore(rabbit_runtime_parameters, NoOp, \"$BaseDataDir/policies.erl\") ++ end. ++ " ++} ++ + rmq_local_node() + { + +@@ -411,6 +467,7 @@ rmq_try_start() { + if [ -z "$join_list" ]; then + rmq_start_first + rc=$? ++ rmq_restore_users_perms_policies + return $rc + fi + +@@ -437,58 +494,8 @@ rmq_try_start() { + return $RMQ_TRY_RESTART_ERROR_CODE + fi + +- # Restore users, user permissions, and policies (if any) +- BaseDataDir=`dirname $RMQ_DATA_DIR` +- $RMQ_EVAL " +- %% Run only if Mnesia is ready. +- lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso +- begin +- Restore = fun(Table, PostprocessFun, Filename) -> +- case file:consult(Filename) of +- {error, _} -> +- ok; +- {ok, [Result]} -> +- lists:foreach(fun(X) -> mnesia:dirty_write(Table, PostprocessFun(X)) end, Result), +- file:delete(Filename) +- end +- end, ++ rmq_restore_users_perms_policies + +- %% Restore users +- +- Upgrade = fun +- ({internal_user, A, B, C}) -> {internal_user, A, B, C, rabbit_password_hashing_md5}; +- ({internal_user, A, B, C, D}) -> {internal_user, A, B, C, D} +- end, +- +- Downgrade = fun +- ({internal_user, A, B, C}) -> {internal_user, A, B, C}; +- ({internal_user, A, B, C, rabbit_password_hashing_md5}) -> {internal_user, A, B, C}; +- %% Incompatible scheme, so we will loose user's password ('B' value) during conversion. 
+- %% Unfortunately, this case will require manual intervention - user have to run: +- %% rabbitmqctl change_password +- ({internal_user, A, B, C, _}) -> {internal_user, A, B, C} +- end, +- +- %% Check db scheme first +- [WildPattern] = ets:select(mnesia_gvar, [ { {{rabbit_user, wild_pattern}, '\\\$1'}, [], ['\\\$1'] } ]), +- case WildPattern of +- %% Version < 3.6.0 +- {internal_user,'_','_','_'} -> +- Restore(rabbit_user, Downgrade, \"$BaseDataDir/users.erl\"); +- %% Version >= 3.6.0 +- {internal_user,'_','_','_','_'} -> +- Restore(rabbit_user, Upgrade, \"$BaseDataDir/users.erl\") +- end, +- +- NoOp = fun(X) -> X end, +- +- %% Restore user permissions +- Restore(rabbit_user_permission, NoOp, \"$BaseDataDir/users_perms.erl\"), +- +- %% Restore policies +- Restore(rabbit_runtime_parameters, NoOp, \"$BaseDataDir/policies.erl\") +- end. +- " + return $OCF_SUCCESS + } + diff --git a/SOURCES/bz1751700-IPaddr2-1-sanitize-IPv6-IPs.patch b/SOURCES/bz1751700-IPaddr2-1-sanitize-IPv6-IPs.patch new file mode 100644 index 0000000..74d3628 --- /dev/null +++ b/SOURCES/bz1751700-IPaddr2-1-sanitize-IPv6-IPs.patch @@ -0,0 +1,47 @@ +From 8ecfa95fff384ed047fd804016abdbbdcdd96d27 Mon Sep 17 00:00:00 2001 +From: Keisuke MORI +Date: Wed, 11 Sep 2019 15:33:37 +0900 +Subject: [PATCH] Low: IPaddr2: fix to work properly with unsanitized IPv6 + addresses + +`ip route get` shows the sanitized address at $1 or $2 depending on +the address is already assigned to the node or not. +``` +[root@centos73-1 ~]# /sbin/ip route get 2001:db8:101::0001 +2001:db8:101::1 dev eth1 proto ra src 2001:db8:101:0:XXXX:XXXX:XXXX:XXXX metric 100 +[root@centos73-1 ~]# /sbin/ip addr add 2001:db8:101::0001/64 dev eth1 +[root@centos73-1 ~]# /sbin/ip route get 2001:db8:101::0001 +local 2001:db8:101::1 dev lo table local proto none src 2001:db8:101::1 metric 0 +``` + +It can not be sanitized if the address is unreachable and on the recent distributions +(probably depending on the iproute package version) +``` +[root@centos73-1 ~]# /sbin/ip route get 2001:db8:201::0001 +unreachable 2001:db8:201::1 dev lo table unspec proto kernel src 2001:db8:101:0:XXXX:XXXX:XXXX:XXXX metric 429496 +``` +``` +[root@rhel80-1 ~]# /sbin/ip route get 200:db8:201::0001 +RTNETLINK answers: Network is unreachable +``` +--- + heartbeat/IPaddr2 | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index 041ace3a2..4f28ddab6 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -477,6 +477,12 @@ ip_init() { + fi + else + FAMILY=inet6 ++ # address sanitization defined in RFC5952 ++ SANITIZED_IP=$($IP2UTIL route get $OCF_RESKEY_ip | awk '$1~/:/ {print $1} $2~/:/ {print $2}') ++ if [ -n "$SANITIZED_IP" ]; then ++ OCF_RESKEY_ip="$SANITIZED_IP" ++ fi ++ + if ocf_is_true $OCF_RESKEY_lvs_support ;then + ocf_exit_reason "The IPv6 does not support lvs_support" + exit $OCF_ERR_CONFIGURED diff --git a/SOURCES/bz1751700-IPaddr2-2-return-empty-when-sanitation-fails.patch b/SOURCES/bz1751700-IPaddr2-2-return-empty-when-sanitation-fails.patch new file mode 100644 index 0000000..32f5e06 --- /dev/null +++ b/SOURCES/bz1751700-IPaddr2-2-return-empty-when-sanitation-fails.patch @@ -0,0 +1,22 @@ +From 7eff4e17641cc1463e61d772af16d17264477523 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 12 Sep 2019 12:51:54 +0200 +Subject: [PATCH] IPaddr2: IPv6 return empty string when sanitation fails + +--- + heartbeat/IPaddr2 | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 
+index 4f28ddab6..1d39ae514 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -478,7 +478,7 @@ ip_init() { + else + FAMILY=inet6 + # address sanitization defined in RFC5952 +- SANITIZED_IP=$($IP2UTIL route get $OCF_RESKEY_ip | awk '$1~/:/ {print $1} $2~/:/ {print $2}') ++ SANITIZED_IP=$($IP2UTIL route get $OCF_RESKEY_ip 2> /dev/null | awk '$1~/:/ {print $1} $2~/:/ {print $2}') + if [ -n "$SANITIZED_IP" ]; then + OCF_RESKEY_ip="$SANITIZED_IP" + fi diff --git a/SOURCES/bz1751962-nfsserver-1-systemd-perf-improvements.patch b/SOURCES/bz1751962-nfsserver-1-systemd-perf-improvements.patch new file mode 100644 index 0000000..e577a2e --- /dev/null +++ b/SOURCES/bz1751962-nfsserver-1-systemd-perf-improvements.patch @@ -0,0 +1,77 @@ +From 530c48138f7dedaf99ae1ca98865d2f8b7432475 Mon Sep 17 00:00:00 2001 +From: Eberhard Kuemmerle +Date: Thu, 12 Sep 2019 21:10:43 +0200 +Subject: [PATCH] nfsserver: performance improvements for systemd enabled + systems + +> I found two critical actions in the script: +> - systemctl status nfs-server (which also calls journalctl) +> - systemctl list-unit-files + +source: +https://lists.clusterlabs.org/pipermail/developers/2019-September/002214.html +--- + heartbeat/nfsserver | 37 +++++++++++++++++++------------------ + 1 file changed, 19 insertions(+), 18 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index bf59da98e..8527a90f3 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -233,24 +233,25 @@ set_exec_mode() + return 0 + fi + +- ## +- # Attempt systemd (with nfs-lock.service). +- ## + if which systemctl > /dev/null 2>&1; then +- if systemctl list-unit-files | grep nfs-server > /dev/null && systemctl list-unit-files | grep nfs-lock > /dev/null; then +- EXEC_MODE=2 +- # when using systemd, the nfs-lock service file handles nfsv3 locking daemons for us. +- return 0 +- fi +- fi ++ if systemctl list-unit-files 'nfs-*' | grep nfs-server > /dev/null; then ++ ++ ## ++ # Attempt systemd (with nfs-lock.service). ++ ## ++ if systemctl list-unit-files 'nfs-*' | grep nfs-lock > /dev/null; then ++ EXEC_MODE=2 ++ # when using systemd, the nfs-lock service file handles nfsv3 locking daemons for us. ++ return 0 ++ fi + +- ## +- # Attempt systemd (with rpc-statd.service). +- ## +- if which systemctl > /dev/null 2>&1; then +- if systemctl list-unit-files | grep nfs-server > /dev/null && systemctl list-unit-files | grep rpc-statd > /dev/null; then +- EXEC_MODE=3 +- return 0 ++ ## ++ # Attempt systemd (with rpc-statd.service). ++ ## ++ if systemctl list-unit-files 'rpc-*' | grep rpc-statd > /dev/null; then ++ EXEC_MODE=3 ++ return 0 ++ fi + fi + fi + +@@ -272,12 +273,12 @@ nfs_exec() + 2) if ! echo $svc | grep -q "\."; then + svc="${svc}.service" + fi +- systemctl $cmd $svc ++ systemctl -n0 $cmd $svc + ;; + 3) if ! 
echo $svc | grep -q "\."; then + svc="${svc}.service" + fi +- systemctl $cmd $svc ++ systemctl -n0 $cmd $svc + ;; + esac + } diff --git a/SOURCES/bz1751962-nfsserver-2-systemd-use-no-legend.patch b/SOURCES/bz1751962-nfsserver-2-systemd-use-no-legend.patch new file mode 100644 index 0000000..bbdc807 --- /dev/null +++ b/SOURCES/bz1751962-nfsserver-2-systemd-use-no-legend.patch @@ -0,0 +1,38 @@ +From ca9d2f9c2d23a9dc783e0d52419790d0d441232c Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 24 Sep 2019 09:12:47 +0200 +Subject: [PATCH] nfsserver: use "--no-legend" for systemctl "list-unit-files" + calls + +--- + heartbeat/nfsserver | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 8527a90f3..acef0147a 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -234,12 +234,12 @@ set_exec_mode() + fi + + if which systemctl > /dev/null 2>&1; then +- if systemctl list-unit-files 'nfs-*' | grep nfs-server > /dev/null; then ++ if systemctl --no-legend list-unit-files 'nfs-*' | grep nfs-server > /dev/null; then + + ## + # Attempt systemd (with nfs-lock.service). + ## +- if systemctl list-unit-files 'nfs-*' | grep nfs-lock > /dev/null; then ++ if systemctl --no-legend list-unit-files 'nfs-*' | grep nfs-lock > /dev/null; then + EXEC_MODE=2 + # when using systemd, the nfs-lock service file handles nfsv3 locking daemons for us. + return 0 +@@ -248,7 +248,7 @@ set_exec_mode() + ## + # Attempt systemd (with rpc-statd.service). + ## +- if systemctl list-unit-files 'rpc-*' | grep rpc-statd > /dev/null; then ++ if systemctl --no-legend list-unit-files 'rpc-*' | grep rpc-statd > /dev/null; then + EXEC_MODE=3 + return 0 + fi diff --git a/SOURCES/bz1755760-NovaEvacuate-evacuate_delay.patch b/SOURCES/bz1755760-NovaEvacuate-evacuate_delay.patch new file mode 100644 index 0000000..9b429d7 --- /dev/null +++ b/SOURCES/bz1755760-NovaEvacuate-evacuate_delay.patch @@ -0,0 +1,50 @@ +From 8b9c49fd965f73709d5a6e2c21987ba26af4856b Mon Sep 17 00:00:00 2001 +From: Luca Miccini +Date: Wed, 25 Sep 2019 17:12:39 +0200 +Subject: [PATCH] Add a configurable delay to Nova Evacuate calls + +In case /var/lib/nova/instances resides on NFS we have seen migrations +failing with 'Failed to get "write" lock - Is another process using the +image' errors. + +This has been tracked down to grace/lease timeouts not having expired +before attempting the migration/evacuate, so in this cases it might be +desirable to delay the nova evacuate call to give the storage time to +release the locks. + +Change-Id: Ie2fe784202d754eda38092479b1ab3ff4d02136a +Resolves: rhbz#1740069 +--- + +diff --git a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate +index 810f30a..596f520 100644 +--- a/heartbeat/NovaEvacuate ++++ b/heartbeat/NovaEvacuate +@@ -125,6 +125,15 @@ + + + ++ ++ ++Allows delaying the nova evacuate API call, e.g. to give a storage array time to clean ++up eventual locks/leases. ++ ++Nova evacuate delay ++ ++ ++ + + + +@@ -216,6 +225,11 @@ + fence_agent="fence_evacuate" + fi + ++ if [ ${OCF_RESKEY_evacuate_delay} != 0 ]; then ++ ocf_log info "Delaying nova evacuate by $OCF_RESKEY_evacuate_delay seconds" ++ sleep ${OCF_RESKEY_evacuate_delay} ++ fi ++ + ocf_log notice "Initiating evacuation of $node with $fence_agent" + $fence_agent ${fence_options} -o status -n ${node} + if [ $? 
= 1 ]; then diff --git a/SOURCES/bz1757837-IPsrcaddr-fix-regression-without-NetworkManager.patch b/SOURCES/bz1757837-IPsrcaddr-fix-regression-without-NetworkManager.patch new file mode 100644 index 0000000..ed60aca --- /dev/null +++ b/SOURCES/bz1757837-IPsrcaddr-fix-regression-without-NetworkManager.patch @@ -0,0 +1,75 @@ +From 6052e8fd37d23f46db217f915b445c7e67dccb34 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 4 Apr 2019 13:31:27 +0200 +Subject: [PATCH] IPsrcaddr: make proto optional to fix regression when used + without NetworkManager + +--- + heartbeat/IPsrcaddr | 21 +++++++++++++++++---- + 1 file changed, 17 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr +index 4ca3d2364..5a447196e 100755 +--- a/heartbeat/IPsrcaddr ++++ b/heartbeat/IPsrcaddr +@@ -50,12 +50,17 @@ + + ####################################################################### + # Initialization: +- + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ++# Defaults ++OCF_RESKEY_proto_default="" ++ ++: ${OCF_RESKEY_proto=${OCF_RESKEY_proto_default}} + ####################################################################### + ++[ -z "$OCF_RESKEY_proto" ] && PROTO="" || PROTO="proto $OCF_RESKEY_proto" ++ + USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}"; + + CMDSHOW="$IP2UTIL route show to exact 0.0.0.0/0" +@@ -97,6 +102,14 @@ dotted quad notation 255.255.255.0). + Netmask + + ++ ++ ++ ++Proto to match when finding network. E.g. "kernel". ++ ++Proto ++ ++ + + + +@@ -172,7 +185,7 @@ srca_start() { + rc=$OCF_SUCCESS + ocf_log info "The ip route has been already set.($NETWORK, $INTERFACE, $ROUTE_WO_SRC)" + else +- ip route replace $NETWORK dev $INTERFACE src $1 || \ ++ $IP2UTIL route replace $NETWORK dev $INTERFACE src $1 || \ + errorexit "command 'ip route replace $NETWORK dev $INTERFACE src $1' failed" + + $CMDCHANGE $ROUTE_WO_SRC src $1 || \ +@@ -204,7 +217,7 @@ srca_stop() { + + [ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address" + +- ip route replace $NETWORK dev $INTERFACE || \ ++ $IP2UTIL route replace $NETWORK dev $INTERFACE || \ + errorexit "command 'ip route replace $NETWORK dev $INTERFACE' failed" + + $CMDCHANGE $ROUTE_WO_SRC || \ +@@ -473,7 +486,7 @@ rc=$? + } + + INTERFACE=`echo $findif_out | awk '{print $1}'` +-NETWORK=`ip route list dev $INTERFACE scope link proto kernel match $ipaddress|grep -o '^[^ ]*'` ++NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` + + case $1 in + start) srca_start $ipaddress diff --git a/SOURCES/bz1759115-aws-vpc-route53-1-update.patch b/SOURCES/bz1759115-aws-vpc-route53-1-update.patch new file mode 100644 index 0000000..9c689b1 --- /dev/null +++ b/SOURCES/bz1759115-aws-vpc-route53-1-update.patch @@ -0,0 +1,273 @@ +--- ClusterLabs-resource-agents-e711383f/heartbeat/aws-vpc-route53.in 2018-06-29 14:05:02.000000000 +0200 ++++ /home/oalbrigt/src/resource-agents/heartbeat/aws-vpc-route53.in 2019-11-07 12:24:18.822111495 +0100 +@@ -152,9 +152,15 @@ + END + } + +-ec2ip_validate() { ++r53_validate() { + ocf_log debug "function: validate" + ++ # Check for required binaries ++ ocf_log debug "Checking for required binaries" ++ for command in curl dig; do ++ check_binary "$command" ++ done ++ + # Full name + [[ -z "$OCF_RESKEY_fullname" ]] && ocf_log error "Full name parameter not set $OCF_RESKEY_fullname!" 
&& exit $OCF_ERR_CONFIGURED + +@@ -175,32 +181,111 @@ + ocf_log debug "ok" + + if [ -n "$OCF_RESKEY_profile" ]; then +- AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile" ++ AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" + else +- AWS_PROFILE_OPT="--profile default" ++ AWS_PROFILE_OPT="--profile default --cli-connect-timeout 10" + fi + + return $OCF_SUCCESS + } + +-ec2ip_monitor() { +- ec2ip_validate ++r53_monitor() { ++ # ++ # For every start action the agent will call Route53 API to check for DNS record ++ # otherwise it will try to get results directly bu querying the DNS using "dig". ++ # Due to complexity in some DNS architectures "dig" can fail, and if this happens ++ # the monitor will fallback to the Route53 API call. ++ # ++ # There will be no failure, failover or restart of the agent if the monitor operation fails ++ # hence we only return $OCF_SUCESS in this function ++ # ++ # In case of the monitor operation detects a wrong or non-existent Route53 DNS entry ++ # it will try to fix the existing one, or create it again ++ # ++ # ++ ARECORD="" ++ IPREGEX="^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$" ++ r53_validate + ocf_log debug "Checking Route53 record sets" +- IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')" +- ARECORD="$(aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query "ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" | grep RESOURCERECORDS | /usr/bin/awk '{ print $2 }' )" +- ocf_log debug "Found IP address: $ARECORD ." +- if [ "${ARECORD}" == "${IPADDRESS}" ]; then +- ocf_log debug "ARECORD $ARECORD found" ++ # ++ IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" ++ # ++ if [ "$__OCF_ACTION" = "start" ] || ocf_is_probe ; then ++ # ++ cmd="aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" ++ ocf_log info "Route53 Agent Starting or probing - executing monitoring API call: $cmd" ++ CLIRES="$($cmd 2>&1)" ++ rc=$? ++ ocf_log debug "awscli returned code: $rc" ++ if [ $rc -ne 0 ]; then ++ CLIRES=$(echo $CLIRES | grep -v '^$') ++ ocf_log warn "Route53 API returned an error: $CLIRES" ++ ocf_log warn "Skipping cluster action due to API call error" ++ return $OCF_ERR_GENERIC ++ fi ++ ARECORD=$(echo $CLIRES | grep RESOURCERECORDS | awk '{ print $5 }') ++ # ++ if ocf_is_probe; then ++ # ++ # Prevent R53 record change during probe ++ # ++ if [[ $ARECORD =~ $IPREGEX ]] && [ "$ARECORD" != "$IPADDRESS" ]; then ++ ocf_log debug "Route53 DNS record $ARECORD found at probing, disregarding" ++ return $OCF_NOT_RUNNING ++ fi ++ fi ++ else ++ # ++ cmd="dig +retries=3 +time=5 +short $OCF_RESKEY_fullname 2>/dev/null" ++ ocf_log info "executing monitoring command : $cmd" ++ ARECORD="$($cmd)" ++ rc=$? ++ ocf_log debug "dig return code: $rc" ++ # ++ if [[ ! $ARECORD =~ $IPREGEX ]] || [ $rc -ne 0 ]; then ++ ocf_log info "Fallback to Route53 API query due to DNS resolution failure" ++ cmd="aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" ++ ocf_log debug "executing monitoring API call: $cmd" ++ CLIRES="$($cmd 2>&1)" ++ rc=$? 
++ ocf_log debug "awscli return code: $rc" ++ if [ $rc -ne 0 ]; then ++ CLIRES=$(echo $CLIRES | grep -v '^$') ++ ocf_log warn "Route53 API returned an error: $CLIRES" ++ ocf_log warn "Monitor skipping cluster action due to API call error" ++ return $OCF_SUCCESS ++ fi ++ ARECORD=$(echo $CLIRES | grep RESOURCERECORDS | awk '{ print $5 }') ++ fi ++ # ++ fi ++ ocf_log info "Route53 DNS record pointing $OCF_RESKEY_fullname to IP address $ARECORD" ++ # ++ if [ "$ARECORD" == "$IPADDRESS" ]; then ++ ocf_log info "Route53 DNS record $ARECORD found" ++ return $OCF_SUCCESS ++ elif [[ $ARECORD =~ $IPREGEX ]] && [ "$ARECORD" != "$IPADDRESS" ]; then ++ ocf_log info "Route53 DNS record points to a different host, setting DNS record on Route53 to this host" ++ _update_record "UPSERT" "$IPADDRESS" + return $OCF_SUCCESS + else +- ocf_log debug "No ARECORD found" +- return $OCF_NOT_RUNNING ++ ocf_log info "No Route53 DNS record found, setting DNS record on Route53 to this host" ++ _update_record "UPSERT" "$IPADDRESS" ++ return $OCF_SUCCESS + fi + + return $OCF_SUCCESS + } + + _update_record() { ++ # ++ # This function is the one that will actually execute Route53's API call ++ # and configure the DNS record using the correct API calls and parameters ++ # ++ # It creates a temporary JSON file under /tmp with the required API payload ++ # ++ # Failures in this function are critical and will cause the agent to fail ++ # + update_action="$1" + IPADDRESS="$2" + ocf_log info "Updating Route53 $OCF_RESKEY_hostedzoneid with $IPADDRESS for $OCF_RESKEY_fullname" +@@ -209,19 +294,19 @@ + ocf_exit_reason "Failed to create temporary file for record update" + exit $OCF_ERR_GENERIC + fi +- cat >>"${ROUTE53RECORD}" <<-EOF ++ cat >>"$ROUTE53RECORD" <<-EOF + { + "Comment": "Update record to reflect new IP address for a system ", + "Changes": [ + { +- "Action": "${update_action}", ++ "Action": "$update_action", + "ResourceRecordSet": { +- "Name": "${OCF_RESKEY_fullname}", ++ "Name": "$OCF_RESKEY_fullname", + "Type": "A", +- "TTL": ${OCF_RESKEY_ttl}, ++ "TTL": $OCF_RESKEY_ttl, + "ResourceRecords": [ + { +- "Value": "${IPADDRESS}" ++ "Value": "$IPADDRESS" + } + ] + } +@@ -229,46 +314,53 @@ + ] + } + EOF +- cmd="aws --profile ${OCF_RESKEY_profile} route53 change-resource-record-sets --hosted-zone-id ${OCF_RESKEY_hostedzoneid} \ +- --change-batch file://${ROUTE53RECORD} " ++ cmd="aws --profile $OCF_RESKEY_profile route53 change-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --change-batch file://$ROUTE53RECORD " + ocf_log debug "Executing command: $cmd" +- CHANGEID=$($cmd | grep CHANGEINFO | /usr/bin/awk -F'\t' '{ print $3 }' ) +- ocf_log debug "Change id: ${CHANGEID}" +- rmtempfile ${ROUTE53RECORD} +- CHANGEID=$(echo $CHANGEID |cut -d'/' -f 3 |cut -d'"' -f 1 ) +- ocf_log debug "Change id: ${CHANGEID}" ++ CLIRES="$($cmd 2>&1)" ++ rc=$? 
++ ocf_log debug "awscli returned code: $rc" ++ if [ $rc -ne 0 ]; then ++ CLIRES=$(echo $CLIRES | grep -v '^$') ++ ocf_log warn "Route53 API returned an error: $CLIRES" ++ ocf_log warn "Skipping cluster action due to API call error" ++ return $OCF_ERR_GENERIC ++ fi ++ CHANGEID=$(echo $CLIRES | awk '{ print $12 }') ++ ocf_log debug "Change id: $CHANGEID" ++ rmtempfile $ROUTE53RECORD ++ CHANGEID=$(echo $CHANGEID | cut -d'/' -f 3 | cut -d'"' -f 1 ) ++ ocf_log debug "Change id: $CHANGEID" + STATUS="PENDING" +- MYSECONDS=2 ++ MYSECONDS=20 + while [ "$STATUS" = 'PENDING' ]; do +- sleep ${MYSECONDS} +- STATUS="$(aws --profile ${OCF_RESKEY_profile} route53 get-change --id $CHANGEID | grep CHANGEINFO | /usr/bin/awk -F'\t' '{ print $4 }' |cut -d'"' -f 2 )" +- ocf_log debug "Waited for ${MYSECONDS} seconds and checked execution of Route 53 update status: ${STATUS} " ++ sleep $MYSECONDS ++ STATUS="$(aws --profile $OCF_RESKEY_profile route53 get-change --id $CHANGEID | grep CHANGEINFO | awk -F'\t' '{ print $4 }' |cut -d'"' -f 2 )" ++ ocf_log debug "Waited for $MYSECONDS seconds and checked execution of Route 53 update status: $STATUS " + done + } + +-ec2ip_stop() { +- ocf_log info "Bringing down Route53 agent. (Will remove ARECORD)" +- IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')" +- ARECORD="$(aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query "ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" | grep RESOURCERECORDS | /usr/bin/awk '{ print $2 }' )" +- ocf_log debug "Found IP address: $ARECORD ." +- if [ ${ARECORD} != ${IPADDRESS} ]; then +- ocf_log debug "No ARECORD found" +- return $OCF_SUCCESS +- else +- # determine IP address +- IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')" +- # Patch file +- ocf_log debug "Deleting IP address to ${IPADDRESS}" +- return $OCF_SUCCESS +- fi +- +- _update_record "DELETE" "$IPADDRESS" ++r53_stop() { ++ # ++ # Stop operation doesn't perform any API call or try to remove the DNS record ++ # this mostly because this is not necessarily mandatory or desired ++ # the start and monitor functions will take care of changing the DNS record ++ # if the agent starts in a different cluster node ++ # ++ ocf_log info "Bringing down Route53 agent. (Will NOT remove Route53 DNS record)" + return $OCF_SUCCESS + } + +-ec2ip_start() { +- IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')" +- _update_record "UPSERT" "$IPADDRESS" ++r53_start() { ++ # ++ # Start agent and config DNS in Route53 ++ # ++ ocf_log info "Starting Route53 DNS update...." ++ IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" ++ r53_monitor ++ if [ $? 
!= $OCF_SUCCESS ]; then ++ ocf_log info "Could not start agent - check configurations" ++ return $OCF_ERR_GENERIC ++ fi + return $OCF_SUCCESS + } + +@@ -284,16 +376,16 @@ + exit $OCF_SUCCESS + ;; + monitor) +- ec2ip_monitor ++ r53_monitor + ;; + stop) +- ec2ip_stop ++ r53_stop + ;; + validate-all) +- ec2ip_validate ++ r53_validate + ;; + start) +- ec2ip_start ++ r53_start + ;; + *) + usage diff --git a/SOURCES/bz1759115-aws-vpc-route53-2-add-public-and-secondary-ip-support.patch b/SOURCES/bz1759115-aws-vpc-route53-2-add-public-and-secondary-ip-support.patch new file mode 100644 index 0000000..afb8bb6 --- /dev/null +++ b/SOURCES/bz1759115-aws-vpc-route53-2-add-public-and-secondary-ip-support.patch @@ -0,0 +1,220 @@ +From 9b77d06bfe3308692946b8ac08bc7ec3399a762b Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 2 Apr 2020 13:38:30 +0200 +Subject: [PATCH 1/2] aws-vpc-route53: cleanup and improvements + +--- + heartbeat/aws-vpc-route53.in | 73 ++++++++++++++++++++---------------- + 1 file changed, 41 insertions(+), 32 deletions(-) + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index b276dfb3c..1cfc2b01f 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -43,8 +43,14 @@ + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ++OCF_RESKEY_hostedzoneid_default="" ++OCF_RESKEY_fullname_default="" ++OCF_RESKEY_ip_default="local" + OCF_RESKEY_ttl_default=10 + ++: ${OCF_RESKEY_hostedzoneid:=${OCF_RESKEY_hostedzoneid_default}} ++: ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}} ++: ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}} + : ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}} + + ####################################################################### +@@ -104,7 +110,7 @@ Hosted zone ID of Route 53. This is the table of + the Route 53 record. + + AWS hosted zone ID +- ++ + + + +@@ -113,7 +119,7 @@ Example: service.cloud.example.corp. + Note: The trailing dot is important to Route53! + + Full service name +- ++ + + + +@@ -189,6 +195,31 @@ r53_validate() { + return $OCF_SUCCESS + } + ++r53_start() { ++ # ++ # Start agent and config DNS in Route53 ++ # ++ ocf_log info "Starting Route53 DNS update...." ++ IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" ++ r53_monitor ++ if [ $? != $OCF_SUCCESS ]; then ++ ocf_log info "Could not start agent - check configurations" ++ return $OCF_ERR_GENERIC ++ fi ++ return $OCF_SUCCESS ++} ++ ++r53_stop() { ++ # ++ # Stop operation doesn't perform any API call or try to remove the DNS record ++ # this mostly because this is not necessarily mandatory or desired ++ # the start and monitor functions will take care of changing the DNS record ++ # if the agent starts in a different cluster node ++ # ++ ocf_log info "Bringing down Route53 agent. (Will NOT remove Route53 DNS record)" ++ return $OCF_SUCCESS ++} ++ + r53_monitor() { + # + # For every start action the agent will call Route53 API to check for DNS record +@@ -339,31 +370,6 @@ _update_record() { + done + } + +-r53_stop() { +- # +- # Stop operation doesn't perform any API call or try to remove the DNS record +- # this mostly because this is not necessarily mandatory or desired +- # the start and monitor functions will take care of changing the DNS record +- # if the agent starts in a different cluster node +- # +- ocf_log info "Bringing down Route53 agent. 
(Will NOT remove Route53 DNS record)" +- return $OCF_SUCCESS +-} +- +-r53_start() { +- # +- # Start agent and config DNS in Route53 +- # +- ocf_log info "Starting Route53 DNS update...." +- IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" +- r53_monitor +- if [ $? != $OCF_SUCCESS ]; then +- ocf_log info "Could not start agent - check configurations" +- return $OCF_ERR_GENERIC +- fi +- return $OCF_SUCCESS +-} +- + ############################################################################### + + case $__OCF_ACTION in +@@ -375,20 +381,23 @@ case $__OCF_ACTION in + metadata + exit $OCF_SUCCESS + ;; +- monitor) +- r53_monitor ++ start) ++ r53_validate || exit $? ++ r53_start + ;; + stop) + r53_stop + ;; ++ monitor) ++ r53_monitor ++ ;; + validate-all) + r53_validate + ;; +- start) +- r53_start +- ;; + *) + usage + exit $OCF_ERR_UNIMPLEMENTED + ;; + esac ++ ++exit $? + +From 745c6b9b3e331ed3705a641f1ec03a2604de3a1d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 2 Apr 2020 13:40:33 +0200 +Subject: [PATCH 2/2] aws-vpc-route53: add support for public and secondary + private IPs + +--- + heartbeat/aws-vpc-route53.in | 31 +++++++++++++++++++++++++++++-- + 1 file changed, 29 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index 1cfc2b01f..ca6556951 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -121,6 +121,15 @@ Note: The trailing dot is important to Route53! + Full service name + + ++ ++ ++IP (local (default), public or secondary private IP address (e.g. 10.0.0.1). ++ ++A secondary private IP can be setup with the awsvip agent. ++ ++Type of IP or secondary private IP address (local, public or e.g. 10.0.0.1) ++ ++ + + + Time to live for Route53 ARECORD +@@ -173,6 +182,15 @@ r53_validate() { + # Hosted Zone ID + [[ -z "$OCF_RESKEY_hostedzoneid" ]] && ocf_log error "Hosted Zone ID parameter not set $OCF_RESKEY_hostedzoneid!" && exit $OCF_ERR_CONFIGURED + ++ # Type of IP/secondary IP address ++ case $OCF_RESKEY_ip in ++ local|public|*.*.*.*) ++ ;; ++ *) ++ ocf_exit_reason "Invalid value for ip: ${OCF_RESKEY_ip}" ++ exit $OCF_ERR_CONFIGURED ++ esac ++ + # profile + [[ -z "$OCF_RESKEY_profile" ]] && ocf_log error "AWS CLI profile not set $OCF_RESKEY_profile!" && exit $OCF_ERR_CONFIGURED + +@@ -200,7 +218,7 @@ r53_start() { + # Start agent and config DNS in Route53 + # + ocf_log info "Starting Route53 DNS update...." +- IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" ++ _get_ip + r53_monitor + if [ $? 
!= $OCF_SUCCESS ]; then + ocf_log info "Could not start agent - check configurations" +@@ -239,7 +257,7 @@ r53_monitor() { + r53_validate + ocf_log debug "Checking Route53 record sets" + # +- IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)" ++ _get_ip + # + if [ "$__OCF_ACTION" = "start" ] || ocf_is_probe ; then + # +@@ -308,6 +326,15 @@ r53_monitor() { + return $OCF_SUCCESS + } + ++_get_ip() { ++ case $OCF_RESKEY_ip in ++ local|public) ++ IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4)";; ++ *.*.*.*) ++ IPADDRESS="${OCF_RESKEY_ip}";; ++ esac ++} ++ + _update_record() { + # + # This function is the one that will actually execute Route53's API call diff --git a/SOURCES/bz1759115-aws-vpc-route53-3-awscli-property.patch b/SOURCES/bz1759115-aws-vpc-route53-3-awscli-property.patch new file mode 100644 index 0000000..07a02c3 --- /dev/null +++ b/SOURCES/bz1759115-aws-vpc-route53-3-awscli-property.patch @@ -0,0 +1,302 @@ +From 01d3e07ec6c5240633633cb56d1bc915190f40a5 Mon Sep 17 00:00:00 2001 +From: Brandon Perkins +Date: Fri, 24 Apr 2020 18:19:19 -0400 +Subject: [PATCH 1/4] Replace aws command line with OCF_RESKEY_awscli property. + +--- + heartbeat/aws-vpc-move-ip | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 26ca6007d..af697adbe 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -159,14 +159,14 @@ END + execute_cmd_as_role(){ + cmd=$1 + role=$2 +- output="$(aws sts assume-role --role-arn $role --role-session-name AWSCLI-RouteTableUpdate --profile $OCF_RESKEY_profile --output=text)" ++ output="$($OCF_RESKEY_awscli sts assume-role --role-arn $role --role-session-name AWSCLI-RouteTableUpdate --profile $OCF_RESKEY_profile --output=text)" + export AWS_ACCESS_KEY_ID="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $5}')" + export AWS_SECRET_ACCESS_KEY="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $7}')" + export AWS_SESSION_TOKEN="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $8}')" + + #Execute command + ocf_log debug "Assumed Role ${role}" +- ocf_log debug "$(aws sts get-caller-identity)" ++ ocf_log debug "$($OCF_RESKEY_awscli sts get-caller-identity)" + ocf_log debug "executing command: $cmd" + response="$($cmd)" + unset output AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN +@@ -181,7 +181,7 @@ ec2ip_set_address_param_compat(){ + } + + ec2ip_validate() { +- for cmd in aws ip curl; do ++ for cmd in $OCF_RESKEY_awscli ip curl; do + check_binary "$cmd" + done + + +From 20466ba91c21a489303774ac9a1f5f5fd7b86f12 Mon Sep 17 00:00:00 2001 +From: Brandon Perkins +Date: Fri, 24 Apr 2020 18:20:17 -0400 +Subject: [PATCH 2/4] - Replace aws command line with OCF_RESKEY_awscli + property. - Add OCF_RESKEY_awscli and OCF_RESKEY_profile default variables. - + Add awscli (Path to AWS CLI tools) parameter. - Remove required attribute on + profile parameter. - Replace --profile $OCF_RESKEY_profile with + AWS_PROFILE_OPT. + +--- + heartbeat/aws-vpc-route53.in | 71 ++++++++++++++++++++++-------------- + 1 file changed, 43 insertions(+), 28 deletions(-) + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index ca6556951..3042b345b 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -43,11 +43,16 @@ + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + ++# Defaults ++OCF_RESKEY_awscli_default="/usr/bin/aws" ++OCF_RESKEY_profile_default="default" + OCF_RESKEY_hostedzoneid_default="" + OCF_RESKEY_fullname_default="" + OCF_RESKEY_ip_default="local" + OCF_RESKEY_ttl_default=10 + ++: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} ++: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} + : ${OCF_RESKEY_hostedzoneid:=${OCF_RESKEY_hostedzoneid_default}} + : ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}} + : ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}} +@@ -103,7 +108,35 @@ primitive res_route53 ocf:heartbeat:aws-vpc-route53 \ + meta target-role=Started + + Update Route53 VPC record for AWS EC2 ++ + ++ ++ ++Path to command line tools for AWS ++ ++Path to AWS CLI tools ++ ++ ++ ++ ++ ++The name of the AWS CLI profile of the root account. This ++profile will have to use the "text" format for CLI output. ++The file /root/.aws/config should have an entry which looks ++like: ++ ++ [profile cluster] ++ region = us-east-1 ++ output = text ++ ++"cluster" is the name which has to be used in the cluster ++configuration. The region has to be the current one. The ++output has to be "text". ++ ++AWS Profile Name ++ ++ ++ + + + Hosted zone ID of Route 53. This is the table of +@@ -112,6 +145,7 @@ the Route 53 record. + AWS hosted zone ID + + ++ + + + The full name of the service which will host the IP address. +@@ -121,6 +155,7 @@ Note: The trailing dot is important to Route53! + Full service name + + ++ + + + IP (local (default), public or secondary private IP address (e.g. 10.0.0.1). +@@ -130,6 +165,7 @@ A secondary private IP can be setup with the awsvip agent. + Type of IP or secondary private IP address (local, public or e.g. 10.0.0.1) + + ++ + + + Time to live for Route53 ARECORD +@@ -137,25 +173,8 @@ Time to live for Route53 ARECORD + ARECORD TTL + + +- +- +-The name of the AWS CLI profile of the root account. This +-profile will have to use the "text" format for CLI output. +-The file /root/.aws/config should have an entry which looks +-like: +- +- [profile cluster] +- region = us-east-1 +- output = text +- +-"cluster" is the name which has to be used in the cluster +-configuration. The region has to be the current one. The +-output has to be "text". +- +-AWS Profile Name +- +- + ++ + + + +@@ -198,17 +217,13 @@ r53_validate() { + [[ -z "$OCF_RESKEY_ttl" ]] && ocf_log error "TTL not set $OCF_RESKEY_ttl!" && exit $OCF_ERR_CONFIGURED + + ocf_log debug "Testing aws command" +- aws --version 2>&1 ++ $OCF_RESKEY_awscli --version 2>&1 + if [ "$?" -gt 0 ]; then + ocf_log error "Error while executing aws command as user root! Please check if AWS CLI tools (Python flavor) are properly installed and configured." 
&& exit $OCF_ERR_INSTALLED + fi + ocf_log debug "ok" + +- if [ -n "$OCF_RESKEY_profile" ]; then +- AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" +- else +- AWS_PROFILE_OPT="--profile default --cli-connect-timeout 10" +- fi ++ AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" + + return $OCF_SUCCESS + } +@@ -261,7 +276,7 @@ r53_monitor() { + # + if [ "$__OCF_ACTION" = "start" ] || ocf_is_probe ; then + # +- cmd="aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" ++ cmd="$OCF_RESKEY_awscli $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" + ocf_log info "Route53 Agent Starting or probing - executing monitoring API call: $cmd" + CLIRES="$($cmd 2>&1)" + rc=$? +@@ -293,7 +308,7 @@ r53_monitor() { + # + if [[ ! $ARECORD =~ $IPREGEX ]] || [ $rc -ne 0 ]; then + ocf_log info "Fallback to Route53 API query due to DNS resolution failure" +- cmd="aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" ++ cmd="$OCF_RESKEY_awscli $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" + ocf_log debug "executing monitoring API call: $cmd" + CLIRES="$($cmd 2>&1)" + rc=$? +@@ -372,7 +387,7 @@ _update_record() { + ] + } + EOF +- cmd="aws --profile $OCF_RESKEY_profile route53 change-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --change-batch file://$ROUTE53RECORD " ++ cmd="$OCF_RESKEY_awscli $AWS_PROFILE_OPT route53 change-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --change-batch file://$ROUTE53RECORD " + ocf_log debug "Executing command: $cmd" + CLIRES="$($cmd 2>&1)" + rc=$? +@@ -392,7 +407,7 @@ _update_record() { + MYSECONDS=20 + while [ "$STATUS" = 'PENDING' ]; do + sleep $MYSECONDS +- STATUS="$(aws --profile $OCF_RESKEY_profile route53 get-change --id $CHANGEID | grep CHANGEINFO | awk -F'\t' '{ print $4 }' |cut -d'"' -f 2 )" ++ STATUS="$($OCF_RESKEY_awscli $AWS_PROFILE_OPT route53 get-change --id $CHANGEID | grep CHANGEINFO | awk -F'\t' '{ print $4 }' |cut -d'"' -f 2 )" + ocf_log debug "Waited for $MYSECONDS seconds and checked execution of Route 53 update status: $STATUS " + done + } + +From 113bee3ae17a8d610edc0e3879b56e96efbe8b31 Mon Sep 17 00:00:00 2001 +From: Brandon Perkins +Date: Mon, 27 Apr 2020 11:08:27 -0400 +Subject: [PATCH 3/4] Move AWS_PROFILE_OPT before the start/stop/etc and after + the usage/meta-data case statements. + +--- + heartbeat/aws-vpc-route53.in | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index 3042b345b..ee4f8afcb 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -223,8 +223,6 @@ r53_validate() { + fi + ocf_log debug "ok" + +- AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" +- + return $OCF_SUCCESS + } + +@@ -423,6 +421,11 @@ case $__OCF_ACTION in + metadata + exit $OCF_SUCCESS + ;; ++esac ++ ++AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" ++ ++case $__OCF_ACTION in + start) + r53_validate || exit $? 
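+	# a failed validation aborts "start" with r53_validate's own exit code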
+ r53_start + +From 8f46c90a73731be0c8f99adcd718f7cfc2d52002 Mon Sep 17 00:00:00 2001 +From: Brandon Perkins +Date: Mon, 27 Apr 2020 11:54:22 -0400 +Subject: [PATCH 4/4] Move AWS_PROFILE_OPT before functions and after + initialization. + +--- + heartbeat/aws-vpc-route53.in | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index ee4f8afcb..b06b93726 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -37,6 +37,7 @@ + # + # Mar. 15, 2017, vers 1.0.2 + ++ + ####################################################################### + # Initialization: + +@@ -57,9 +58,13 @@ OCF_RESKEY_ttl_default=10 + : ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}} + : ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}} + : ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}} ++####################################################################### ++ + ++AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" + ####################################################################### + ++ + usage() { + cat <<-EOT + usage: $0 {start|stop|status|monitor|validate-all|meta-data} +@@ -421,11 +426,6 @@ case $__OCF_ACTION in + metadata + exit $OCF_SUCCESS + ;; +-esac +- +-AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" +- +-case $__OCF_ACTION in + start) + r53_validate || exit $? + r53_start diff --git a/SOURCES/bz1763249-manpages-fix-pcs-syntax.patch b/SOURCES/bz1763249-manpages-fix-pcs-syntax.patch new file mode 100644 index 0000000..eff376b --- /dev/null +++ b/SOURCES/bz1763249-manpages-fix-pcs-syntax.patch @@ -0,0 +1,53 @@ +From 0903a60930238238d5caa6c3b42b28d7bc4cccf4 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 26 Oct 2020 13:11:05 +0100 +Subject: [PATCH 1/2] man: use promotable keyword in manpage examples + +--- + doc/man/ra2refentry.xsl | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/doc/man/ra2refentry.xsl b/doc/man/ra2refentry.xsl +index d0535fd36..f3cdcdbb2 100644 +--- a/doc/man/ra2refentry.xsl ++++ b/doc/man/ra2refentry.xsl +@@ -556,7 +556,7 @@ + + +- --master ++ promotable + + + + +From bfcd5796ae12e6a43a245d0c785f183342943393 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 26 Oct 2020 16:56:00 +0100 +Subject: [PATCH 2/2] man: use OCF_CHECK_LEVEL for depth parameters in pcs + examples + +--- + doc/man/ra2refentry.xsl | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +diff --git a/doc/man/ra2refentry.xsl b/doc/man/ra2refentry.xsl +index f3cdcdbb2..f8e12321f 100644 +--- a/doc/man/ra2refentry.xsl ++++ b/doc/man/ra2refentry.xsl +@@ -612,7 +612,14 @@ + + + +- ++ ++ ++ ++ ++ ++ OCF_CHECK_LEVEL ++ ++ + =" + + " diff --git a/SOURCES/bz1764888-exportfs-allow-same-fsid.patch b/SOURCES/bz1764888-exportfs-allow-same-fsid.patch new file mode 100644 index 0000000..93d47af --- /dev/null +++ b/SOURCES/bz1764888-exportfs-allow-same-fsid.patch @@ -0,0 +1,22 @@ +From 9cea030ba6d5c759971873b80d6d97b545ecac39 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 7 Nov 2019 13:03:30 +0100 +Subject: [PATCH] exportfs: allow multiple exports of same directory + +--- + heartbeat/exportfs | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/exportfs b/heartbeat/exportfs +index d79aced88..1cabdee70 100755 +--- a/heartbeat/exportfs ++++ b/heartbeat/exportfs +@@ -82,7 +82,7 @@ The directory or directories to export. + + + +- ++ + + The fsid option to pass to exportfs. 
This can be a unique positive + integer, a UUID (assuredly sans comma characters), or the special string diff --git a/SOURCES/bz1765128-mysql-galera-fix-incorrect-rc.patch b/SOURCES/bz1765128-mysql-galera-fix-incorrect-rc.patch new file mode 100644 index 0000000..9e816bd --- /dev/null +++ b/SOURCES/bz1765128-mysql-galera-fix-incorrect-rc.patch @@ -0,0 +1,46 @@ +From c718050a4a2bb47d640af1d8e19995590755670f Mon Sep 17 00:00:00 2001 +From: Valentin Vidic +Date: Wed, 23 Oct 2019 22:04:44 +0200 +Subject: [PATCH] Low: mysql-common: fix startup check + +PID value is not capture correctly so the startup +fails with the wrong exit code. + +Starting 'mysql' case 8 'check lib file': +Setting agent environment: export OCF_RESKEY_CRM_meta_timeout=15000 +Setting system environment: chmod u-w /var/lib/mysql +Running agent: ./mysql start +ERROR: The agent was hanging, killed it, maybe you damaged the agent or system's environment, see details below: +Oct 23 18:46:06 INFO: MySQL is not running +runuser: warning: cannot change directory to /nonexistent: No such file or directory +runuser: warning: cannot change directory to /nonexistent: No such file or directory +runuser: warning: cannot change directory to /nonexistent: No such file or directory +Oct 23 18:46:06 INFO: MySQL is not running +Oct 23 18:46:08 INFO: MySQL is not running +Oct 23 18:46:10 INFO: MySQL is not running +Oct 23 18:46:12 INFO: MySQL is not running +Oct 23 18:46:14 INFO: MySQL is not running +Oct 23 18:46:16 INFO: MySQL is not running +Oct 23 18:46:18 INFO: MySQL is not running +Oct 23 18:46:20 INFO: MySQL is not running +Oct 23 18:46:22 INFO: MySQL is not running +Oct 23 18:46:24 INFO: MySQL is not running +--- + heartbeat/mysql-common.sh | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh +index d1b1ddb96..4004a6b65 100755 +--- a/heartbeat/mysql-common.sh ++++ b/heartbeat/mysql-common.sh +@@ -239,8 +239,8 @@ mysql_common_start() + --datadir=$OCF_RESKEY_datadir \ + --log-error=$OCF_RESKEY_log \ + $OCF_RESKEY_additional_parameters \ +- $mysql_extra_params >/dev/null 2>&1 & +- pid=$!" ++ $mysql_extra_params >/dev/null 2>&1" & ++ pid=$! + + # Spin waiting for the server to come up. + # Let the CRM/LRM time us out if required. diff --git a/SOURCES/bz1767916-IPaddr2-clusterip-not-supported.patch b/SOURCES/bz1767916-IPaddr2-clusterip-not-supported.patch new file mode 100644 index 0000000..6e8ccfe --- /dev/null +++ b/SOURCES/bz1767916-IPaddr2-clusterip-not-supported.patch @@ -0,0 +1,104 @@ +From 92c49b6f2847546f3f938b10a2a97021774f0be3 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Wed, 4 Dec 2019 14:36:59 +0100 +Subject: [PATCH] IPaddr2: ipt_CLUSTERIP "iptables" extension not "nft" backend + compatible +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Reference: +https://lists.clusterlabs.org/pipermail/users/2019-December/026674.html +(thread also sketches a future ambition for a [presumably, to revert +the habit of a functional overloading] separate agent to use +"xt_cluster" extension/cluster match). 
+ +Signed-off-by: Jan Pokorný +--- + heartbeat/IPaddr2 | 29 ++++++++++++++++++++++------- + heartbeat/ocf-binaries.in | 2 ++ + 2 files changed, 24 insertions(+), 7 deletions(-) + +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index 6f8e8c734..db0b0e547 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -123,6 +123,8 @@ VLDIR=$HA_RSCTMP + SENDARPPIDDIR=$HA_RSCTMP + CIP_lockfile=$HA_RSCTMP/IPaddr2-CIP-${OCF_RESKEY_ip} + ++IPADDR2_CIP_IPTABLES=$IPTABLES ++ + ####################################################################### + + meta_data() { +@@ -138,11 +140,21 @@ It can add an IP alias, or remove one. + In addition, it can implement Cluster Alias IP functionality + if invoked as a clone resource. + +-If used as a clone, you should explicitly set clone-node-max >= 2, ++If used as a clone, "shared address with a trivial, stateless ++(autonomous) load-balancing/mutual exclusion on ingress" mode gets ++applied (as opposed to "assume resource uniqueness" mode otherwise). ++For that, Linux firewall (kernel and userspace) is assumed, and since ++recent distributions are ambivalent in plain "iptables" command to ++particular back-end resolution, "iptables-legacy" (when present) gets ++prioritized so as to avoid incompatibilities (note that respective ++ipt_CLUSTERIP firewall extension in use here is, at the same time, ++marked deprecated, yet said "legacy" layer can make it workable, ++literally, to this day) with "netfilter" one (as in "iptables-nft"). ++In that case, you should explicitly set clone-node-max >= 2, + and/or clone-max < number of nodes. In case of node failure, + clone instances need to be re-allocated on surviving nodes. +-This would not be possible if there is already an instance on those nodes, +-and clone-node-max=1 (which is the default). ++This would not be possible if there is already an instance ++on those nodes, and clone-node-max=1 (which is the default). + + + Manages virtual IPv4 and IPv6 addresses (Linux specific version) +@@ -995,7 +1007,7 @@ ip_start() { + + if [ -n "$IP_CIP" ] && ([ $ip_status = "no" ] || [ $ip_status = "partial2" ]); then + $MODPROBE ip_conntrack +- $IPTABLES -I INPUT -d $OCF_RESKEY_ip -i $NIC -j CLUSTERIP \ ++ $IPADDR2_CIP_IPTABLES -I INPUT -d $OCF_RESKEY_ip -i $NIC -j CLUSTERIP \ + --new \ + --clustermac $IF_MAC \ + --total-nodes $IP_INC_GLOBAL \ +@@ -1089,7 +1101,7 @@ ip_stop() { + i=1 + while [ $i -le $IP_INC_GLOBAL ]; do + ocf_log info $i +- $IPTABLES -D INPUT -d $OCF_RESKEY_ip -i $NIC -j CLUSTERIP \ ++ $IPADDR2_CIP_IPTABLES -D INPUT -d $OCF_RESKEY_ip -i $NIC -j CLUSTERIP \ + --new \ + --clustermac $IF_MAC \ + --total-nodes $IP_INC_GLOBAL \ +@@ -1186,8 +1198,11 @@ ip_validate() { + set_send_arp_program + + if [ -n "$IP_CIP" ]; then +- check_binary $IPTABLES +- check_binary $MODPROBE ++ if have_binary "$IPTABLES_LEGACY"; then ++ IPADDR2_CIP_IPTABLES="$IPTABLES_LEGACY" ++ fi ++ check_binary "$IPADDR2_CIP_IPTABLES" ++ check_binary $MODPROBE + fi + + # $BASEIP, $NETMASK, $NIC , $IP_INC_GLOBAL, and $BRDCAST have been checked within ip_init, +diff --git a/heartbeat/ocf-binaries.in b/heartbeat/ocf-binaries.in +index 9439ae170..e9bf95fc2 100644 +--- a/heartbeat/ocf-binaries.in ++++ b/heartbeat/ocf-binaries.in +@@ -26,6 +26,8 @@ export PATH + : ${GREP:=grep} + : ${IFCONFIG:=ifconfig} + : ${IPTABLES:=iptables} ++## for cases that are known not to be serviceable with iptables-nft impl. 
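++## (the ":=" expansion below keeps any IPTABLES_LEGACY already exported in
++## the environment, so a non-default path can be supplied without edits)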
++: ${IPTABLES_LEGACY:=iptables-legacy} + : ${IP2UTIL:=ip} + : ${MDADM:=mdadm} + : ${MODPROBE:=modprobe} diff --git a/SOURCES/bz1777381-Filesystem-1-refresh-UUID.patch b/SOURCES/bz1777381-Filesystem-1-refresh-UUID.patch new file mode 100644 index 0000000..bd84123 --- /dev/null +++ b/SOURCES/bz1777381-Filesystem-1-refresh-UUID.patch @@ -0,0 +1,33 @@ +From 18888da3ceef7a56388c89a616485fd8faa392cc Mon Sep 17 00:00:00 2001 +From: Roger Zhou +Date: Thu, 14 Nov 2019 17:52:13 +0800 +Subject: [PATCH] Filesystem: refresh UUID in the start phase + +In the case a fresh filesystem is just created from another node on the +shared storage, is not visible yet. Then try partprobe to refresh +/dev/disk/by-uuid/* up to date. + +Signed-off-by: Roger Zhou +--- + heartbeat/Filesystem | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index e66ddc77f..543986441 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -454,6 +454,14 @@ Filesystem_start() + # accordingly + + if [ $blockdevice = "yes" ]; then ++ if [ "$DEVICE" != "/dev/null" -a ! -b "$DEVICE" ] ; then ++ # In the case a fresh filesystem is just created ++ # from another node on the shared storage, and ++ # is not visible yet. Then try partprobe to ++ # refresh /dev/disk/by-uuid/* up to date. ++ have_binary partprobe && partprobe >/dev/null 2>&1 ++ fi ++ + if [ "$DEVICE" != "/dev/null" -a ! -b "$DEVICE" ] ; then + ocf_exit_reason "Couldn't find device [$DEVICE]. Expected /dev/??? to exist" + exit $OCF_ERR_INSTALLED diff --git a/SOURCES/bz1777381-Filesystem-2-udev-settle.patch b/SOURCES/bz1777381-Filesystem-2-udev-settle.patch new file mode 100644 index 0000000..fde7f89 --- /dev/null +++ b/SOURCES/bz1777381-Filesystem-2-udev-settle.patch @@ -0,0 +1,124 @@ +From af39017b9333dcbadee2a15f3829667f2b18fb45 Mon Sep 17 00:00:00 2001 +From: Roger Zhou +Date: Fri, 20 Dec 2019 23:28:45 +0800 +Subject: [PATCH 1/2] Filesystem: respect udevd need time to create UUID + symlinks + +To refresh the filesystem UUID, there is a race condition. partprobe +might return before the UUID symlink get created. Particularly, when the +system has many devices, the udev daemon could need visible time to +process the udev event queue. Hence, wait udev for a moment. + +Signed-off-by: Roger Zhou +--- + heartbeat/Filesystem | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 543986441..c21ad5761 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -460,6 +460,10 @@ Filesystem_start() + # is not visible yet. Then try partprobe to + # refresh /dev/disk/by-uuid/* up to date. + have_binary partprobe && partprobe >/dev/null 2>&1 ++ local timeout ++ timeout=${OCF_RESKEY_CRM_meta_timeout:="60000"} ++ timeout=$((timeout/1000)) ++ have_binary udevadm && udevadm settle -t $timeout --exit-if-exists=$DEVICE + fi + + if [ "$DEVICE" != "/dev/null" -a ! -b "$DEVICE" ] ; then + +From a9fb8077c8201b287ee0486b2a34db4b7d4d8f5d Mon Sep 17 00:00:00 2001 +From: Roger Zhou +Date: Wed, 25 Dec 2019 15:45:03 +0800 +Subject: [PATCH 2/2] Filesystem: add trigger_udev_rules_if_need() for -U, -L, + or /dev/xxx device + +DEVICE parameter of this RA accepts "-U " and "-L + ++ ++ ++Role to use to query/update the route table ++ ++route table query/update role ++ ++ ++ + + + Name of the network interface, i.e. 
eth0 diff --git a/SOURCES/bz1814896-Filesystem-fast_stop-default-to-no-for-GFS2.patch b/SOURCES/bz1814896-Filesystem-fast_stop-default-to-no-for-GFS2.patch new file mode 100644 index 0000000..b0e8230 --- /dev/null +++ b/SOURCES/bz1814896-Filesystem-fast_stop-default-to-no-for-GFS2.patch @@ -0,0 +1,46 @@ +--- a/heartbeat/Filesystem 2020-06-11 15:49:54.111316780 +0200 ++++ b/heartbeat/Filesystem 2020-06-11 15:53:53.423821158 +0200 +@@ -60,6 +60,21 @@ + # Defaults + DFLT_STATUSDIR=".Filesystem_status/" + ++# Parameter defaults ++ ++OCF_RESKEY_fstype_default="" ++OCF_RESKEY_fast_stop_default="yes" ++ ++: ${OCF_RESKEY_fstype=${OCF_RESKEY_fstype_default}} ++if [ -z "${OCF_RESKEY_fast_stop}" ]; then ++ case "$OCF_RESKEY_fstype" in ++ gfs2) ++ OCF_RESKEY_fast_stop="no";; ++ *) ++ OCF_RESKEY_fast_stop=${OCF_RESKEY_fast_stop_default};; ++ esac ++fi ++ + # Variables used by multiple methods + HOSTOS=`uname` + +@@ -135,7 +150,7 @@ + The type of filesystem to be mounted. + + filesystem type +- ++ + + + +@@ -178,9 +193,11 @@ + users easily and want to prevent the stop action from failing, + then set this parameter to "no" and add an appropriate timeout + for the stop operation. ++ ++This defaults to "no" for GFS2 filesystems. + + fast stop +- ++ + + + diff --git a/SOURCES/bz1815013-redis-parse-password-correctly-based-on-version.patch b/SOURCES/bz1815013-redis-parse-password-correctly-based-on-version.patch new file mode 100644 index 0000000..c61a306 --- /dev/null +++ b/SOURCES/bz1815013-redis-parse-password-correctly-based-on-version.patch @@ -0,0 +1,169 @@ +From 2270c5d6aaf8b3b6d663d413a8e7193a493cfdc5 Mon Sep 17 00:00:00 2001 +From: Konstantin Pokotilenko +Date: Tue, 24 Sep 2019 17:26:11 +0300 +Subject: [PATCH 1/2] Consider redis-cli features to choose optimal password + passing method and warning filtering workaround + +--- + heartbeat/redis.in | 60 +++++++++++++++++++++++++++++++++++++++++++--- + 1 file changed, 57 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index ec7186d8b..409961d0b 100644 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -237,6 +237,51 @@ CRM_ATTR_REPL_INFO="${HA_SBIN_DIR}/crm_attribute --type crm_config --name ${INST + MASTER_HOST="" + MASTER_ACTIVE_CACHED="" + MASTER_ACTIVE="" ++CLI_HAVE_AUTH_WARNING=0 ++CLI_HAVE_ARG_NO_AUTH_WARNING=0 ++CLI_HAVE_ENV_AUTH=0 ++ ++cmp_redis_version() ++{ ++ ++ if [ "$1" == "$2" ]; then ++ return 1 ++ elif [ $(echo -e "$1\n$2" | sort -V | head -1) == "$1" ]; then ++ return 0 ++ else ++ return 2 ++ fi ++} ++ ++redis_cli_features() ++{ ++ ++ CLI_VER=$(redis-cli --version | tr " " "\n" | tail -1) ++ # Starting with 4.0.10 there is a warning on stderr when using a pass ++ # Starting with 5.0.0 there is an argument to silence the warning: --no-auth-warning ++ # Starting with 5.0.3 there is an option to use REDISCLI_AUTH evironment variable for password, no warning in this case ++ ++ cmp_redis_version $CLI_VER 5.0.3 ++ res=$? ++ echo 5.0.3: $res ++ if [[ res -ge 1 ]]; then ++ CLI_HAVE_ENV_AUTH=1 ++ fi ++ ++ cmp_redis_version $CLI_VER 5.0.0 ++ res=$? ++ echo 5.0.0: $res ++ if [[ res -ge 1 ]]; then ++ CLI_HAVE_ARG_NO_AUTH_WARNING=1 ++ fi ++ ++ cmp_redis_version $CLI_VER 4.0.10 ++ res=$? 
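++	# cmp_redis_version above returns 0 for "older", 1 for "equal" and
++	# 2 for "newer", so "res -ge 1" reads as: CLI_VER >= 4.0.10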
++ echo 4.0.10: $res ++ if [[ res -ge 1 ]]; then ++ CLI_HAVE_AUTH_WARNING=1 ++ fi ++} + + master_is_active() + { +@@ -315,9 +360,16 @@ set_score() + redis_client() { + ocf_log debug "redis_client: '$REDIS_CLIENT' -s '$REDIS_SOCKET' $*" + if [ -n "$clientpasswd" ]; then +- # Starting with 4.0.10 there is a warning on stderr when using a pass +- # Once we stop supporting versions < 5.0.0 we can add --no-auth-warning here +- ("$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" 2>&1 >&3 3>&- | grep -v "Using a password" >&2 3>&-) 3>&1 | sed 's/\r//' ++ # Consider redis-cli features to choose optimal password passing method and warning filtering workaround ++ if [[ CLI_HAVE_ENV_AUTH -eq 1 ]]; then ++ REDISCLI_AUTH=$clientpasswd "$REDIS_CLIENT" -s "$REDIS_SOCKET" "$@" | sed 's/\r//' ++ elif [[ CLI_HAVE_ARG_NO_AUTH_WARNING -eq 1 ]]; then ++ "$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" --no-auth-warning | sed 's/\r//' ++ elif [[ CLI_HAVE_AUTH_WARNING -eq 1 ]]; then ++ ("$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" 2>&1 >&3 3>&- | grep -v "Using a password" >&2 3>&-) 3>&1 | sed 's/\r//' ++ else ++ "$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" | sed 's/\r//' ++ fi + else + "$REDIS_CLIENT" -s "$REDIS_SOCKET" "$@" | sed 's/\r//' + fi +@@ -686,6 +738,8 @@ if [ -r "$REDIS_CONFIG" ]; then + clientpasswd="$(sed -n -e 's/^\s*requirepass\s*\(.*\)\s*$/\1/p' < $REDIS_CONFIG | tail -n 1)" + fi + ++redis_cli_features ++ + ocf_log debug "action=${1:-$__OCF_ACTION} notify_type=${OCF_RESKEY_CRM_meta_notify_type} notify_operation=${OCF_RESKEY_CRM_meta_notify_operation} master_host=${OCF_RESKEY_CRM_meta_notify_master_uname} slave_host=${OCF_RESKEY_CRM_meta_notify_slave_uname} promote_host=${OCF_RESKEY_CRM_meta_notify_promote_uname} demote_host=${OCF_RESKEY_CRM_meta_notify_demote_uname}; params: bin=${OCF_RESKEY_bin} client_bin=${OCF_RESKEY_client_bin} config=${OCF_RESKEY_config} user=${OCF_RESKEY_user} rundir=${OCF_RESKEY_rundir} port=${OCF_RESKEY_port}" + + case "${1:-$__OCF_ACTION}" in + +From 0b9f942a88bfc3ad04938aa5135fad8f8bece69c Mon Sep 17 00:00:00 2001 +From: Konstantin Pokotilenko +Date: Tue, 24 Sep 2019 18:35:59 +0300 +Subject: [PATCH 2/2] use ocf_version_cmp instead of own implementation use + same method of getting redis-cli version as already used before in file, this + also uses redis client from variable instead of hardcoded remove debug output + fix --no-auth-warning argument position + +--- + heartbeat/redis.in | 25 +++++-------------------- + 1 file changed, 5 insertions(+), 20 deletions(-) + +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index 409961d0b..d722fb12c 100644 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -241,43 +241,28 @@ CLI_HAVE_AUTH_WARNING=0 + CLI_HAVE_ARG_NO_AUTH_WARNING=0 + CLI_HAVE_ENV_AUTH=0 + +-cmp_redis_version() +-{ +- +- if [ "$1" == "$2" ]; then +- return 1 +- elif [ $(echo -e "$1\n$2" | sort -V | head -1) == "$1" ]; then +- return 0 +- else +- return 2 +- fi +-} +- + redis_cli_features() + { + +- CLI_VER=$(redis-cli --version | tr " " "\n" | tail -1) ++ CLI_VER=$("$REDIS_CLIENT" -v | awk '{print $NF}') + # Starting with 4.0.10 there is a warning on stderr when using a pass + # Starting with 5.0.0 there is an argument to silence the warning: --no-auth-warning + # Starting with 5.0.3 there is an option to use REDISCLI_AUTH evironment variable for password, no warning in this case + +- cmp_redis_version $CLI_VER 5.0.3 ++ ocf_version_cmp $CLI_VER 5.0.3 + res=$? 
+- echo 5.0.3: $res + if [[ res -ge 1 ]]; then + CLI_HAVE_ENV_AUTH=1 + fi + +- cmp_redis_version $CLI_VER 5.0.0 ++ ocf_version_cmp $CLI_VER 5.0.0 + res=$? +- echo 5.0.0: $res + if [[ res -ge 1 ]]; then + CLI_HAVE_ARG_NO_AUTH_WARNING=1 + fi + +- cmp_redis_version $CLI_VER 4.0.10 ++ ocf_version_cmp $CLI_VER 4.0.10 + res=$? +- echo 4.0.10: $res + if [[ res -ge 1 ]]; then + CLI_HAVE_AUTH_WARNING=1 + fi +@@ -364,7 +349,7 @@ redis_client() { + if [[ CLI_HAVE_ENV_AUTH -eq 1 ]]; then + REDISCLI_AUTH=$clientpasswd "$REDIS_CLIENT" -s "$REDIS_SOCKET" "$@" | sed 's/\r//' + elif [[ CLI_HAVE_ARG_NO_AUTH_WARNING -eq 1 ]]; then +- "$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" --no-auth-warning | sed 's/\r//' ++ "$REDIS_CLIENT" -s "$REDIS_SOCKET" --no-auth-warning -a "$clientpasswd" "$@" | sed 's/\r//' + elif [[ CLI_HAVE_AUTH_WARNING -eq 1 ]]; then + ("$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" 2>&1 >&3 3>&- | grep -v "Using a password" >&2 3>&-) 3>&1 | sed 's/\r//' + else diff --git a/SOURCES/bz1817432-use-safe-temp-file-location.patch b/SOURCES/bz1817432-use-safe-temp-file-location.patch new file mode 100644 index 0000000..0149d72 --- /dev/null +++ b/SOURCES/bz1817432-use-safe-temp-file-location.patch @@ -0,0 +1,44 @@ +diff -uNr a/heartbeat/ClusterMon b/heartbeat/ClusterMon +--- a/heartbeat/ClusterMon 2018-06-29 14:05:02.000000000 +0200 ++++ b/heartbeat/ClusterMon 2020-03-27 12:09:23.636845893 +0100 +@@ -86,7 +86,7 @@ + PID file location to ensure only one instance is running + + PID file +- ++ + + + +@@ -94,7 +94,7 @@ + Location to write HTML output to. + + HTML output +- ++ + + + +@@ -233,8 +233,8 @@ + fi + + : ${OCF_RESKEY_update:="15000"} +-: ${OCF_RESKEY_pidfile:="/tmp/ClusterMon_${OCF_RESOURCE_INSTANCE}.pid"} +-: ${OCF_RESKEY_htmlfile:="/tmp/ClusterMon_${OCF_RESOURCE_INSTANCE}.html"} ++: ${OCF_RESKEY_pidfile:="${HA_RSCTMP}/ClusterMon_${OCF_RESOURCE_INSTANCE}.pid"} ++: ${OCF_RESKEY_htmlfile:="${HA_RSCTMP}/ClusterMon_${OCF_RESOURCE_INSTANCE}.html"} + + OCF_RESKEY_update=`expr $OCF_RESKEY_update / 1000` + +diff -uNr a/heartbeat/sapdb-nosha.sh b/heartbeat/sapdb-nosha.sh +--- a/heartbeat/sapdb-nosha.sh 2018-06-29 14:05:02.000000000 +0200 ++++ b/heartbeat/sapdb-nosha.sh 2020-03-27 12:07:16.183958164 +0100 +@@ -740,5 +740,5 @@ + } + + # Set a tempfile and make sure to clean it up again +-TEMPFILE="/tmp/SAPDatabase.$$.tmp" +-trap trap_handler INT TERM +\ No newline at end of file ++TEMPFILE="${HA_RSCTMP}/SAPDatabase.$$.tmp" ++trap trap_handler INT TERM diff --git a/SOURCES/bz1817598-ocf_is_clone-1-fix-clone-max-can-be-0.patch b/SOURCES/bz1817598-ocf_is_clone-1-fix-clone-max-can-be-0.patch new file mode 100644 index 0000000..2b025c5 --- /dev/null +++ b/SOURCES/bz1817598-ocf_is_clone-1-fix-clone-max-can-be-0.patch @@ -0,0 +1,23 @@ +From bb9e54cdac71a1f26aa626d234e38c8ae8417e9f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 26 Mar 2020 16:26:14 +0100 +Subject: [PATCH] ocf-shellfuncs: fix ocf_is_clone() (clone_max can be 0 with + cloned resources) + +--- + heartbeat/ocf-shellfuncs.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 7a97558a5..e0eaae1d5 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -557,7 +557,7 @@ ocf_is_probe() { + # defined as a resource where the clone-max meta attribute is present, + # and set to greater than zero. + ocf_is_clone() { +- [ ! 
-z "${OCF_RESKEY_CRM_meta_clone_max}" ] && [ "${OCF_RESKEY_CRM_meta_clone_max}" -gt 0 ] ++ [ ! -z "${OCF_RESKEY_CRM_meta_clone_max}" ] + } + + # returns true if the resource is configured as a multistate diff --git a/SOURCES/bz1817598-ocf_is_clone-2-update-comment.patch b/SOURCES/bz1817598-ocf_is_clone-2-update-comment.patch new file mode 100644 index 0000000..4b9be99 --- /dev/null +++ b/SOURCES/bz1817598-ocf_is_clone-2-update-comment.patch @@ -0,0 +1,24 @@ +From 420e55da2eb542b35fe8af5d05496b129cd190d5 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 27 Mar 2020 08:44:12 +0100 +Subject: [PATCH] ocf-shellfuncs: ocf_is_clone: update comment based on + clone-max fix in previous commit + +--- + heartbeat/ocf-shellfuncs.in | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index e0eaae1d5..c4d40e382 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -554,8 +554,7 @@ ocf_is_probe() { + } + + # returns true if the resource is configured as a clone. This is +-# defined as a resource where the clone-max meta attribute is present, +-# and set to greater than zero. ++# defined as a resource where the clone-max meta attribute is present. + ocf_is_clone() { + [ ! -z "${OCF_RESKEY_CRM_meta_clone_max}" ] + } diff --git a/SOURCES/bz1818997-3-nfsserver-nfsnotify-fix-selinux-label-issue.patch b/SOURCES/bz1818997-3-nfsserver-nfsnotify-fix-selinux-label-issue.patch new file mode 100644 index 0000000..e470d50 --- /dev/null +++ b/SOURCES/bz1818997-3-nfsserver-nfsnotify-fix-selinux-label-issue.patch @@ -0,0 +1,48 @@ +From dbd45cb5fcce0a3378f6ecd0c14b578e6f843e3d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 24 Jul 2020 16:03:20 +0200 +Subject: [PATCH 1/2] nfsserver: fix SELinux issue due to newer ls versions + giving additional output. + +This patch has been tested on RHEL6, 7 and 8. +--- + heartbeat/nfsserver | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 0dbc173f3..80d20676b 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -192,7 +192,7 @@ fi + which restorecon > /dev/null 2>&1 && selinuxenabled + SELINUX_ENABLED=$? + if [ $SELINUX_ENABLED -eq 0 ]; then +- export SELINUX_LABEL="$(ls -ldZ $STATD_PATH | cut -f4 -d' ')" ++ export SELINUX_LABEL="$(ls -dZ $STATD_PATH | grep -o '\S\+:\S\+:\S\+')" + fi + + ## + +From 81118dbb25fe2cfc7d5fc803178cd7c3b445915e Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 27 Jul 2020 10:33:44 +0200 +Subject: [PATCH 2/2] nfsnotify: fix SELinux issue due to newer ls versions + giving additional output. + +--- + heartbeat/nfsnotify.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/nfsnotify.in b/heartbeat/nfsnotify.in +index 7f710bca7..851f6ad6b 100644 +--- a/heartbeat/nfsnotify.in ++++ b/heartbeat/nfsnotify.in +@@ -305,7 +305,7 @@ esac + which restorecon > /dev/null 2>&1 && selinuxenabled + SELINUX_ENABLED=$? 
+ if [ $SELINUX_ENABLED -eq 0 ]; then +- export SELINUX_LABEL="$(ls -ldZ $STATD_PATH | cut -f4 -d' ')" ++ export SELINUX_LABEL="$(ls -dZ $STATD_PATH | grep -o '\S\+:\S\+:\S\+')" + fi + + case $__OCF_ACTION in diff --git a/SOURCES/bz1818997-nfsserver-1-fix-nfsv4-only-support.patch b/SOURCES/bz1818997-nfsserver-1-fix-nfsv4-only-support.patch new file mode 100644 index 0000000..b3efdce --- /dev/null +++ b/SOURCES/bz1818997-nfsserver-1-fix-nfsv4-only-support.patch @@ -0,0 +1,43 @@ +From 47dd1d16f08de06d512f9e04c3966c35f0ac4d3f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 27 May 2020 13:05:57 +0200 +Subject: [PATCH] nfsserver: fix NFSv4-only support + +When disabling NFSv2 and NFSv3 mountd doesnt register with rpcbind, but +it's still running. This patch checks that mountd is running instead of +basing its status on it being registered w/rpcbind. +--- + heartbeat/nfsserver | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index acef0147a..9e6e1fcb1 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -316,7 +316,7 @@ nfsserver_systemd_monitor() + fi + + ocf_log debug "Status: nfs-mountd" +- rpcinfo -t localhost 100005 > /dev/null 2>&1 ++ ps axww | grep -q "[r]pc.mountd" + rc=$? + if [ "$rc" -ne "0" ]; then + ocf_exit_reason "nfs-mountd is not running" +@@ -683,7 +683,7 @@ nfsserver_start () + local i=1 + while : ; do + ocf_log info "Start: nfs-mountd i: $i" +- rpcinfo -t localhost 100005 > /dev/null 2>&1 ++ ps axww | grep -q "[r]pc.mountd" + rc=$? + if [ "$rc" -eq "0" ]; then + break; +@@ -800,7 +800,7 @@ nfsserver_stop () + + nfs_exec stop nfs-mountd > /dev/null 2>&1 + ocf_log info "Stop: nfs-mountd" +- rpcinfo -t localhost 100005 > /dev/null 2>&1 ++ ps axww | grep -q "[r]pc.mountd" + rc=$? + if [ "$rc" -eq "0" ]; then + ocf_exit_reason "Failed to stop nfs-mountd" diff --git a/SOURCES/bz1818997-nfsserver-2-stop-nfsdcld-if-present.patch b/SOURCES/bz1818997-nfsserver-2-stop-nfsdcld-if-present.patch new file mode 100644 index 0000000..085a39a --- /dev/null +++ b/SOURCES/bz1818997-nfsserver-2-stop-nfsdcld-if-present.patch @@ -0,0 +1,34 @@ +From 290741f43ff414630f558ee3432e830e39d1599d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 22 Jul 2020 11:56:32 +0200 +Subject: [PATCH] nfsserver: stop nfsdcld if present during stop-action + +--- + heartbeat/nfsserver | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 9e6e1fcb1..0dbc173f3 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -806,6 +806,20 @@ nfsserver_stop () + ocf_exit_reason "Failed to stop nfs-mountd" + return $OCF_ERR_GENERIC + fi ++ ++ if systemctl --no-legend list-unit-files "nfsdcld*" | grep -q nfsdcld; then ++ nfs_exec stop nfsdcld > /dev/null 2>&1 ++ ocf_log info "Stop: nfsdcld" ++ fn=`mktemp` ++ nfs_exec status nfsdcld > $fn 2>&1 ++ rc=$? 
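++	# nfs_exec "status" exiting 0 here means nfsdcld is still active, i.e.
++	# the stop above did not take effect, hence rc 0 is the failure branch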
++ ocf_log debug "$(cat $fn)" ++ rm -f $fn ++ if [ "$rc" -eq "0" ]; then ++ ocf_exit_reason "Failed to stop nfsdcld" ++ return $OCF_ERR_GENERIC ++ fi ++ fi + esac + + diff --git a/SOURCES/bz1819021-aws-vpc-move-ip-delete-remaining-route-entries.patch b/SOURCES/bz1819021-aws-vpc-move-ip-delete-remaining-route-entries.patch new file mode 100644 index 0000000..85355b3 --- /dev/null +++ b/SOURCES/bz1819021-aws-vpc-move-ip-delete-remaining-route-entries.patch @@ -0,0 +1,24 @@ +From 390d1cb8b057ef0e6869fb57dc1e6b6997af49f0 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 3 Apr 2020 16:10:04 +0200 +Subject: [PATCH] aws-vpc-move-ip: delete remaining route entries + +--- + heartbeat/aws-vpc-move-ip | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 97a467217..26ca6007d 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -256,6 +256,10 @@ ec2ip_drop() { + return $OCF_ERR_GENERIC + fi + ++ # delete remaining route-entries if any ++ ip route show to exact ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface | xargs -r ip route delete ++ ip route show table local to exact ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface | xargs -r ip route delete ++ + return $OCF_SUCCESS + } + diff --git a/SOURCES/bz1819965-1-ocf.py-update.patch b/SOURCES/bz1819965-1-ocf.py-update.patch new file mode 100644 index 0000000..e94deb7 --- /dev/null +++ b/SOURCES/bz1819965-1-ocf.py-update.patch @@ -0,0 +1,357 @@ +--- a/heartbeat/ocf.py 2020-04-08 13:03:20.543477544 +0200 ++++ b/heartbeat/ocf.py 2020-04-06 10:23:45.950913519 +0200 +@@ -88,6 +88,10 @@ + + OCF_RESOURCE_INSTANCE = env.get("OCF_RESOURCE_INSTANCE") + ++OCF_ACTION = env.get("__OCF_ACTION") ++if OCF_ACTION is None and len(argv) == 2: ++ OCF_ACTION = argv[1] ++ + HA_DEBUG = env.get("HA_debug", 0) + HA_DATEFMT = env.get("HA_DATEFMT", "%b %d %T ") + HA_LOGFACILITY = env.get("HA_LOGFACILITY") +@@ -135,3 +139,343 @@ + log.addHandler(dfh) + + logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) ++ ++ ++_exit_reason_set = False ++ ++def ocf_exit_reason(msg): ++ """ ++ Print exit error string to stderr. ++ ++ Allows the OCF agent to provide a string describing ++ why the exit code was returned. ++ """ ++ global _exit_reason_set ++ cookie = env.get("OCF_EXIT_REASON_PREFIX", "ocf-exit-reason:") ++ sys.stderr.write("{}{}\n".format(cookie, msg)) ++ sys.stderr.flush() ++ logger.error(msg) ++ _exit_reason_set = True ++ ++ ++def have_binary(name): ++ """ ++ True if binary exists, False otherwise. ++ """ ++ def _access_check(fn): ++ return (os.path.exists(fn) and ++ os.access(fn, os.F_OK | os.X_OK) and ++ not os.path.isdir(fn)) ++ if _access_check(name): ++ return True ++ path = env.get("PATH", os.defpath).split(os.pathsep) ++ seen = set() ++ for dir in path: ++ dir = os.path.normcase(dir) ++ if dir not in seen: ++ seen.add(dir) ++ name2 = os.path.join(dir, name) ++ if _access_check(name2): ++ return True ++ return False ++ ++ ++def is_true(val): ++ """ ++ Convert an OCF truth value to a ++ Python boolean. ++ """ ++ return val in ("yes", "true", "1", 1, "YES", "TRUE", "ja", "on", "ON", True) ++ ++ ++def is_probe(): ++ """ ++ A probe is defined as a monitor operation ++ with an interval of zero. This is called ++ by Pacemaker to check the status of a possibly ++ not running resource. 
++ """ ++ return (OCF_ACTION == "monitor" and ++ env.get("OCF_RESKEY_CRM_meta_interval", "") == "0") ++ ++ ++def get_parameter(name, default=None): ++ """ ++ Extract the parameter value from the environment ++ """ ++ return env.get("OCF_RESKEY_{}".format(name), default) ++ ++ ++def distro(): ++ """ ++ Return name of distribution/platform. ++ ++ If possible, returns "name/version", else ++ just "name". ++ """ ++ import subprocess ++ import platform ++ try: ++ ret = subprocess.check_output(["lsb_release", "-si"]) ++ if type(ret) != str: ++ ret = ret.decode() ++ distro = ret.strip() ++ ret = subprocess.check_output(["lsb_release", "-sr"]) ++ if type(ret) != str: ++ ret = ret.decode() ++ version = ret.strip() ++ return "{}/{}".format(distro, version) ++ except Exception: ++ if os.path.exists("/etc/debian_version"): ++ return "Debian" ++ if os.path.exists("/etc/SuSE-release"): ++ return "SUSE" ++ if os.path.exists("/etc/redhat-release"): ++ return "Redhat" ++ return platform.system() ++ ++ ++class Parameter(object): ++ def __init__(self, name, shortdesc, longdesc, content_type, unique, required, default): ++ self.name = name ++ self.shortdesc = shortdesc ++ self.longdesc = longdesc ++ self.content_type = content_type ++ self.unique = unique ++ self.required = required ++ self.default = default ++ ++ def __str__(self): ++ return self.to_xml() ++ ++ def to_xml(self): ++ ret = '' + "\n" ++ ret += '' + self.shortdesc + '' + "\n" ++ ret += ' ++ ++ ++1.0 ++ ++{longdesc} ++ ++{shortdesc} ++ ++ ++{parameters} ++ ++ ++ ++{actions} ++ ++ ++ ++""".format(name=self.name, ++ longdesc=self.longdesc, ++ shortdesc=self.shortdesc, ++ parameters="".join(p.to_xml() for p in self.parameters), ++ actions="".join(a.to_xml() for a in self.actions)) ++ ++ def run(self): ++ run(self) ++ ++ ++def run(agent, handlers=None): ++ """ ++ Main loop implementation for resource agents. ++ Does not return. ++ ++ Arguments: ++ ++ agent: Agent object. ++ ++ handlers: Dict of action name to handler function. ++ ++ Handler functions can take parameters as arguments, ++ the run loop will read parameter values from the ++ environment and pass to the handler. 
++ """ ++ import inspect ++ ++ agent._handlers.update(handlers or {}) ++ handlers = agent._handlers ++ ++ def check_required_params(): ++ for p in agent.parameters: ++ if p.required and get_parameter(p.name) is None: ++ ocf_exit_reason("{}: Required parameter not set".format(p.name)) ++ sys.exit(OCF_ERR_CONFIGURED) ++ ++ def call_handler(func): ++ if hasattr(inspect, 'signature'): ++ params = inspect.signature(func).parameters.keys() ++ else: ++ params = inspect.getargspec(func).args ++ def value_for_parameter(param): ++ val = get_parameter(param) ++ if val is not None: ++ return val ++ for p in agent.parameters: ++ if p.name == param: ++ return p.default ++ arglist = [value_for_parameter(p) for p in params] ++ try: ++ rc = func(*arglist) ++ if rc is None: ++ rc = OCF_SUCCESS ++ return rc ++ except Exception as err: ++ if not _exit_reason_set: ++ ocf_exit_reason(str(err)) ++ else: ++ logger.error(str(err)) ++ return OCF_ERR_GENERIC ++ ++ meta_data_action = False ++ for action in agent.actions: ++ if action.name == "meta-data": ++ meta_data_action = True ++ break ++ if not meta_data_action: ++ agent.add_action("meta-data", timeout=10) ++ ++ if len(sys.argv) == 2 and sys.argv[1] in ("-h", "--help"): ++ sys.stdout.write("usage: %s {%s}\n\n" % (sys.argv[0], "|".join(sorted(handlers.keys()))) + ++ "Expects to have a fully populated OCF RA compliant environment set.\n") ++ sys.exit(OCF_SUCCESS) ++ ++ if OCF_ACTION is None: ++ ocf_exit_reason("No action argument set") ++ sys.exit(OCF_ERR_UNIMPLEMENTED) ++ if OCF_ACTION in ('meta-data', 'usage', 'methods'): ++ sys.stdout.write(agent.to_xml() + "\n") ++ sys.exit(OCF_SUCCESS) ++ ++ check_required_params() ++ if OCF_ACTION in handlers: ++ rc = call_handler(handlers[OCF_ACTION]) ++ sys.exit(rc) ++ sys.exit(OCF_ERR_UNIMPLEMENTED) ++ ++ ++if __name__ == "__main__": ++ import unittest ++ ++ class TestMetadata(unittest.TestCase): ++ def test_noparams_noactions(self): ++ m = Agent("foo", shortdesc="shortdesc", longdesc="longdesc") ++ self.assertEqual(""" ++ ++ ++1.0 ++ ++longdesc ++ ++shortdesc ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++""", str(m)) ++ ++ def test_params_actions(self): ++ m = Agent("foo", shortdesc="shortdesc", longdesc="longdesc") ++ m.add_parameter("testparam") ++ m.add_action("start") ++ self.assertEqual(str(m.actions[0]), '\n') ++ ++ unittest.main() diff --git a/SOURCES/bz1819965-2-azure-events.patch b/SOURCES/bz1819965-2-azure-events.patch new file mode 100644 index 0000000..220d2ba --- /dev/null +++ b/SOURCES/bz1819965-2-azure-events.patch @@ -0,0 +1,1060 @@ +diff -uNr a/configure.ac b/configure.ac +--- a/configure.ac 2020-04-16 11:54:08.466619607 +0200 ++++ b/configure.ac 2020-04-16 12:05:17.241352586 +0200 +@@ -30,6 +30,8 @@ + PKG_FEATURES="" + + AC_CONFIG_AUX_DIR(.) ++AC_CONFIG_MACRO_DIR([m4]) ++ + AC_CANONICAL_HOST + + dnl Where #defines go (e.g. 
`AC_CHECK_HEADERS' below) +@@ -72,6 +74,11 @@ + [AC_MSG_ERROR([systemd support requested but pkg-config unable to query systemd package])]) + with_systemdsystemunitdir=no], + [with_systemdsystemunitdir="$def_systemdsystemunitdir"])]) ++if test "x$with_systemdsystemunitdir" != "xno" && \ ++ test "x${prefix}" != "xNONE" && \ ++ test "x${prefix}" != "x/usr"; then ++ with_systemdsystemunitdir="${prefix}/$with_systemdsystemunitdir" ++fi + AS_IF([test "x$with_systemdsystemunitdir" != "xno"], + [AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])]) + AM_CONDITIONAL([HAVE_SYSTEMD], [test "x$with_systemdsystemunitdir" != "xno"]) +@@ -79,6 +86,11 @@ + AC_ARG_WITH([systemdtmpfilesdir], + AS_HELP_STRING([--with-systemdtmpfilesdir=DIR], [Directory for systemd tmp files]), + [], [with_systemdtmpfilesdir=$($PKGCONFIG --variable=tmpfilesdir systemd)]) ++ if test "x$with_systemdtmpfilesdir" != xno && \ ++ test "x${prefix}" != "xNONE" && \ ++ test "x${prefix}" != "x/usr"; then ++ with_systemdtmpfilesdir="${prefix}/$with_systemdtmpfilesdir" ++ fi + if test "x$with_systemdtmpfilesdir" != xno; then + AC_SUBST([systemdtmpfilesdir], [$with_systemdtmpfilesdir]) + fi +@@ -501,12 +513,35 @@ + AC_SUBST(RM) + AC_SUBST(TEST) + ++dnl Ensure PYTHON is an absolute path ++AC_PATH_PROG([PYTHON], [$PYTHON]) ++ + AM_PATH_PYTHON + if test -z "$PYTHON"; then + echo "*** Essential program python not found" 1>&2 +- exit 1 + fi + ++AC_PYTHON_MODULE(googleapiclient) ++AC_PYTHON_MODULE(pyroute2) ++ ++AS_VERSION_COMPARE([$PYTHON_VERSION], [2.7], [BUILD_OCF_PY=0], [BUILD_OCF_PY=1], [BUILD_OCF_PY=1]) ++ ++BUILD_AZURE_EVENTS=1 ++if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then ++ BUILD_AZURE_EVENTS=0 ++ AC_MSG_WARN("Not building azure-events") ++fi ++AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1) ++ ++BUILD_GCP_PD_MOVE=1 ++AM_CONDITIONAL(BUILD_GCP_PD_MOVE, test $BUILD_GCP_PD_MOVE -eq 1) ++ ++BUILD_GCP_VPC_MOVE_ROUTE=1 ++AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_ROUTE, test $BUILD_GCP_VPC_MOVE_ROUTE -eq 1) ++ ++BUILD_GCP_VPC_MOVE_VIP=1 ++AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_VIP, test $BUILD_GCP_VPC_MOVE_VIP -eq 1) ++ + AC_PATH_PROGS(ROUTE, route) + AC_DEFINE_UNQUOTED(ROUTE, "$ROUTE", path to route command) + +@@ -541,6 +576,12 @@ + if test x"${STYLESHEET_PREFIX}" = x""; then + DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ + -type d | LC_ALL=C sort) ++ if test x"${DIRS}" = x""; then ++ # when datadir is not standard OS path, we cannot find docbook.xsl ++ # use standard OS path as backup ++ DIRS=$(find "/usr/share" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ ++ -type d | LC_ALL=C sort) ++ fi + XSLT=$(basename ${DOCBOOK_XSL_PATH}) + for d in ${DIRS}; do + if test -f "${d}/${XSLT}"; then +@@ -948,6 +989,7 @@ + ) + + dnl Files we output that need to be executable ++AC_CONFIG_FILES([heartbeat/azure-events], [chmod +x heartbeat/azure-events]) + AC_CONFIG_FILES([heartbeat/AoEtarget], [chmod +x heartbeat/AoEtarget]) + AC_CONFIG_FILES([heartbeat/ManageRAID], [chmod +x heartbeat/ManageRAID]) + AC_CONFIG_FILES([heartbeat/ManageVE], [chmod +x heartbeat/ManageVE]) +@@ -1021,7 +1063,7 @@ + AC_MSG_RESULT([]) + AC_MSG_RESULT([$PACKAGE configuration:]) + AC_MSG_RESULT([ Version = ${VERSION}]) +-AC_MSG_RESULT([ Build Version = e711383fd5c7bef9c24ff6bc85465e59f91080f9]) ++AC_MSG_RESULT([ Build Version = $Format:%H$]) + AC_MSG_RESULT([ Features =${PKG_FEATURES}]) + AC_MSG_RESULT([]) + AC_MSG_RESULT([ Prefix = ${prefix}]) +diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am +--- 
a/doc/man/Makefile.am 2020-04-16 11:54:08.466619607 +0200 ++++ b/doc/man/Makefile.am 2020-04-16 12:08:34.913726440 +0200 +@@ -55,7 +55,7 @@ + # 12126 on savannah.gnu.org. But, maybe it gets fixed soon, it was + # first reported in 1995 and added to Savannah in in 2005... + if BUILD_DOC +-man_MANS = ocf_heartbeat_AoEtarget.7 \ ++man_MANS = ocf_heartbeat_AoEtarget.7 \ + ocf_heartbeat_AudibleAlarm.7 \ + ocf_heartbeat_ClusterMon.7 \ + ocf_heartbeat_CTDB.7 \ +@@ -183,6 +183,22 @@ + man_MANS += ocf_heartbeat_IPv6addr.7 + endif + ++if BUILD_AZURE_EVENTS ++man_MANS += ocf_heartbeat_azure-events.7 ++endif ++ ++if BUILD_GCP_PD_MOVE ++man_MANS += ocf_heartbeat_gcp-pd-move.7 ++endif ++ ++if BUILD_GCP_VPC_MOVE_ROUTE ++man_MANS += ocf_heartbeat_gcp-vpc-move-route.7 ++endif ++ ++if BUILD_GCP_VPC_MOVE_VIP ++man_MANS += ocf_heartbeat_gcp-vpc-move-vip.7 ++endif ++ + xmlfiles = $(man_MANS:.7=.xml) + + %.1 %.5 %.7 %.8: %.xml +diff -uNr a/heartbeat/azure-events.in b/heartbeat/azure-events.in +--- a/heartbeat/azure-events.in 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/azure-events.in 2020-04-16 12:02:15.114693551 +0200 +@@ -0,0 +1,824 @@ ++#!@PYTHON@ -tt ++# ++# Resource agent for monitoring Azure Scheduled Events ++# ++# License: GNU General Public License (GPL) ++# (c) 2018 Tobias Niekamp, Microsoft Corp. ++# and Linux-HA contributors ++ ++import os ++import sys ++import time ++import subprocess ++import json ++try: ++ import urllib2 ++except ImportError: ++ import urllib.request as urllib2 ++import socket ++from collections import defaultdict ++ ++OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os.environ.get("OCF_ROOT")) ++sys.path.append(OCF_FUNCTIONS_DIR) ++import ocf ++ ++############################################################################## ++ ++ ++VERSION = "0.10" ++USER_AGENT = "Pacemaker-ResourceAgent/%s %s" % (VERSION, ocf.distro()) ++ ++attr_globalPullState = "azure-events_globalPullState" ++attr_lastDocVersion = "azure-events_lastDocVersion" ++attr_curNodeState = "azure-events_curNodeState" ++attr_pendingEventIDs = "azure-events_pendingEventIDs" ++ ++default_loglevel = ocf.logging.INFO ++default_relevantEventTypes = set(["Reboot", "Redeploy"]) ++ ++global_pullMaxAttempts = 3 ++global_pullDelaySecs = 1 ++ ++############################################################################## ++ ++class attrDict(defaultdict): ++ """ ++ A wrapper for accessing dict keys like an attribute ++ """ ++ def __init__(self, data): ++ super(attrDict, self).__init__(attrDict) ++ for d in data.keys(): ++ self.__setattr__(d, data[d]) ++ ++ def __getattr__(self, key): ++ try: ++ return self[key] ++ except KeyError: ++ raise AttributeError(key) ++ ++ def __setattr__(self, key, value): ++ self[key] = value ++ ++############################################################################## ++ ++class azHelper: ++ """ ++ Helper class for Azure's metadata API (including Scheduled Events) ++ """ ++ metadata_host = "http://169.254.169.254/metadata" ++ instance_api = "instance" ++ events_api = "scheduledevents" ++ api_version = "2017-08-01" ++ ++ @staticmethod ++ def _sendMetadataRequest(endpoint, postData=None): ++ """ ++ Send a request to Azure's Azure Metadata Service API ++ """ ++ url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version) ++ ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData)) ++ ocf.logger.debug("_sendMetadataRequest: url = %s" % url) ++ ++ req = urllib2.Request(url, postData) ++ 
req.add_header("Metadata", "true") ++ req.add_header("User-Agent", USER_AGENT) ++ resp = urllib2.urlopen(req) ++ data = resp.read() ++ ocf.logger.debug("_sendMetadataRequest: response = %s" % data) ++ if data: ++ data = json.loads(data) ++ ++ ocf.logger.debug("_sendMetadataRequest: finished") ++ return data ++ ++ @staticmethod ++ def getInstanceInfo(): ++ """ ++ Fetch details about the current VM from Azure's Azure Metadata Service API ++ """ ++ ocf.logger.debug("getInstanceInfo: begin") ++ ++ jsondata = azHelper._sendMetadataRequest(azHelper.instance_api) ++ ocf.logger.debug("getInstanceInfo: json = %s" % jsondata) ++ ++ ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) ++ return attrDict(jsondata["compute"]) ++ ++ @staticmethod ++ def pullScheduledEvents(): ++ """ ++ Retrieve all currently scheduled events via Azure Metadata Service API ++ """ ++ ocf.logger.debug("pullScheduledEvents: begin") ++ ++ jsondata = azHelper._sendMetadataRequest(azHelper.events_api) ++ ocf.logger.debug("pullScheduledEvents: json = %s" % jsondata) ++ ++ ocf.logger.debug("pullScheduledEvents: finished") ++ return attrDict(jsondata) ++ ++ @staticmethod ++ def forceEvents(eventIDs): ++ """ ++ Force a set of events to start immediately ++ """ ++ ocf.logger.debug("forceEvents: begin") ++ ++ events = [] ++ for e in eventIDs: ++ events.append({ ++ "EventId": e, ++ }) ++ postData = { ++ "StartRequests" : events ++ } ++ ocf.logger.info("forceEvents: postData = %s" % postData) ++ resp = azHelper._sendMetadataRequest(azHelper.events_api, postData=json.dumps(postData)) ++ ++ ocf.logger.debug("forceEvents: finished") ++ return ++ ++############################################################################## ++ ++class clusterHelper: ++ """ ++ Helper functions for Pacemaker control via crm ++ """ ++ @staticmethod ++ def _getLocation(node): ++ """ ++ Helper function to retrieve local/global attributes ++ """ ++ if node: ++ return ["--node", node] ++ else: ++ return ["--type", "crm_config"] ++ ++ @staticmethod ++ def _exec(command, *args): ++ """ ++ Helper function to execute a UNIX command ++ """ ++ args = list(args) ++ ocf.logger.debug("_exec: begin; command = %s, args = %s" % (command, str(args))) ++ ++ def flatten(*n): ++ return (str(e) for a in n ++ for e in (flatten(*a) if isinstance(a, (tuple, list)) else (str(a),))) ++ command = list(flatten([command] + args)) ++ ocf.logger.debug("_exec: cmd = %s" % " ".join(command)) ++ try: ++ ret = subprocess.check_output(command) ++ ocf.logger.debug("_exec: return = %s" % ret) ++ return ret.rstrip() ++ except Exception as err: ++ ocf.logger.exception(err) ++ return None ++ ++ @staticmethod ++ def setAttr(key, value, node=None): ++ """ ++ Set the value of a specific global/local attribute in the Pacemaker cluster ++ """ ++ ocf.logger.debug("setAttr: begin; key = %s, value = %s, node = %s" % (key, value, node)) ++ ++ if value: ++ ret = clusterHelper._exec("crm_attribute", ++ "--name", key, ++ "--update", value, ++ clusterHelper._getLocation(node)) ++ else: ++ ret = clusterHelper._exec("crm_attribute", ++ "--name", key, ++ "--delete", ++ clusterHelper._getLocation(node)) ++ ++ ocf.logger.debug("setAttr: finished") ++ return len(ret) == 0 ++ ++ @staticmethod ++ def getAttr(key, node=None): ++ """ ++ Retrieve a global/local attribute from the Pacemaker cluster ++ """ ++ ocf.logger.debug("getAttr: begin; key = %s, node = %s" % (key, node)) ++ ++ val = clusterHelper._exec("crm_attribute", ++ "--name", key, ++ "--query", "--quiet", ++ "--default", "", 
++ clusterHelper._getLocation(node)) ++ ocf.logger.debug("getAttr: finished") ++ if not val: ++ return None ++ return val if not val.isdigit() else int(val) ++ ++ @staticmethod ++ def getAllNodes(): ++ """ ++ Get a list of hostnames for all nodes in the Pacemaker cluster ++ """ ++ ocf.logger.debug("getAllNodes: begin") ++ ++ nodes = [] ++ nodeList = clusterHelper._exec("crm_node", "--list") ++ for n in nodeList.decode().split("\n"): ++ nodes.append(n.split()[1]) ++ ocf.logger.debug("getAllNodes: finished; return %s" % str(nodes)) ++ ++ return nodes ++ ++ @staticmethod ++ def getHostNameFromAzName(azName): ++ """ ++ Helper function to get the actual host name from an Azure node name ++ """ ++ return clusterHelper.getAttr("hostName_%s" % azName) ++ ++ @staticmethod ++ def removeHoldFromNodes(): ++ """ ++ Remove the ON_HOLD state from all nodes in the Pacemaker cluster ++ """ ++ ocf.logger.debug("removeHoldFromNodes: begin") ++ ++ for n in clusterHelper.getAllNodes(): ++ if clusterHelper.getAttr(attr_curNodeState, node=n) == "ON_HOLD": ++ clusterHelper.setAttr(attr_curNodeState, "AVAILABLE", node=n) ++ ocf.logger.info("removeHoldFromNodes: removed ON_HOLD from node %s" % n) ++ ++ ocf.logger.debug("removeHoldFromNodes: finished") ++ return False ++ ++ @staticmethod ++ def otherNodesAvailable(exceptNode): ++ """ ++ Check if there are any nodes (except a given node) in the Pacemaker cluster that have state AVAILABLE ++ """ ++ ocf.logger.debug("otherNodesAvailable: begin; exceptNode = %s" % exceptNode) ++ ++ for n in clusterHelper.getAllNodes(): ++ state = clusterHelper.getAttr(attr_curNodeState, node=n) ++ state = stringToNodeState(state) if state else AVAILABLE ++ if state == AVAILABLE and n != exceptNode.hostName: ++ ocf.logger.info("otherNodesAvailable: at least %s is available" % n) ++ ocf.logger.debug("otherNodesAvailable: finished") ++ return True ++ ocf.logger.info("otherNodesAvailable: no other nodes are available") ++ ocf.logger.debug("otherNodesAvailable: finished") ++ ++ return False ++ ++ @staticmethod ++ def transitionSummary(): ++ """ ++ Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby) ++ """ ++ # Is a global crm_simulate "too much"? Or would it be sufficient it there are no planned transitions for a particular node? 
++ # # crm_simulate -Ls ++ # Transition Summary: ++ # * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1) ++ # * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0) ++ # * Move rsc_ip_HN1_HDB03 (Started hsr3-db0 -> hsr3-db1) ++ # * Start rsc_nc_HN1_HDB03 (hsr3-db1) ++ # # Excepted result when there are no pending actions: ++ # Transition Summary: ++ ocf.logger.debug("transitionSummary: begin") ++ ++ summary = clusterHelper._exec("crm_simulate", "-Ls") ++ if not summary: ++ ocf.logger.warning("transitionSummary: could not load transition summary") ++ return False ++ if summary.find("Transition Summary:") < 0: ++ ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary) ++ return False ++ summary = summary.split("Transition Summary:")[1] ++ ret = summary.decode().split("\n").pop(0) ++ ++ ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret)) ++ return ret ++ ++ @staticmethod ++ def listOperationsOnNode(node): ++ """ ++ Get a list of all current operations for a given node (used to check if any resources are pending) ++ """ ++ # hsr3-db1:/home/tniek # crm_resource --list-operations -N hsr3-db0 ++ # rsc_azure-events (ocf::heartbeat:azure-events): Started: rsc_azure-events_start_0 (node=hsr3-db0, call=91, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=115ms): complete ++ # rsc_azure-events (ocf::heartbeat:azure-events): Started: rsc_azure-events_monitor_10000 (node=hsr3-db0, call=93, rc=0, last-rc-change=Fri Jun 8 22:37:47 2018, exec=197ms): complete ++ # rsc_SAPHana_HN1_HDB03 (ocf::suse:SAPHana): Master: rsc_SAPHana_HN1_HDB03_start_0 (node=hsr3-db0, call=-1, rc=193, last-rc-change=Fri Jun 8 22:37:46 2018, exec=0ms): pending ++ # rsc_SAPHanaTopology_HN1_HDB03 (ocf::suse:SAPHanaTopology): Started: rsc_SAPHanaTopology_HN1_HDB03_start_0 (node=hsr3-db0, call=90, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=3214ms): complete ++ ocf.logger.debug("listOperationsOnNode: begin; node = %s" % node) ++ ++ resources = clusterHelper._exec("crm_resource", "--list-operations", "-N", node) ++ if len(resources) == 0: ++ ret = [] ++ else: ++ ret = resources.decode().split("\n") ++ ++ ocf.logger.debug("listOperationsOnNode: finished; return = %s" % str(ret)) ++ return ret ++ ++ @staticmethod ++ def noPendingResourcesOnNode(node): ++ """ ++ Check that there are no pending resources on a given node ++ """ ++ ocf.logger.debug("noPendingResourcesOnNode: begin; node = %s" % node) ++ ++ for r in clusterHelper.listOperationsOnNode(node): ++ ocf.logger.debug("noPendingResourcesOnNode: * %s" % r) ++ resource = r.split()[-1] ++ if resource == "pending": ++ ocf.logger.info("noPendingResourcesOnNode: found resource %s that is still pending" % resource) ++ ocf.logger.debug("noPendingResourcesOnNode: finished; return = False") ++ return False ++ ocf.logger.info("noPendingResourcesOnNode: no pending resources on node %s" % node) ++ ocf.logger.debug("noPendingResourcesOnNode: finished; return = True") ++ ++ return True ++ ++ @staticmethod ++ def allResourcesStoppedOnNode(node): ++ """ ++ Check that all resources on a given node are stopped ++ """ ++ ocf.logger.debug("allResourcesStoppedOnNode: begin; node = %s" % node) ++ ++ if clusterHelper.noPendingResourcesOnNode(node): ++ if len(clusterHelper.transitionSummary()) == 0: ++ ocf.logger.info("allResourcesStoppedOnNode: no pending resources on node %s and empty transition summary" % node) ++ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = True") ++ return True ++ ocf.logger.info("allResourcesStoppedOnNode: 
transition summary is not empty") ++ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False") ++ return False ++ ++ ocf.logger.info("allResourcesStoppedOnNode: still pending resources on node %s" % node) ++ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False") ++ return False ++ ++############################################################################## ++ ++AVAILABLE = 0 # Node is online and ready to handle events ++STOPPING = 1 # Standby has been triggered, but some resources are still running ++IN_EVENT = 2 # All resources are stopped, and event has been initiated via Azure Metadata Service ++ON_HOLD = 3 # Node has a pending event that cannot be started there are no other nodes available ++ ++def stringToNodeState(name): ++ if type(name) == int: return name ++ if name == "STOPPING": return STOPPING ++ if name == "IN_EVENT": return IN_EVENT ++ if name == "ON_HOLD": return ON_HOLD ++ return AVAILABLE ++ ++def nodeStateToString(state): ++ if state == STOPPING: return "STOPPING" ++ if state == IN_EVENT: return "IN_EVENT" ++ if state == ON_HOLD: return "ON_HOLD" ++ return "AVAILABLE" ++ ++############################################################################## ++ ++class Node: ++ """ ++ Core class implementing logic for a cluster node ++ """ ++ def __init__(self, ra): ++ self.raOwner = ra ++ self.azInfo = azHelper.getInstanceInfo() ++ self.azName = self.azInfo.name ++ self.hostName = socket.gethostname() ++ self.setAttr("azName", self.azName) ++ clusterHelper.setAttr("hostName_%s" % self.azName, self.hostName) ++ ++ def getAttr(self, key): ++ """ ++ Get a local attribute ++ """ ++ return clusterHelper.getAttr(key, node=self.hostName) ++ ++ def setAttr(self, key, value): ++ """ ++ Set a local attribute ++ """ ++ return clusterHelper.setAttr(key, value, node=self.hostName) ++ ++ def selfOrOtherNode(self, node): ++ """ ++ Helper function to distinguish self/other node ++ """ ++ return node if node else self.hostName ++ ++ def setState(self, state, node=None): ++ """ ++ Set the state for a given node (or self) ++ """ ++ node = self.selfOrOtherNode(node) ++ ocf.logger.debug("setState: begin; node = %s, state = %s" % (node, nodeStateToString(state))) ++ ++ clusterHelper.setAttr(attr_curNodeState, nodeStateToString(state), node=node) ++ ++ ocf.logger.debug("setState: finished") ++ ++ def getState(self, node=None): ++ """ ++ Get the state for a given node (or self) ++ """ ++ node = self.selfOrOtherNode(node) ++ ocf.logger.debug("getState: begin; node = %s" % node) ++ ++ state = clusterHelper.getAttr(attr_curNodeState, node=node) ++ ocf.logger.debug("getState: state = %s" % state) ++ ocf.logger.debug("getState: finished") ++ if not state: ++ return AVAILABLE ++ return stringToNodeState(state) ++ ++ def setEventIDs(self, eventIDs, node=None): ++ """ ++ Set pending EventIDs for a given node (or self) ++ """ ++ node = self.selfOrOtherNode(node) ++ ocf.logger.debug("setEventIDs: begin; node = %s, eventIDs = %s" % (node, str(eventIDs))) ++ ++ if eventIDs: ++ eventIDStr = ",".join(eventIDs) ++ else: ++ eventIDStr = None ++ clusterHelper.setAttr(attr_pendingEventIDs, eventIDStr, node=node) ++ ++ ocf.logger.debug("setEventIDs: finished") ++ return ++ ++ def getEventIDs(self, node=None): ++ """ ++ Get pending EventIDs for a given node (or self) ++ """ ++ node = self.selfOrOtherNode(node) ++ ocf.logger.debug("getEventIDs: begin; node = %s" % node) ++ ++ eventIDStr = clusterHelper.getAttr(attr_pendingEventIDs, node=node) ++ if eventIDStr: ++ eventIDs = 
eventIDStr.decode().split(",") ++ else: ++ eventIDs = None ++ ++ ocf.logger.debug("getEventIDs: finished; eventIDs = %s" % str(eventIDs)) ++ return eventIDs ++ ++ def updateNodeStateAndEvents(self, state, eventIDs, node=None): ++ """ ++ Set the state and pending EventIDs for a given node (or self) ++ """ ++ ocf.logger.debug("updateNodeStateAndEvents: begin; node = %s, state = %s, eventIDs = %s" % (node, nodeStateToString(state), str(eventIDs))) ++ ++ self.setState(state, node=node) ++ self.setEventIDs(eventIDs, node=node) ++ ++ ocf.logger.debug("updateNodeStateAndEvents: finished") ++ return state ++ ++ def putNodeStandby(self, node=None): ++ """ ++ Put self to standby ++ """ ++ node = self.selfOrOtherNode(node) ++ ocf.logger.debug("putNodeStandby: begin; node = %s" % node) ++ ++ clusterHelper._exec("crm_attribute", ++ "-t", "nodes", ++ "-N", node, ++ "-n", "standby", ++ "-v", "on", ++ "--lifetime=forever") ++ ++ ocf.logger.debug("putNodeStandby: finished") ++ ++ def putNodeOnline(self, node=None): ++ """ ++ Put self back online ++ """ ++ node = self.selfOrOtherNode(node) ++ ocf.logger.debug("putNodeOnline: begin; node = %s" % node) ++ ++ clusterHelper._exec("crm_attribute", ++ "-t", "nodes", ++ "-N", node, ++ "-n", "standby", ++ "-v", "off", ++ "--lifetime=forever") ++ ++ ocf.logger.debug("putNodeOnline: finished") ++ ++ def separateEvents(self, events): ++ """ ++ Split own/other nodes' events ++ """ ++ ocf.logger.debug("separateEvents: begin; events = %s" % str(events)) ++ ++ localEvents = [] ++ remoteEvents = [] ++ for e in events: ++ e = attrDict(e) ++ if e.EventType not in self.raOwner.relevantEventTypes: ++ continue ++ if self.azName in e.Resources: ++ localEvents.append(e) ++ else: ++ remoteEvents.append(e) ++ ocf.logger.debug("separateEvents: finished; localEvents = %s, remoteEvents = %s" % (str(localEvents), str(remoteEvents))) ++ return (localEvents, remoteEvents) ++ ++ def removeOrphanedEvents(self, azEvents): ++ """ ++ Remove remote events that are already finished ++ """ ++ ocf.logger.debug("removeOrphanedEvents: begin; azEvents = %s" % str(azEvents)) ++ ++ azEventIDs = set() ++ for e in azEvents: ++ azEventIDs.add(e.EventId) ++ # for all nodes except self ... ++ for n in clusterHelper.getAllNodes(): ++ if n == self.hostName: ++ continue ++ curState = self.getState(node=n) ++ # ... that still show in an event or shutting down resources ... ++ if curState in (STOPPING, IN_EVENT): ++ ocf.logger.info("removeOrphanedEvents: node %s has state %s" % (n, curState)) ++ clusterEventIDs = self.getEventIDs(node=n) ++ stillActive = False ++ # ... but don't have any more events running according to Azure, ... ++ for p in clusterEventIDs: ++ if p in azEventIDs: ++ ocf.logger.info("removeOrphanedEvents: (at least) event %s on node %s has not yet finished" % (str(p), n)) ++ stillActive = True ++ break ++ if not stillActive: ++ # ... put them back online. 
++ ocf.logger.info("removeOrphanedEvents: clusterEvents %s on node %s are not in azEvents %s -> bring node back online" % (str(clusterEventIDs), n, str(azEventIDs))) ++ self.putNodeOnline(node=n) ++ ++ ocf.logger.debug("removeOrphanedEvents: finished") ++ ++ def handleRemoteEvents(self, azEvents): ++ """ ++ Handle a list of events (as provided by Azure Metadata Service) for other nodes ++ """ ++ ocf.logger.debug("handleRemoteEvents: begin; hostName = %s, events = %s" % (self.hostName, str(azEvents))) ++ ++ if len(azEvents) == 0: ++ ocf.logger.debug("handleRemoteEvents: no remote events to handle") ++ ocf.logger.debug("handleRemoteEvents: finished") ++ return ++ eventIDsForNode = {} ++ ++ # iterate through all current events as per Azure ++ for e in azEvents: ++ ocf.logger.info("handleRemoteEvents: handling remote event %s (%s; nodes = %s)" % (e.EventId, e.EventType, str(e.Resources))) ++ # before we can force an event to start, we need to ensure all nodes involved have stopped their resources ++ if e.EventStatus == "Scheduled": ++ allNodesStopped = True ++ for azName in e.Resources: ++ hostName = clusterHelper.getHostNameFromAzName(azName) ++ state = self.getState(node=hostName) ++ if state == STOPPING: ++ # the only way we can continue is when node state is STOPPING, but all resources have been stopped ++ if not clusterHelper.allResourcesStoppedOnNode(hostName): ++ ocf.logger.info("handleRemoteEvents: (at least) node %s has still resources running -> wait" % hostName) ++ allNodesStopped = False ++ break ++ elif state in (AVAILABLE, IN_EVENT, ON_HOLD): ++ ocf.logger.info("handleRemoteEvents: node %s is still %s -> remote event needs to be picked up locally" % (hostName, nodeStateToString(state))) ++ allNodesStopped = False ++ break ++ if allNodesStopped: ++ ocf.logger.info("handleRemoteEvents: nodes %s are stopped -> add remote event %s to force list" % (str(e.Resources), e.EventId)) ++ for n in e.Resources: ++ hostName = clusterHelper.getHostNameFromAzName(n) ++ if hostName in eventIDsForNode: ++ eventIDsForNode[hostName].append(e.EventId) ++ else: ++ eventIDsForNode[hostName] = [e.EventId] ++ elif e.EventStatus == "Started": ++ ocf.logger.info("handleRemoteEvents: remote event already started") ++ ++ # force the start of all events whose nodes are ready (i.e. 
have no more resources running) ++ if len(eventIDsForNode.keys()) > 0: ++ eventIDsToForce = set([item for sublist in eventIDsForNode.values() for item in sublist]) ++ ocf.logger.info("handleRemoteEvents: set nodes %s to IN_EVENT; force remote events %s" % (str(eventIDsForNode.keys()), str(eventIDsToForce))) ++ for node, eventId in eventIDsForNode.items(): ++ self.updateNodeStateAndEvents(IN_EVENT, eventId, node=node) ++ azHelper.forceEvents(eventIDsToForce) ++ ++ ocf.logger.debug("handleRemoteEvents: finished") ++ ++ def handleLocalEvents(self, azEvents): ++ """ ++ Handle a list of own events (as provided by Azure Metadata Service) ++ """ ++ ocf.logger.debug("handleLocalEvents: begin; hostName = %s, azEvents = %s" % (self.hostName, str(azEvents))) ++ ++ azEventIDs = set() ++ for e in azEvents: ++ azEventIDs.add(e.EventId) ++ ++ curState = self.getState() ++ clusterEventIDs = self.getEventIDs() ++ mayUpdateDocVersion = False ++ ocf.logger.info("handleLocalEvents: current state = %s; pending local clusterEvents = %s" % (nodeStateToString(curState), str(clusterEventIDs))) ++ ++ # check if there are currently/still events set for the node ++ if clusterEventIDs: ++ # there are pending events set, so our state must be STOPPING or IN_EVENT ++ i = 0; touchedEventIDs = False ++ while i < len(clusterEventIDs): ++ # clean up pending events that are already finished according to AZ ++ if clusterEventIDs[i] not in azEventIDs: ++ ocf.logger.info("handleLocalEvents: remove finished local clusterEvent %s" % (clusterEventIDs[i])) ++ clusterEventIDs.pop(i) ++ touchedEventIDs = True ++ else: ++ i += 1 ++ if len(clusterEventIDs) > 0: ++ # there are still pending events (either because we're still stopping, or because the event is still in place) ++ # either way, we need to wait ++ if touchedEventIDs: ++ ocf.logger.info("handleLocalEvents: added new local clusterEvent %s" % str(clusterEventIDs)) ++ self.setEventIDs(clusterEventIDs) ++ else: ++ ocf.logger.info("handleLocalEvents: no local clusterEvents were updated") ++ else: ++ # there are no more pending events left after cleanup ++ if clusterHelper.noPendingResourcesOnNode(self.hostName): ++ # and no pending resources on the node -> set it back online ++ ocf.logger.info("handleLocalEvents: all local events finished -> clean up, put node online and AVAILABLE") ++ curState = self.updateNodeStateAndEvents(AVAILABLE, None) ++ self.putNodeOnline() ++ clusterHelper.removeHoldFromNodes() ++ # repeat handleLocalEvents() since we changed status to AVAILABLE ++ else: ++ ocf.logger.info("handleLocalEvents: all local events finished, but some resources have not completed startup yet -> wait") ++ else: ++ # there are no pending events set for us (yet) ++ if curState == AVAILABLE: ++ if len(azEventIDs) > 0: ++ if clusterHelper.otherNodesAvailable(self): ++ ocf.logger.info("handleLocalEvents: can handle local events %s -> set state STOPPING" % (str(azEventIDs))) ++ # this will also set mayUpdateDocVersion = True ++ curState = self.updateNodeStateAndEvents(STOPPING, azEventIDs) ++ else: ++ ocf.logger.info("handleLocalEvents: cannot handle azEvents %s (only node available) -> set state ON_HOLD" % str(azEventIDs)) ++ self.setState(ON_HOLD) ++ else: ++ ocf.logger.debug("handleLocalEvents: no local azEvents to handle") ++ if curState == STOPPING: ++ if clusterHelper.noPendingResourcesOnNode(self.hostName): ++ ocf.logger.info("handleLocalEvents: all local resources are started properly -> put node standby") ++ self.putNodeStandby() ++ mayUpdateDocVersion = True ++ else: ++ 
ocf.logger.info("handleLocalEvents: some local resources are not clean yet -> wait") ++ ++ ocf.logger.debug("handleLocalEvents: finished; mayUpdateDocVersion = %s" % str(mayUpdateDocVersion)) ++ return mayUpdateDocVersion ++ ++############################################################################## ++ ++class raAzEvents: ++ """ ++ Main class for resource agent ++ """ ++ def __init__(self, relevantEventTypes): ++ self.node = Node(self) ++ self.relevantEventTypes = relevantEventTypes ++ ++ def monitor(self): ++ ocf.logger.debug("monitor: begin") ++ ++ pullFailedAttemps = 0 ++ while True: ++ # check if another node is pulling at the same time; ++ # this should only be a concern for the first pull, as setting up Scheduled Events may take up to 2 minutes. ++ if clusterHelper.getAttr(attr_globalPullState) == "PULLING": ++ pullFailedAttemps += 1 ++ if pullFailedAttemps == global_pullMaxAttempts: ++ ocf.logger.warning("monitor: exceeded maximum number of attempts (%d) to pull events" % global_pullMaxAttempts) ++ ocf.logger.debug("monitor: finished") ++ return ocf.OCF_SUCCESS ++ else: ++ ocf.logger.info("monitor: another node is pulling; retry in %d seconds" % global_pullDelaySecs) ++ time.sleep(global_pullDelaySecs) ++ continue ++ ++ # we can pull safely from Azure Metadata Service ++ clusterHelper.setAttr(attr_globalPullState, "PULLING") ++ events = azHelper.pullScheduledEvents() ++ clusterHelper.setAttr(attr_globalPullState, "IDLE") ++ ++ # get current document version ++ curDocVersion = events.DocumentIncarnation ++ lastDocVersion = self.node.getAttr(attr_lastDocVersion) ++ ocf.logger.debug("monitor: lastDocVersion = %s; curDocVersion = %s" % (lastDocVersion, curDocVersion)) ++ ++ # split events local/remote ++ (localEvents, remoteEvents) = self.node.separateEvents(events.Events) ++ ++ # ensure local events are only executing once ++ if curDocVersion != lastDocVersion: ++ ocf.logger.debug("monitor: curDocVersion has not been handled yet") ++ # handleLocalEvents() returns True if mayUpdateDocVersion is True; ++ # this is only the case if we can ensure there are no pending events ++ if self.node.handleLocalEvents(localEvents): ++ ocf.logger.info("monitor: handleLocalEvents completed successfully -> update curDocVersion") ++ self.node.setAttr(attr_lastDocVersion, curDocVersion) ++ else: ++ ocf.logger.debug("monitor: handleLocalEvents still waiting -> keep curDocVersion") ++ else: ++ ocf.logger.info("monitor: already handled curDocVersion, skip") ++ ++ # remove orphaned remote events and then handle the remaining remote events ++ self.node.removeOrphanedEvents(remoteEvents) ++ self.node.handleRemoteEvents(remoteEvents) ++ break ++ ++ ocf.logger.debug("monitor: finished") ++ return ocf.OCF_SUCCESS ++ ++############################################################################## ++ ++def setLoglevel(verbose): ++ # set up writing into syslog ++ loglevel = default_loglevel ++ if verbose: ++ opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1)) ++ urllib2.install_opener(opener) ++ loglevel = ocf.logging.DEBUG ++ ocf.log.setLevel(loglevel) ++ ++description = ( ++ "Microsoft Azure Scheduled Events monitoring agent", ++ """This resource agent implements a monitor for scheduled ++(maintenance) events for a Microsoft Azure VM. ++ ++If any relevant events are found, it moves all Pacemaker resources ++away from the affected node to allow for a graceful shutdown. 
++ ++ Usage: ++ [OCF_RESKEY_eventTypes=VAL] [OCF_RESKEY_verbose=VAL] azure-events ACTION ++ ++ action (required): Supported values: monitor, help, meta-data ++ eventTypes (optional): List of event types to be considered ++ relevant by the resource agent (comma-separated). ++ Supported values: Freeze,Reboot,Redeploy ++ Default = Reboot,Redeploy ++/ verbose (optional): If set to true, displays debug info. ++ Default = false ++ ++ Deployment: ++ crm configure primitive rsc_azure-events ocf:heartbeat:azure-events \ ++ op monitor interval=10s ++ crm configure clone cln_azure-events rsc_azure-events ++ ++For further information on Microsoft Azure Scheduled Events, please ++refer to the following documentation: ++https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events ++""") ++ ++def monitor_action(eventTypes): ++ relevantEventTypes = set(eventTypes.split(",") if eventTypes else []) ++ ra = raAzEvents(relevantEventTypes) ++ return ra.monitor() ++ ++def validate_action(eventTypes): ++ if eventTypes: ++ for event in eventTypes.split(","): ++ if event not in ("Freeze", "Reboot", "Redeploy"): ++ ocf.ocf_exit_reason("Event type not one of Freeze, Reboot, Redeploy: " + eventTypes) ++ return ocf.OCF_ERR_CONFIGURED ++ return ocf.OCF_SUCCESS ++ ++def main(): ++ agent = ocf.Agent("azure-events", shortdesc=description[0], longdesc=description[1]) ++ agent.add_parameter( ++ "eventTypes", ++ shortdesc="List of resources to be considered", ++ longdesc="A comma-separated list of event types that will be handled by this resource agent. (Possible values: Freeze,Reboot,Redeploy)", ++ content_type="string", ++ default="Reboot,Redeploy") ++ agent.add_parameter( ++ "verbose", ++ shortdesc="Enable verbose agent logging", ++ longdesc="Set to true to enable verbose logging", ++ content_type="boolean", ++ default="false") ++ agent.add_action("start", timeout=10, handler=lambda: ocf.OCF_SUCCESS) ++ agent.add_action("stop", timeout=10, handler=lambda: ocf.OCF_SUCCESS) ++ agent.add_action("validate-all", timeout=20, handler=validate_action) ++ agent.add_action("monitor", timeout=240, interval=10, handler=monitor_action) ++ setLoglevel(ocf.is_true(ocf.get_parameter("verbose", "false"))) ++ agent.run() ++ ++if __name__ == '__main__': ++ main() +diff -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am +--- a/heartbeat/Makefile.am 2020-04-16 11:54:08.467619588 +0200 ++++ b/heartbeat/Makefile.am 2020-04-16 12:08:07.788224036 +0200 +@@ -55,7 +55,7 @@ + osp_SCRIPTS = nova-compute-wait \ + NovaEvacuate + +-ocf_SCRIPTS = AoEtarget \ ++ocf_SCRIPTS = AoEtarget \ + AudibleAlarm \ + ClusterMon \ + CTDB \ +@@ -116,10 +116,7 @@ + fio \ + galera \ + garbd \ +- gcp-pd-move \ + gcp-vpc-move-ip \ +- gcp-vpc-move-vip \ +- gcp-vpc-move-route \ + iSCSILogicalUnit \ + iSCSITarget \ + ids \ +@@ -177,6 +174,22 @@ + vsftpd \ + zabbixserver + ++if BUILD_AZURE_EVENTS ++ocf_SCRIPTS += azure-events ++endif ++ ++if BUILD_GCP_PD_MOVE ++ocf_SCRIPTS += gcp-pd-move ++endif ++ ++if BUILD_GCP_VPC_MOVE_ROUTE ++ocf_SCRIPTS += gcp-vpc-move-route ++endif ++ ++if BUILD_GCP_VPC_MOVE_VIP ++ocf_SCRIPTS += gcp-vpc-move-vip ++endif ++ + ocfcommondir = $(OCF_LIB_DIR_PREFIX)/heartbeat + ocfcommon_DATA = ocf-shellfuncs \ + ocf-binaries \ +@@ -205,3 +218,13 @@ + + %.check: % + OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) ./$< meta-data | xmllint --path $(abs_srcdir) --noout --relaxng $(abs_srcdir)/metadata.rng - ++ ++do_spellcheck = printf '[%s]\n' "$(agent)"; \ ++ OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) \ ++ ./$(agent) 
meta-data 2>/dev/null \ ++ | xsltproc $(top_srcdir)/make/extract_text.xsl - \ ++ | aspell pipe list -d en_US --ignore-case \ ++ --home-dir=$(top_srcdir)/make -p spellcheck-ignore \ ++ | sed -n 's|^&\([^:]*\):.*|\1|p'; ++spellcheck: ++ @$(foreach agent,$(ocf_SCRIPTS), $(do_spellcheck)) +diff -uNr a/m4/ac_python_module.m4 b/m4/ac_python_module.m4 +--- a/m4/ac_python_module.m4 1970-01-01 01:00:00.000000000 +0100 ++++ b/m4/ac_python_module.m4 2020-04-14 11:11:26.325806378 +0200 +@@ -0,0 +1,30 @@ ++dnl @synopsis AC_PYTHON_MODULE(modname[, fatal]) ++dnl ++dnl Checks for Python module. ++dnl ++dnl If fatal is non-empty then absence of a module will trigger an ++dnl error. ++dnl ++dnl @category InstalledPackages ++dnl @author Andrew Collier . ++dnl @version 2004-07-14 ++dnl @license AllPermissive ++ ++AC_DEFUN([AC_PYTHON_MODULE],[ ++ AC_MSG_CHECKING(python module: $1) ++ $PYTHON -c "import $1" 2>/dev/null ++ if test $? -eq 0; ++ then ++ AC_MSG_RESULT(yes) ++ eval AS_TR_CPP(HAVE_PYMOD_$1)=yes ++ else ++ AC_MSG_RESULT(no) ++ eval AS_TR_CPP(HAVE_PYMOD_$1)=no ++ # ++ if test -n "$2" ++ then ++ AC_MSG_ERROR(failed to find required module $1) ++ exit 1 ++ fi ++ fi ++]) diff --git a/SOURCES/bz1819965-3-azure-events-decode-when-type-not-str.patch b/SOURCES/bz1819965-3-azure-events-decode-when-type-not-str.patch new file mode 100644 index 0000000..3c62631 --- /dev/null +++ b/SOURCES/bz1819965-3-azure-events-decode-when-type-not-str.patch @@ -0,0 +1,59 @@ +From 57424bd1f158f1ff597034e09ca90da864925a16 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 16 Jul 2020 09:58:55 +0200 +Subject: [PATCH] azure-events: only decode() when exec() output not of type + str + +--- + heartbeat/azure-events.in | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index a48a86309..d4a166d9f 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -179,6 +179,8 @@ class clusterHelper: + ocf.logger.debug("_exec: cmd = %s" % " ".join(command)) + try: + ret = subprocess.check_output(command) ++ if type(ret) != str: ++ ret = ret.decode() + ocf.logger.debug("_exec: return = %s" % ret) + return ret.rstrip() + except Exception as err: +@@ -232,7 +234,7 @@ class clusterHelper: + + nodes = [] + nodeList = clusterHelper._exec("crm_node", "--list") +- for n in nodeList.decode().split("\n"): ++ for n in nodeList.split("\n"): + nodes.append(n.split()[1]) + ocf.logger.debug("getAllNodes: finished; return %s" % str(nodes)) + +@@ -303,7 +305,7 @@ class clusterHelper: + ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary) + return False + summary = summary.split("Transition Summary:")[1] +- ret = summary.decode().split("\n").pop(0) ++ ret = summary.split("\n").pop(0) + + ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret)) + return ret +@@ -324,7 +326,7 @@ class clusterHelper: + if len(resources) == 0: + ret = [] + else: +- ret = resources.decode().split("\n") ++ ret = resources.split("\n") + + ocf.logger.debug("listOperationsOnNode: finished; return = %s" % str(ret)) + return ret +@@ -470,7 +472,7 @@ class Node: + + eventIDStr = clusterHelper.getAttr(attr_pendingEventIDs, node=node) + if eventIDStr: +- eventIDs = eventIDStr.decode().split(",") ++ eventIDs = eventIDStr.split(",") + else: + eventIDs = None + diff --git a/SOURCES/bz1820523-exportfs-1-add-symlink-support.patch b/SOURCES/bz1820523-exportfs-1-add-symlink-support.patch new file mode 100644 index 
0000000..b5c1d96 --- /dev/null +++ b/SOURCES/bz1820523-exportfs-1-add-symlink-support.patch @@ -0,0 +1,51 @@ +From 091a6d1f26140651b7314cfb618c80f9258fd1d4 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 5 May 2020 14:20:43 +0200 +Subject: [PATCH] exportfs: add symlink support + +based on implementation in Filesystem agent +--- + heartbeat/exportfs | 30 ++++++++++++++++++++++-------- + 1 file changed, 22 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/exportfs b/heartbeat/exportfs +index 1cabdee70..294d7eec9 100755 +--- a/heartbeat/exportfs ++++ b/heartbeat/exportfs +@@ -450,14 +450,28 @@ exportfs_validate_all () + fi + } + +-# If someone puts a trailing slash at the end of the export directory, +-# this agent is going to fail in some unexpected ways due to how +-# export strings are matched. The simplest solution here is to strip off +-# a trailing '/' in the directory before processing anything. +-newdir=$(echo "$OCF_RESKEY_directory" | sed -n -e 's/^\(.*\)\/$/\1/p') +-if [ -n "$newdir" ]; then +- OCF_RESKEY_directory=$newdir +-fi ++for dir in $OCF_RESKEY_directory; do ++ # strip off trailing '/' from directory ++ dir=$(echo $dir | sed 's/\/*$//') ++ : ${dir:=/} ++ if [ -e "$dir" ] ; then ++ canonicalized_dir=$(readlink -f "$dir") ++ if [ $? -ne 0 ]; then ++ if [ "$__OCF_ACTION" != "stop" ]; then ++ ocf_exit_reason "Could not canonicalize $dir because readlink failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ fi ++ else ++ if [ "$__OCF_ACTION" != "stop" ]; then ++ ocf_exit_reason "$dir does not exist" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ fi ++ directories+="$canonicalized_dir " ++done ++ ++OCF_RESKEY_directory="${directories%% }" + + NUMDIRS=`echo "$OCF_RESKEY_directory" | wc -w` + OCF_REQUIRED_PARAMS="directory fsid clientspec" diff --git a/SOURCES/bz1820523-exportfs-2-fix-monitor-action.patch b/SOURCES/bz1820523-exportfs-2-fix-monitor-action.patch new file mode 100644 index 0000000..ec1dff4 --- /dev/null +++ b/SOURCES/bz1820523-exportfs-2-fix-monitor-action.patch @@ -0,0 +1,35 @@ +From fda12d3d6495e33e049ed3ac03d6bfb4d65aac3d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 9 Jun 2020 10:27:13 +0200 +Subject: [PATCH] exportfs: fix monitor-action in symlink-logic for when + directory doesnt exist + +--- + heartbeat/exportfs | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/exportfs b/heartbeat/exportfs +index 294d7eec9..d7d3463d9 100755 +--- a/heartbeat/exportfs ++++ b/heartbeat/exportfs +@@ -463,10 +463,16 @@ for dir in $OCF_RESKEY_directory; do + fi + fi + else +- if [ "$__OCF_ACTION" != "stop" ]; then +- ocf_exit_reason "$dir does not exist" +- exit $OCF_ERR_CONFIGURED +- fi ++ case "$__OCF_ACTION" in ++ stop|monitor) ++ canonicalized_dir="$dir" ++ ocf_log debug "$dir does not exist" ++ ;; ++ *) ++ ocf_exit_reason "$dir does not exist" ++ exit $OCF_ERR_CONFIGURED ++ ;; ++ esac + fi + directories+="$canonicalized_dir " + done diff --git a/SOURCES/bz1830716-NovaEvacuate-suppress-expected-error.patch b/SOURCES/bz1830716-NovaEvacuate-suppress-expected-error.patch new file mode 100644 index 0000000..0a7144f --- /dev/null +++ b/SOURCES/bz1830716-NovaEvacuate-suppress-expected-error.patch @@ -0,0 +1,37 @@ +From 143864c694fb3f44a28b805a17ba7a2f6bf9931f Mon Sep 17 00:00:00 2001 +From: Vincent Untz +Date: Sun, 07 Feb 2016 10:30:00 +0100 +Subject: [PATCH] NovaEvacuate: Avoid initial useless message on stderr + +When no evacuation has been done yet, we're spamming syslog with: + + Could not query value of evacuate: 
attribute does not exist + +So let's just filter this out, since it's known to be expected on +initial setup. + +As this requires a bashism, also move the script to use bash. + +Change-Id: I3351919febc0ef0101e4a08ce6eb412e3c7cfc76 +--- + +diff --git a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate +index 319a747..f764bde 100644 +--- a/heartbeat/NovaEvacuate ++++ b/heartbeat/NovaEvacuate +@@ -1,4 +1,4 @@ +-#!/bin/sh ++#!/bin/bash + # + # + # NovaCompute agent manages compute daemons. +@@ -220,7 +220,8 @@ + fi + + handle_evacuations $( +- attrd_updater -n evacuate -A | ++ attrd_updater -n evacuate -A \ ++ 2> >(grep -v "attribute does not exist" 1>&2) | + sed 's/ value=""/ value="no"/' | + tr '="' ' ' | + awk '{print $4" "$6}' diff --git a/SOURCES/bz1832321-rabbitmq-cluster-increase-wait-timeout.patch b/SOURCES/bz1832321-rabbitmq-cluster-increase-wait-timeout.patch new file mode 100644 index 0000000..558ecc6 --- /dev/null +++ b/SOURCES/bz1832321-rabbitmq-cluster-increase-wait-timeout.patch @@ -0,0 +1,60 @@ +From cf1e7bfab984b5e9451a63c25b39c0932e0d9116 Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Wed, 6 May 2020 16:11:36 +0200 +Subject: [PATCH] Increase the rabbitmqctl wait timeout during start() + +After we start the rabbitmq process we wait for the pid to show up +and then declare the server to be started successfully. +This wait is done via 'rabbitmqctl wait'. Now from +From https://www.rabbitmq.com/rabbitmqctl.8.html we have: + + If the specified pidfile is not created or erlang node is not started + within --timeout the command will fail. Default timeout is 10 seconds. + +This default of 10 seconds might not be enough in overloaded +environments. So what we want to do here is wait for as much time as +the start() operation allows us. So we wait for OCF_RESKEY_CRM_meta_timeout +minus 5 seconds. In the rare and non-sensical case that it is less than +10s we do not pass a timeout string at all to rabbitmqctl. + +Co-Authored-By: John Eckersberg +--- + heartbeat/rabbitmq-cluster | 15 +++++++++++++-- + 1 file changed, 13 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index a9ebd37ad..f7d48120c 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -294,6 +294,8 @@ rmq_monitor() { + rmq_init_and_wait() + { + local rc ++ local wait_timeout ++ local timeout_string + + prepare_dir $RMQ_PID_DIR + prepare_dir $RMQ_LOG_DIR +@@ -305,11 +307,20 @@ rmq_init_and_wait() + setsid sh -c "$RMQ_SERVER > ${RMQ_LOG_DIR}/startup_log 2> ${RMQ_LOG_DIR}/startup_err" & + + ocf_log info "Waiting for server to start" +- $RMQ_CTL wait $RMQ_PID_FILE ++ # We want to give the wait command almost the full startup timeout we are given ++ # So we use the start operation timeout (in ms), convert it and subtract 5 seconds ++ # In the silly case that it is less than 10 seconds we just skip setting the timeout ++ wait_timeout=`expr $OCF_RESKEY_CRM_meta_timeout / 1000 - 5` ++ if [ $wait_timeout -gt 10 ]; then ++ timeout_string="--timeout ${wait_timeout}" ++ else ++ timeout_string="" ++ fi ++ $RMQ_CTL $timeout_string wait $RMQ_PID_FILE + rc=$? 
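++    # $timeout_string above is either empty or e.g. "--timeout 115" for a
++    # hypothetical 120000 ms start timeout: 120000 / 1000 - 5 = 115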
+ if [ $rc -ne $OCF_SUCCESS ]; then + remove_pid +- ocf_log info "rabbitmq-server start failed: $rc" ++ ocf_log info "rabbitmq-server start failed with a timeout of ($timeout_string): $rc" + return $OCF_ERR_GENERIC + fi + diff --git a/SOURCES/bz1836186-pgsql-support-Pacemaker-v2.03-output.patch b/SOURCES/bz1836186-pgsql-support-Pacemaker-v2.03-output.patch new file mode 100644 index 0000000..b0d7ade --- /dev/null +++ b/SOURCES/bz1836186-pgsql-support-Pacemaker-v2.03-output.patch @@ -0,0 +1,52 @@ +--- a/heartbeat/ocf-shellfuncs.in 2020-06-16 10:47:54.462276461 +0200 ++++ b/heartbeat/ocf-shellfuncs.in 2020-06-16 10:43:36.880739016 +0200 +@@ -563,7 +563,7 @@ + # (master/slave) resource. This is defined as a resource where the + # master-max meta attribute is present, and set to greater than zero. + ocf_is_ms() { +- [ ! -z "${OCF_RESKEY_CRM_meta_master_max}" ] && [ "${OCF_RESKEY_CRM_meta_master_max}" -gt 0 ] ++ [ "${OCF_RESKEY_CRM_meta_promotable}" = "true" ] || { [ ! -z "${OCF_RESKEY_CRM_meta_master_max}" ] && [ "${OCF_RESKEY_CRM_meta_master_max}" -gt 0 ]; } + } + + # version check functions +--- a/heartbeat/pgsql 2020-06-16 10:47:54.474276441 +0200 ++++ b/heartbeat/pgsql 2020-06-16 10:49:02.835159083 +0200 +@@ -1021,7 +1021,7 @@ + + # I can't get master node name from $OCF_RESKEY_CRM_meta_notify_master_uname on monitor, + # so I will get master node name using crm_mon -n +- print_crm_mon | tr -d "\t" | tr -d " " | grep -q "^${RESOURCE_NAME}[(:].*[):].*Master" ++ print_crm_mon | grep -q -i -E " +Date: Mon, 18 May 2020 16:18:21 +0200 +Subject: [PATCH] db2: HADR add STANDBY/REMOTE_CATCHUP_PENDING/DISCONNECTED to + correctly promote standby node when master node disappears (e.g. via fencing) + +--- + heartbeat/db2 | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/db2 b/heartbeat/db2 +index 62b288d46..a57fd2bb6 100755 +--- a/heartbeat/db2 ++++ b/heartbeat/db2 +@@ -774,14 +774,19 @@ db2_promote() { + ;; + + STANDBY/PEER/CONNECTED|Standby/Peer) +- # must take over ++ # must take over + ;; + + STANDBY/*PEER/DISCONNECTED|Standby/DisconnectedPeer) +- # must take over forced ++ # must take over by force peer window only + force="by force peer window only" + ;; + ++ # must take over by force ++ STANDBY/REMOTE_CATCHUP_PENDING/DISCONNECTED) ++ force="by force" ++ ;; ++ + *) + return $OCF_ERR_GENERIC + esac diff --git a/SOURCES/bz1839721-podman-force-rm-container-if-rm-fails.patch b/SOURCES/bz1839721-podman-force-rm-container-if-rm-fails.patch new file mode 100644 index 0000000..89fbb06 --- /dev/null +++ b/SOURCES/bz1839721-podman-force-rm-container-if-rm-fails.patch @@ -0,0 +1,53 @@ +From 5a732511db2c49ff6afe0a20e738b565a35273ae Mon Sep 17 00:00:00 2001 +From: Damien Ciabrini +Date: Fri, 29 May 2020 11:57:29 +0200 +Subject: [PATCH] podman: make sure to remove containers with lingering exec + sessions + +It may happen that some "podman exec" commands don't finish +cleanly and leave lingering "Exec sessions" in the container's +state. In that case, a "podman rm" command will always fail. + +To overcome the podman bug, issue a "podman rm -f" command when +we detect a container is stopped but still has some lingering +"Exec sessions" associated with it. 
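+
+A quick manual check for this condition (purely illustrative) is to
+count the exec sessions Podman still tracks for the stopped container:
+
+    podman inspect <container> --format '{{len .ExecIDs}}'
+
+A non-zero count on a stopped container is exactly the case in which a
+plain "podman rm" fails and the forced removal below is needed.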
+ +Related-Bug: rhbz#1839721 +--- + heartbeat/podman | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/heartbeat/podman b/heartbeat/podman +index f77d988fc..e2f6e981b 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -232,6 +232,9 @@ container_exists() + + remove_container() + { ++ local rc ++ local execids ++ + if ocf_is_true "$OCF_RESKEY_reuse"; then + # never remove the container if we have reuse enabled. + return 0 +@@ -244,6 +247,19 @@ remove_container() + fi + ocf_log notice "Cleaning up inactive container, ${CONTAINER}." + ocf_run podman rm $CONTAINER ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ # due to a podman bug (rhbz#1841485), sometimes a stopped ++ # container can still be associated with Exec sessions, in ++ # which case the "podman rm" has to be forced ++ execids=$(podman inspect $CONTAINER --format '{{len .ExecIDs}}') ++ if [ "$execids" -ne "0" ]; then ++ ocf_log warn "Inactive container ${CONTAINER} has lingering exec sessions. Force-remove it." ++ ocf_run podman rm -f $CONTAINER ++ rc=$? ++ fi ++ fi ++ return $rc + } + + podman_simple_status() diff --git a/SOURCES/bz1843999-aliyun-vpc-move-ip-log-output-when-failing.patch b/SOURCES/bz1843999-aliyun-vpc-move-ip-log-output-when-failing.patch new file mode 100644 index 0000000..7a7185f --- /dev/null +++ b/SOURCES/bz1843999-aliyun-vpc-move-ip-log-output-when-failing.patch @@ -0,0 +1,265 @@ +--- a/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:41:35.308379032 +0200 ++++ b/heartbeat/aliyun-vpc-move-ip 2020-06-05 10:48:45.555132686 +0200 +@@ -9,12 +9,46 @@ + # Initialization: + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++# Parameter defaults ++ ++OCF_RESKEY_address_default="" ++OCF_RESKEY_routing_table_default="" ++OCF_RESKEY_interface_default="eth0" ++OCF_RESKEY_profile_default="default" ++OCF_RESKEY_endpoint_default="vpc.aliyuncs.com" ++OCF_RESKEY_aliyuncli_default="detect" ++ ++ ++: ${OCF_RESKEY_address=${OCF_RESKEY_address_default}} ++: ${OCF_RESKEY_routing_table=${OCF_RESKEY_routing_table_default}} ++: ${OCF_RESKEY_interface=${OCF_RESKEY_interface_default}} ++: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} ++: ${OCF_RESKEY_endpoint=${OCF_RESKEY_endpoint_default}} ++: ${OCF_RESKEY_aliyuncli=${OCF_RESKEY_aliyuncli_default}} ++ + ####################################################################### + +-# aliyuncli doesnt work without HOME parameter ++# aliyun cli doesnt work without HOME parameter + export HOME="/root" + + USAGE="usage: $0 {start|stop|status|meta-data}"; ++ ++if [ "${OCF_RESKEY_aliyuncli}" = "detect" ]; then ++ OCF_RESKEY_aliyuncli="$(which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)" ++fi ++ ++if [[ "${OCF_RESKEY_aliyuncli##*/}" == 'aliyuncli' ]]; then ++ OUTPUT="text" ++ EXECUTING='{ print $3 }' ++ IFS_=" " ++ ENDPOINT="" ++elif [[ "${OCF_RESKEY_aliyuncli##*/}" == 'aliyun' ]]; then ++ OUTPUT="table cols=InstanceId,DestinationCidrBlock rows=RouteTables.RouteTable[].RouteEntrys.RouteEntry[]" ++ EXECUTING='{ gsub (" ", "", $0); print $1 }' ++ IFS_="|" ++ ENDPOINT="--endpoint $OCF_RESKEY_endpoint" ++fi + ############################################################################### + + +@@ -24,27 +58,61 @@ + # + ############################################################################### + ++request_create_route_entry() { ++ cmd="${OCF_RESKEY_aliyuncli} vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance 
${ENDPOINT}" ++ ocf_log debug "executing command: $cmd" ++ res=$($cmd 2>&1) ++ rc=$? ++ if [[ $rc -eq 0 ]] ++ then ++ ocf_log debug "result: $res; rc: $rc" ++ else ++ ocf_log err "result: $res; cmd: $cmd; rc: $rc" ++ fi ++ return $rc ++} ++ ++request_delete_route_entry() { ++ cmd="${OCF_RESKEY_aliyuncli} vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE ${ENDPOINT}" ++ ocf_log debug "executing command: $cmd" ++ res=$($cmd) ++ rc=$? ++ if [[ $rc -eq 0 ]] ++ then ++ ocf_log debug "result: $res; rc: $rc" ++ else ++ ocf_log err "result: $res; cmd: $cmd; rc: $rc" ++ fi ++ return $rc ++} + ++request_describe_route_tables() { ++ cmd="${OCF_RESKEY_aliyuncli} vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output ${OUTPUT} ${ENDPOINT}" ++ ocf_log debug "executing command: $cmd" ++ res=$($cmd) ++ rc=$? ++ if [[ $rc -eq 0 ]] ++ then ++ ROUTE_TO_INSTANCE=$(echo "$res" |grep "\s${OCF_RESKEY_address}/" | awk -F "${IFS_}" "${EXECUTING}") ++ ocf_log debug "ROUTE_TO_INSTANCE: $ROUTE_TO_INSTANCE" ++ else ++ ocf_log err "result: $res; cmd: $cmd; rc: $rc" ++ fi ++} + + ip_get_and_configure() { + ocf_log debug "function: ip_get_and_configure" + +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- ++ request_describe_route_tables + if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then + if [ -n "$ROUTE_TO_INSTANCE" ]; then + ip_drop + fi +- +- cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" +- ocf_log debug "executing command: $cmd" +- $cmd ++ request_create_route_entry + rc=$? + while [ $rc -ne 0 ]; do + sleep 1 +- cmd="aliyuncli vpc CreateRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ECS_INSTANCE_ID --NextHopType Instance --output text" +- ocf_log debug "executing command: $cmd" +- $cmd ++ request_create_route_entry + rc=$? + done + wait_for_started +@@ -68,17 +136,15 @@ + ocf_log debug "function: ip_drop" + cmd="ip addr delete ${OCF_RESKEY_address}/32 dev $OCF_RESKEY_interface" + ocf_log debug "executing command: $cmd" +- $cmd ++ res=$($cmd) + rc=$? + if [ $rc -ne 0 ] && [ $rc -ne 2 ]; then +- ocf_log err "command failed, rc $rc" ++ ocf_log err "command failed, rc: $rc; cmd: $cmd; result: $res" + return $OCF_ERR_GENERIC + fi +- +- cmd="aliyuncli vpc DeleteRouteEntry --RouteTableId $OCF_RESKEY_routing_table --DestinationCidrBlock ${OCF_RESKEY_address}/32 --NextHopId $ROUTE_TO_INSTANCE --output text" +- ocf_log debug "executing command: $cmd" +- $cmd +- if [ $? -ne 0 ]; then ++ request_delete_route_entry ++ rc=$? 
++ if [ $rc -ne 0 ]; then + ocf_log err "command failed, rc: $rc" + return $OCF_ERR_GENERIC + fi +@@ -90,26 +156,18 @@ + } + + wait_for_started() { +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_address | awk '{ print $3 }')" +- ++ request_describe_route_tables + while [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; do + sleep 3 +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ request_describe_route_tables + done + } + + wait_for_deleted() { +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" +- ++ request_describe_route_tables + while [ ! -z "$ROUTE_TO_INSTANCE" ]; do + sleep 1 +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- ocf_log debug "executing command: $cmd" +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ request_describe_route_tables + done + } + +@@ -124,38 +182,58 @@ + by changing an entry in an specific routing table + + Move IP within a VPC of the Aliyun ECS ++ + ++ ++ ++Path to command line tools for Aliyun ++ ++Path to Aliyun CLI tools ++ ++ ++ + + + VPC private IP address + + vpc ip +- ++ + ++ + + + Name of the routing table, where the route for the IP address should be changed, i.e. vtb-... + + routing table name +- ++ + ++ + + + Name of the network interface, i.e. eth0 + + network interface name +- ++ + +- ++ ++ + +-Valid Aliyun CLI profile name (see 'aliyuncli-ra configure'). ++An endpoint is the service entry of an Alibaba Cloud service, i.e. vpc.cn-beijing.aliyuncs.com ++ ++service endpoint ++ ++ + +-See https://www.alibabacloud.com/help/doc-detail/43039.htm?spm=a2c63.p38356.b99.16.38a914abRZtOU3 for more information about aliyuncli-ra. ++ ++ ++Valid Aliyun CLI profile name (see 'aliyun cli configure'). ++See https://www.alibabacloud.com/help/zh/product/29991.htm for more information about aliyun cli. + + profile name +- ++ + + ++ + + + +@@ -170,6 +248,11 @@ + ecs_ip_validate() { + ocf_log debug "function: validate" + ++ if [ -z "${OCF_RESKEY_aliyuncli}" ]; then ++ ocf_exit_reason "unable to detect aliyuncli binary" ++ exit $OCF_ERR_INSTALLED ++ fi ++ + # IP address + if [ -z "$OCF_RESKEY_address" ]; then + ocf_log err "IP address parameter not set $OCF_RESKEY_ADDRESS!" 
+@@ -250,10 +333,7 @@ + + ecs_ip_monitor() { + ocf_log debug "function: ecsip_monitor: check routing table" +- cmd="aliyuncli vpc DescribeRouteTables --RouteTableId $OCF_RESKEY_routing_table --output text" +- ocf_log debug "executing command: $cmd" +- +- ROUTE_TO_INSTANCE="$($cmd |grep $OCF_RESKEY_address | awk '{ print $3 }')" ++ request_describe_route_tables + + if [ "$ECS_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then + ocf_log debug "not routed to this instance ($ECS_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE" diff --git a/SOURCES/bz1845574-azure-events-1-handle-exceptions-in-urlopen.patch b/SOURCES/bz1845574-azure-events-1-handle-exceptions-in-urlopen.patch new file mode 100644 index 0000000..fa194c9 --- /dev/null +++ b/SOURCES/bz1845574-azure-events-1-handle-exceptions-in-urlopen.patch @@ -0,0 +1,70 @@ +From 194909ff08cfe75cd5da9f704d8ed4cc9ab40341 Mon Sep 17 00:00:00 2001 +From: Gustavo Figueira +Date: Tue, 19 May 2020 10:58:34 +0200 +Subject: [PATCH 1/2] azure-events: handle exceptions in urlopen The locking in + azure-events does not correctly handle some failures. + +If the metadata server is not recheable or has an error +handling the request, attr_globalPullState will never go +back to IDLE unless the administrator manually changes it. + +> azure-events: ERROR: [Errno 104] Connection reset by peer +> lrmd[2734]: notice: rsc_azure-events_monitor_10000:113088:stderr [ ocf-exit-reason:[Errno 104] Connection reset by peer ] +--- + heartbeat/azure-events.in | 16 +++++++++++++--- + 1 file changed, 13 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index 8709d97e3..bd812f4b2 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -82,9 +82,19 @@ class azHelper: + req = urllib2.Request(url, postData) + req.add_header("Metadata", "true") + req.add_header("User-Agent", USER_AGENT) +- resp = urllib2.urlopen(req) +- data = resp.read() +- ocf.logger.debug("_sendMetadataRequest: response = %s" % data) ++ try: ++ resp = urllib2.urlopen(req) ++ except URLError as e: ++ if hasattr(e, 'reason'): ++ print('We failed to reach a server. Reason: '), e.reason ++ clusterHelper.setAttr(attr_globalPullState, "IDLE") ++ elif hasattr(e, 'code'): ++ print('The server couldn\'t fulfill the request. Error code: '), e.code ++ clusterHelper.setAttr(attr_globalPullState, "IDLE") ++ else: ++ data = resp.read() ++ ocf.logger.debug("_sendMetadataRequest: response = %s" % data) ++ + if data: + data = json.loads(data) + + +From c4071ec4a82fcb831f170f341e0790633e4b904f Mon Sep 17 00:00:00 2001 +From: Gustavo Figueira +Date: Tue, 19 May 2020 12:53:22 +0200 +Subject: [PATCH 2/2] azure-events: use ocf.logger.warning instead of print + +--- + heartbeat/azure-events.in | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index bd812f4b2..a48a86309 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -86,10 +86,10 @@ class azHelper: + resp = urllib2.urlopen(req) + except URLError as e: + if hasattr(e, 'reason'): +- print('We failed to reach a server. Reason: '), e.reason ++ ocf.logger.warning("Failed to reach the server: %s" % e.reason) + clusterHelper.setAttr(attr_globalPullState, "IDLE") + elif hasattr(e, 'code'): +- print('The server couldn\'t fulfill the request. Error code: '), e.code ++ ocf.logger.warning("The server couldn\'t fulfill the request. 
Error code: %s" % e.code) + clusterHelper.setAttr(attr_globalPullState, "IDLE") + else: + data = resp.read() diff --git a/SOURCES/bz1845574-azure-events-2-import-urlerror-encode-postdata.patch b/SOURCES/bz1845574-azure-events-2-import-urlerror-encode-postdata.patch new file mode 100644 index 0000000..7795e78 --- /dev/null +++ b/SOURCES/bz1845574-azure-events-2-import-urlerror-encode-postdata.patch @@ -0,0 +1,68 @@ +From f2bf1d8a07ea810099b03469883cb7f485ab9ac1 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 27 Jul 2020 10:09:43 +0200 +Subject: [PATCH 1/2] azure-events: import URLError and encode postData when + necessary + +--- + heartbeat/azure-events.in | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index d4a166d9f..a7f359468 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -13,8 +13,10 @@ import subprocess + import json + try: + import urllib2 ++ from urllib2 import URLError + except ImportError: + import urllib.request as urllib2 ++ from urllib.error import URLError + import socket + from collections import defaultdict + +@@ -76,9 +78,13 @@ class azHelper: + Send a request to Azure's Azure Metadata Service API + """ + url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version) ++ data = "" + ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData)) + ocf.logger.debug("_sendMetadataRequest: url = %s" % url) + ++ if postData and type(postData) != bytes: ++ postData = postData.encode() ++ + req = urllib2.Request(url, postData) + req.add_header("Metadata", "true") + req.add_header("User-Agent", USER_AGENT) + +From 1ab5d71bff95eb271f1e1bbc401961dc313219d9 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 29 Jul 2020 21:25:43 +0200 +Subject: [PATCH 2/2] azure-events: report error if jsondata not received + +--- + heartbeat/azure-events.in | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index a7f359468..3a24d6358 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -117,8 +117,12 @@ class azHelper: + jsondata = azHelper._sendMetadataRequest(azHelper.instance_api) + ocf.logger.debug("getInstanceInfo: json = %s" % jsondata) + +- ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) +- return attrDict(jsondata["compute"]) ++ if jsondata: ++ ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) ++ return attrDict(jsondata["compute"]) ++ else: ++ ocf.ocf_exit_reason("getInstanceInfo: Unable to get instance info") ++ sys.exit(ocf.OCF_ERR_GENERIC) + + @staticmethod + def pullScheduledEvents(): diff --git a/SOURCES/bz1845581-nfsserver-dont-log-error-message-file-doesnt-exist.patch b/SOURCES/bz1845581-nfsserver-dont-log-error-message-file-doesnt-exist.patch new file mode 100644 index 0000000..06da3b4 --- /dev/null +++ b/SOURCES/bz1845581-nfsserver-dont-log-error-message-file-doesnt-exist.patch @@ -0,0 +1,31 @@ +From 314eadcd683551bd79b644de05cbf0e425c84f83 Mon Sep 17 00:00:00 2001 +From: Kazunori INOUE +Date: Tue, 9 Jun 2020 13:30:32 +0000 +Subject: [PATCH] nfsserver: prevent error messages when /etc/sysconfig/nfs + does not exist + +--- + heartbeat/nfsserver-redhat.sh | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/nfsserver-redhat.sh b/heartbeat/nfsserver-redhat.sh +index 
cef0862ee..73a70c186 100644 +--- a/heartbeat/nfsserver-redhat.sh ++++ b/heartbeat/nfsserver-redhat.sh +@@ -150,10 +150,12 @@ set_env_args() + + # override local nfs config. preserve previous local config though. + if [ -s $tmpconfig ]; then +- cat $NFS_SYSCONFIG | grep -q -e "$NFS_SYSCONFIG_AUTOGEN_TAG" > /dev/null 2>&1 +- if [ $? -ne 0 ]; then +- # backup local nfs config if it doesn't have our HA autogen tag in it. +- mv -f $NFS_SYSCONFIG $NFS_SYSCONFIG_LOCAL_BACKUP ++ if [ -f "$NFS_SYSCONFIG" ]; then ++ cat $NFS_SYSCONFIG | grep -q -e "$NFS_SYSCONFIG_AUTOGEN_TAG" > /dev/null 2>&1 ++ if [ $? -ne 0 ]; then ++ # backup local nfs config if it doesn't have our HA autogen tag in it. ++ mv -f $NFS_SYSCONFIG $NFS_SYSCONFIG_LOCAL_BACKUP ++ fi + fi + + cat $tmpconfig | grep -q -e "$NFS_SYSCONFIG_AUTOGEN_TAG" > /dev/null 2>&1 diff --git a/SOURCES/bz1845583-exportfs-1-describe-clientspec-format-in-metadata.patch b/SOURCES/bz1845583-exportfs-1-describe-clientspec-format-in-metadata.patch new file mode 100644 index 0000000..fd685b1 --- /dev/null +++ b/SOURCES/bz1845583-exportfs-1-describe-clientspec-format-in-metadata.patch @@ -0,0 +1,27 @@ +From f572186ec9bd26c791771a18d38804cfde602578 Mon Sep 17 00:00:00 2001 +From: zzhou1 <10611019+zzhou1@users.noreply.github.com> +Date: Tue, 3 Sep 2019 09:24:23 +0000 +Subject: [PATCH] exportfs: doc clarification for clientspec format + +Point out the standard of the format is aligned with `man exportfs`, and also point out the correct way to deal with the use case to export the same directory(-ies) to multiple subnets. +--- + heartbeat/exportfs | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/heartbeat/exportfs b/heartbeat/exportfs +index 80ed057f2..dc609a0b4 100755 +--- a/heartbeat/exportfs ++++ b/heartbeat/exportfs +@@ -47,6 +47,12 @@ Manages NFS exports + + The client specification allowing remote machines to mount the directory + (or directories) over NFS. ++ ++Note: it follows the format defined in "man exportfs". For example, in ++the use case to export the directory(-ies) for multiple subnets, please ++do config a dedicated primitive for each subnet CIDR ip address, ++and do not attempt to use multiple CIDR ip addresses in a space ++seperated list, like in /etc/exports. + + + Client ACL. diff --git a/SOURCES/bz1845583-exportfs-2-fix-typo.patch b/SOURCES/bz1845583-exportfs-2-fix-typo.patch new file mode 100644 index 0000000..0406f31 --- /dev/null +++ b/SOURCES/bz1845583-exportfs-2-fix-typo.patch @@ -0,0 +1,23 @@ +From 0f8189161ef5c09c6a6df96cb15937d430f75353 Mon Sep 17 00:00:00 2001 +From: Valentin Vidic +Date: Wed, 23 Oct 2019 22:11:14 +0200 +Subject: [PATCH] Low: exportfs: Fix spelling error + +Replace seperated -> separated. +--- + heartbeat/exportfs | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/exportfs b/heartbeat/exportfs +index dc609a0b4..d79aced88 100755 +--- a/heartbeat/exportfs ++++ b/heartbeat/exportfs +@@ -52,7 +52,7 @@ Note: it follows the format defined in "man exportfs". For example, in + the use case to export the directory(-ies) for multiple subnets, please + do config a dedicated primitive for each subnet CIDR ip address, + and do not attempt to use multiple CIDR ip addresses in a space +-seperated list, like in /etc/exports. ++separated list, like in /etc/exports. + + + Client ACL. 
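Taken together, the azure-events patches earlier in this series make three changes: the urllib import is shimmed so the same code runs under Python 2 and Python 3, postData is encoded to bytes before urlopen(), and URLError is caught and logged instead of crashing the monitor. A minimal standalone sketch of that combined pattern follows; the logger name and the function name are illustrative, not taken from the agent itself:

```
# Sketch of the py2/py3 urllib compatibility + URLError handling pattern
# used by the azure-events patches above. Names are illustrative.
import json
import logging

try:  # Python 2
    import urllib2 as urlrequest
    from urllib2 import URLError
except ImportError:  # Python 3
    import urllib.request as urlrequest
    from urllib.error import URLError

logger = logging.getLogger("azure-events-sketch")

def send_metadata_request(url, post_data=None):
    # Python 3 urlopen() requires POST bodies to be bytes.
    if post_data and not isinstance(post_data, bytes):
        post_data = post_data.encode()

    req = urlrequest.Request(url, post_data)
    req.add_header("Metadata", "true")

    try:
        resp = urlrequest.urlopen(req)
    except URLError as e:
        # Log and bail out instead of crashing; at this point the agent
        # additionally resets its global pull-state attribute to IDLE.
        if hasattr(e, "reason"):
            logger.warning("Failed to reach the server: %s", e.reason)
        else:
            logger.warning("The server couldn't fulfill the request: %s",
                           getattr(e, "code", "unknown"))
        return None

    data = resp.read()
    return json.loads(data) if data else None
```

URLError is the common base here: plain connection failures carry a reason attribute, while HTTP-level failures (HTTPError) also carry a code, so a single except clause covers both cases the agent logs.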
diff --git a/SOURCES/bz1846733-gcp-vpc-move-vip-1-support-multiple-alias-ips.patch b/SOURCES/bz1846733-gcp-vpc-move-vip-1-support-multiple-alias-ips.patch new file mode 100644 index 0000000..13401c2 --- /dev/null +++ b/SOURCES/bz1846733-gcp-vpc-move-vip-1-support-multiple-alias-ips.patch @@ -0,0 +1,317 @@ +--- a/heartbeat/gcp-vpc-move-vip.in 2020-08-17 10:33:22.132531259 +0200 ++++ b/heartbeat/gcp-vpc-move-vip.in 2020-08-17 10:34:54.050633259 +0200 +@@ -22,7 +22,8 @@ + import sys + import time + +-OCF_FUNCTIONS_DIR="%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" ++ % os.environ.get("OCF_ROOT")) + sys.path.append(OCF_FUNCTIONS_DIR) + + from ocf import * +@@ -43,6 +44,10 @@ + import urllib2 as urlrequest + + ++# Constants for alias add/remove modes ++ADD = 0 ++REMOVE = 1 ++ + CONN = None + THIS_VM = None + ALIAS = None +@@ -53,27 +58,27 @@ + + + 1.0 +- Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance +- Floating IP Address on Google Cloud Platform ++ Floating IP Address or Range on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP range to a running instance ++ Floating IP Address or Range on Google Cloud Platform + + +- IP Address to be added including CIDR. E.g 192.168.0.1/32 +- IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ IP range to be added including CIDR netmask (e.g., 192.168.0.1/32) ++ IP range to be added including CIDR netmask (e.g., 192.168.0.1/32) + + +- ++ + Subnet name for the Alias IP + Subnet name for the Alias IP + + +- +- List of hosts in the cluster ++ ++ List of hosts in the cluster, separated by spaces + Host list + + + +- If enabled (set to true), IP failover logs will be posted to stackdriver logging. Using stackdriver logging requires additional libraries (google-cloud-logging). +- Stackdriver-logging support. Requires additional libraries (google-cloud-logging). ++ If enabled (set to true), IP failover logs will be posted to stackdriver logging ++ Stackdriver-logging support + + + +@@ -107,7 +112,8 @@ + url = '%s?%s' % (metadata_url, params) + request = urlrequest.Request(url, headers=METADATA_HEADERS) + request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open(request, timeout=timeout * 1.1).read().decode("utf-8") ++ return request_opener.open( ++ request, timeout=timeout * 1.1).read().decode("utf-8") + + + def get_instance(project, zone, instance): +@@ -134,17 +140,21 @@ + time.sleep(1) + + +-def set_alias(project, zone, instance, alias, alias_range_name=None): +- fingerprint = get_network_ifaces(project, zone, instance)[0]['fingerprint'] ++def set_aliases(project, zone, instance, aliases, fingerprint): ++ """Sets the alias IP ranges for an instance. ++ ++ Args: ++ project: string, the project in which the instance resides. ++ zone: string, the zone in which the instance resides. ++ instance: string, the name of the instance. ++ aliases: list, the list of dictionaries containing alias IP ranges ++ to be added to or removed from the instance. ++ fingerprint: string, the fingerprint of the network interface. 
++ """ + body = { +- 'aliasIpRanges': [], +- 'fingerprint': fingerprint ++ 'aliasIpRanges': aliases, ++ 'fingerprint': fingerprint + } +- if alias: +- obj = {'ipCidrRange': alias} +- if alias_range_name: +- obj['subnetworkRangeName'] = alias_range_name +- body['aliasIpRanges'].append(obj) + + request = CONN.instances().updateNetworkInterface( + instance=instance, networkInterface='nic0', project=project, zone=zone, +@@ -153,21 +163,75 @@ + wait_for_operation(project, zone, operation) + + +-def get_alias(project, zone, instance): +- iface = get_network_ifaces(project, zone, instance) ++def add_rm_alias(mode, project, zone, instance, alias, alias_range_name=None): ++ """Adds or removes an alias IP range for a GCE instance. ++ ++ Args: ++ mode: int, a constant (ADD (0) or REMOVE (1)) indicating the ++ operation type. ++ project: string, the project in which the instance resides. ++ zone: string, the zone in which the instance resides. ++ instance: string, the name of the instance. ++ alias: string, the alias IP range to be added to or removed from ++ the instance. ++ alias_range_name: string, the subnet name for the alias IP range. ++ ++ Returns: ++ True if the existing list of alias IP ranges was modified, or False ++ otherwise. ++ """ ++ ifaces = get_network_ifaces(project, zone, instance) ++ fingerprint = ifaces[0]['fingerprint'] ++ ++ try: ++ old_aliases = ifaces[0]['aliasIpRanges'] ++ except KeyError: ++ old_aliases = [] ++ ++ new_aliases = [a for a in old_aliases if a['ipCidrRange'] != alias] ++ ++ if alias: ++ if mode == ADD: ++ obj = {'ipCidrRange': alias} ++ if alias_range_name: ++ obj['subnetworkRangeName'] = alias_range_name ++ new_aliases.append(obj) ++ elif mode == REMOVE: ++ pass # already removed during new_aliases build ++ else: ++ raise ValueError('Invalid value for mode: {}'.format(mode)) ++ ++ if (sorted(new_aliases) != sorted(old_aliases)): ++ set_aliases(project, zone, instance, new_aliases, fingerprint) ++ return True ++ else: ++ return False ++ ++ ++def add_alias(project, zone, instance, alias, alias_range_name=None): ++ return add_rm_alias(ADD, project, zone, instance, alias, alias_range_name) ++ ++ ++def remove_alias(project, zone, instance, alias): ++ return add_rm_alias(REMOVE, project, zone, instance, alias) ++ ++ ++def get_aliases(project, zone, instance): ++ ifaces = get_network_ifaces(project, zone, instance) + try: +- return iface[0]['aliasIpRanges'][0]['ipCidrRange'] ++ aliases = ifaces[0]['aliasIpRanges'] ++ return [a['ipCidrRange'] for a in aliases] + except KeyError: +- return '' ++ return [] + + +-def get_localhost_alias(): ++def get_localhost_aliases(): + net_iface = get_metadata('instance/network-interfaces', {'recursive': True}) + net_iface = json.loads(net_iface) + try: +- return net_iface[0]['ipAliases'][0] ++ return net_iface[0]['ipAliases'] + except (KeyError, IndexError): +- return '' ++ return [] + + + def get_zone(project, instance): +@@ -201,21 +265,17 @@ + + + def gcp_alias_start(alias): +- my_alias = get_localhost_alias() ++ my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') + +- # If I already have the IP, exit. If it has an alias IP that isn't the VIP, +- # then remove it +- if my_alias == alias: ++ if alias in my_aliases: ++ # TODO: Do we need to check alias_range_name? + logger.info( + '%s already has %s attached. 
No action required' % (THIS_VM, alias)) + sys.exit(OCF_SUCCESS) +- elif my_alias: +- logger.info('Removing %s from %s' % (my_alias, THIS_VM)) +- set_alias(project, my_zone, THIS_VM, '') + +- # Loops through all hosts & remove the alias IP from the host that has it ++ # If the alias is currently attached to another host, detach it. + hostlist = os.environ.get('OCF_RESKEY_hostlist', '') + if hostlist: + hostlist = hostlist.replace(THIS_VM, '').split() +@@ -223,47 +283,53 @@ + hostlist = get_instances_list(project, THIS_VM) + for host in hostlist: + host_zone = get_zone(project, host) +- host_alias = get_alias(project, host_zone, host) +- if alias == host_alias: ++ host_aliases = get_aliases(project, host_zone, host) ++ if alias in host_aliases: + logger.info( +- '%s is attached to %s - Removing all alias IP addresses from %s' % +- (alias, host, host)) +- set_alias(project, host_zone, host, '') ++ '%s is attached to %s - Removing %s from %s' % ++ (alias, host, alias, host)) ++ remove_alias(project, host_zone, host, alias) + break + +- # add alias IP to localhost +- set_alias( ++ # Add alias IP range to localhost ++ add_alias( + project, my_zone, THIS_VM, alias, + os.environ.get('OCF_RESKEY_alias_range_name')) + +- # Check the IP has been added +- my_alias = get_localhost_alias() +- if alias == my_alias: ++ # Verify that the IP range has been added ++ my_aliases = get_localhost_aliases() ++ if alias in my_aliases: + logger.info('Finished adding %s to %s' % (alias, THIS_VM)) +- elif my_alias: +- logger.error( +- 'Failed to add IP. %s has an IP attached but it isn\'t %s' % +- (THIS_VM, alias)) +- sys.exit(OCF_ERR_GENERIC) + else: +- logger.error('Failed to add IP address %s to %s' % (alias, THIS_VM)) ++ if my_aliases: ++ logger.error( ++ 'Failed to add alias IP range %s. %s has alias IP ranges attached but' ++ + ' they don\'t include %s' % (alias, THIS_VM, alias)) ++ else: ++ logger.error( ++ 'Failed to add IP range %s. %s has no alias IP ranges attached' ++ % (alias, THIS_VM)) + sys.exit(OCF_ERR_GENERIC) + + + def gcp_alias_stop(alias): +- my_alias = get_localhost_alias() ++ my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') + +- if my_alias == alias: +- logger.info('Removing %s from %s' % (my_alias, THIS_VM)) +- set_alias(project, my_zone, THIS_VM, '') ++ if alias in my_aliases: ++ logger.info('Removing %s from %s' % (alias, THIS_VM)) ++ remove_alias(project, my_zone, THIS_VM, alias) ++ else: ++ logger.info( ++ '%s is not attached to %s. 
No action required' ++ % (alias, THIS_VM)) + + + def gcp_alias_status(alias): +- my_alias = get_localhost_alias() +- if alias == my_alias: +- logger.info('%s has the correct IP address attached' % THIS_VM) ++ my_aliases = get_localhost_aliases() ++ if alias in my_aliases: ++ logger.info('%s has the correct IP range attached' % THIS_VM) + else: + sys.exit(OCF_NOT_RUNNING) + +@@ -275,7 +341,8 @@ + + # Populate global vars + try: +- CONN = googleapiclient.discovery.build('compute', 'v1') ++ CONN = googleapiclient.discovery.build('compute', 'v1', ++ cache_discovery=False) + except Exception as e: + logger.error('Couldn\'t connect with google api: ' + str(e)) + sys.exit(OCF_ERR_CONFIGURED) +@@ -283,7 +350,8 @@ + try: + THIS_VM = get_metadata('instance/name') + except Exception as e: +- logger.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) ++ logger.error('Couldn\'t get instance name, is this running inside GCE?: ' ++ + str(e)) + sys.exit(OCF_ERR_CONFIGURED) + + ALIAS = os.environ.get('OCF_RESKEY_alias_ip') +@@ -309,7 +377,8 @@ + formatter = logging.Formatter('gcp:alias "%(message)s"') + handler.setFormatter(formatter) + log.addHandler(handler) +- logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) ++ logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': ++ OCF_RESOURCE_INSTANCE}) + except ImportError: + logger.error('Couldn\'t import google.cloud.logging, ' + 'disabling Stackdriver-logging support') diff --git a/SOURCES/bz1846733-gcp-vpc-move-vip-2-fix-list-sort.patch b/SOURCES/bz1846733-gcp-vpc-move-vip-2-fix-list-sort.patch new file mode 100644 index 0000000..887fc99 --- /dev/null +++ b/SOURCES/bz1846733-gcp-vpc-move-vip-2-fix-list-sort.patch @@ -0,0 +1,32 @@ +From 2b22a14a128b87214bfb1ece221274aac78ba81b Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Tue, 18 Aug 2020 18:43:13 -0700 +Subject: [PATCH] gcp-vpc-move-vip: Fix sort for list of dicts in python3 + +python2 sorts a list of dicts of `{'ipCidrRange': }` correctly by +default. python3 fails with a TypeError: + +`TypeError: '<' not supported between instances of 'dict' and 'dict'` + +Fix this by using the key parameter of sorted(). python2 also supports +this. 
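+
+For illustration, the failure and the fix are easy to reproduce with
+hypothetical ranges (same dict shape as aliasIpRanges):
+
+    >>> ranges = [{'ipCidrRange': '10.0.0.2/32'}, {'ipCidrRange': '10.0.0.1/32'}]
+    >>> sorted(ranges)        # TypeError under python3
+    >>> sorted(ranges, key=lambda item: item.get('ipCidrRange'))  # works on both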
+ +Signed-off-by: Reid Wahl +--- + heartbeat/gcp-vpc-move-vip.in | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index 85d59f6bb..01d91a59d 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -200,7 +200,8 @@ def add_rm_alias(mode, project, zone, instance, alias, alias_range_name=None): + else: + raise ValueError('Invalid value for mode: {}'.format(mode)) + +- if (sorted(new_aliases) != sorted(old_aliases)): ++ if (sorted(new_aliases, key=lambda item: item.get('ipCidrRange')) ++ != sorted(old_aliases, key=lambda item: item.get('ipCidrRange'))): + set_aliases(project, zone, instance, new_aliases, fingerprint) + return True + else: diff --git a/SOURCES/bz1848025-sybaseASE-run-verify-for-start-action-only.patch b/SOURCES/bz1848025-sybaseASE-run-verify-for-start-action-only.patch new file mode 100644 index 0000000..402bbd6 --- /dev/null +++ b/SOURCES/bz1848025-sybaseASE-run-verify-for-start-action-only.patch @@ -0,0 +1,41 @@ +From 953f689cb2a37606b6d4b2250ebec23f129f5095 Mon Sep 17 00:00:00 2001 +From: Reid wahl +Date: Thu, 9 Jul 2020 23:32:22 -0700 +Subject: [PATCH] sybaseASE: Run verify_all() for start operation only + +The `sybaseASE` resource agent runs the `verify_all()` function at the +beginning of start, stop, and monitor operations. + +When `verify_all()` is run for a probe (monitor) operation and +`sybase_home` resides on a cluster-managed filesystem, the probe often +fails with `$OCF_ERR_GENERIC` because the filesystem isn't mounted yet. +This prevents the resource from starting on that node. + +For the stop operation, there's simply no reason to run `verify_all()`. + +This patch removes `verify_all()` for the stop and monitor operations. +It is now only run for the start operation. + +Resolves: RHBZ#1848673 +Resolves: RHBZ#1848025 +--- + heartbeat/sybaseASE.in | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in +index 9ddd429be..7ff30bd31 100755 +--- a/heartbeat/sybaseASE.in ++++ b/heartbeat/sybaseASE.in +@@ -864,12 +864,10 @@ case $__OCF_ACTION in + exit $? + ;; + stop) +- verify_all || exit $OCF_ERR_GENERIC + ase_stop + exit $? + ;; + status | monitor) +- verify_all || exit $OCF_ERR_GENERIC + ase_status $OCF_CHECK_LEVEL + exit $? + ;; diff --git a/SOURCES/bz1850778-azure-lb-fix-redirect-issue.patch b/SOURCES/bz1850778-azure-lb-fix-redirect-issue.patch new file mode 100644 index 0000000..b171613 --- /dev/null +++ b/SOURCES/bz1850778-azure-lb-fix-redirect-issue.patch @@ -0,0 +1,54 @@ +From d22700fc5d5098c683b465ea0fede43803fd4d6b Mon Sep 17 00:00:00 2001 +From: Reid wahl +Date: Tue, 7 Jul 2020 02:18:09 -0700 +Subject: [PATCH] azure-lb: Don't redirect nc listener output to pidfile + +The `lb_start()` function spawns an `nc` listener background process +and echoes the resulting pid to `$pidfile`. Due to a bug in the +redirection, all future data received by the `nc` process is also +appended to `$pidfile`. + +If binary data is received later and appended to `$pidfile`, the +monitor operation fails when `grep` searches the now-binary file. + +``` +line 97: kill: Binary: arguments must be process or job IDs ] +line 97: kill: file: arguments must be process or job IDs ] +line 97: kill: /var/run/nc_PF2_02.pid: arguments must be process or job + IDs ] +line 97: kill: matches: arguments must be process or job IDs ] +``` + +Then the start operation fails during recovery. 
`lb_start()` spawns a +new `nc` process, but the old process is still running and using the +configured port. + +``` +nc_PF2_02_start_0:777:stderr [ Ncat: bind to :::62502: Address + already in use. QUITTING. ] +``` + +This patch fixes the issue by removing the `nc &` command from the +section whose output gets redirected to `$pidfile`. Now, only the `nc` +PID is echoed to `$pidfile`. + +Resolves: RHBZ#1850778 +Resolves: RHBZ#1850779 +--- + heartbeat/azure-lb | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb +index 05c134514..05755d778 100755 +--- a/heartbeat/azure-lb ++++ b/heartbeat/azure-lb +@@ -113,7 +113,8 @@ lb_start() { + if ! lb_monitor; then + ocf_log debug "Starting $process: $cmd" + # Execute the command as created above +- eval "$cmd & echo \$!" > $pidfile ++ $cmd & ++ echo $! > $pidfile + if lb_monitor; then + ocf_log debug "$process: $cmd started successfully, calling monitor" + lb_monitor diff --git a/SOURCES/bz1858752-Filesystem-support-whitespace-device-dir.patch b/SOURCES/bz1858752-Filesystem-support-whitespace-device-dir.patch new file mode 100644 index 0000000..15fb71f --- /dev/null +++ b/SOURCES/bz1858752-Filesystem-support-whitespace-device-dir.patch @@ -0,0 +1,566 @@ +From a8051cf9e21d231ce3c445f09631266157ffc2e0 Mon Sep 17 00:00:00 2001 +From: Reid wahl +Date: Fri, 10 Jul 2020 03:44:18 -0700 +Subject: [PATCH 1/3] Filesystem: Support whitespace in device or directory + name + +Whitespace in a device name (e.g., a CIFS share) or a directory name +breaks resource operations. + +One issue is that many of the variable occurrences aren't quoted, so a +string containing whitespace is split into multiple tokens. This is a +problem when the string meant to be passed as a single argument to a +function (e.g., `list_submounts()`). + +Another issue involves the parsing of `list_mounts()` output. +`list_mounts()` can pull data from `/proc/mounts`, `/etc/mtab`, or the +`mount` command. `/proc/mounts` and `/etc/mtab` represent spaces within +a field as octal `\040` strings, while `mount` represents them as +literal space characters. + +`list_mounts()` had to be modified to output the mount list as three +distinct fields ((`device`, `mountpoint`, `fstype`), separated by tab +characters) regardless of the data source. Parsers of `list_mounts()` +were modified to use tabs as field delimiters. + +The for loop in `Filesystem_stop()` also had to become a while loop to +read line-by-line irrespective of spaces. A for loop splits on spaces. 
+ +Resolves: RHBZ#1624591 +--- + heartbeat/Filesystem | 106 +++++++++++++++++++++++++------------------ + 1 file changed, 61 insertions(+), 45 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 2f07a90ad..9a52aa712 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -91,6 +91,7 @@ fi + + # Variables used by multiple methods + HOSTOS=`uname` ++TAB=' ' + + # The status file is going to an extra directory, by default + # +@@ -100,7 +101,7 @@ suffix="${OCF_RESOURCE_INSTANCE}" + [ "$OCF_RESKEY_CRM_meta_clone" ] && + suffix="${suffix}_$OCF_RESKEY_CRM_meta_clone" + suffix="${suffix}_`uname -n`" +-STATUSFILE=${OCF_RESKEY_directory}/$prefix$suffix ++STATUSFILE="${OCF_RESKEY_directory}/$prefix$suffix" + + ####################################################################### + +@@ -283,6 +284,7 @@ flushbufs() { + is_bind_mount() { + echo "$options" | grep -w bind >/dev/null 2>&1 + } ++ + list_mounts() { + local inpf="" + local mount_list="" +@@ -296,15 +298,23 @@ list_mounts() { + + # Make sure that the mount list has not been changed while reading. + while [ "$mount_list" != "$check_list" ]; do +- check_list=$mount_list ++ check_list="$mount_list" + if [ "$inpf" ]; then +- mount_list=$(cut -d' ' -f1,2,3 < $inpf) ++ # ... ++ # Spaces in device or mountpoint are octal \040 in $inpf ++ # Convert literal spaces (field separators) to tabs ++ mount_list=$(cut -d' ' -f1,2,3 < $inpf | tr ' ' "$TAB") + else +- mount_list=$($MOUNT | cut -d' ' -f1,3,5) ++ # on type ... ++ # Use tabs as field separators ++ match_string='\(.*\) on \(.*\) type \([^[:space:]]\)' ++ replace_string="\\1${TAB}\\3${TAB}\\5" ++ mount_list=$($MOUNT | sed "s/$match_string/$replace_string/g") + fi + done + +- echo "$mount_list" ++ # Convert octal \040 to space characters ++ printf "$mount_list" + } + + determine_blockdevice() { +@@ -318,7 +328,8 @@ determine_blockdevice() { + nfs4|nfs|smbfs|cifs|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|none|lustre) + : ;; + *) +- DEVICE=`list_mounts | grep " $CANONICALIZED_MOUNTPOINT " | cut -d' ' -f1` ++ match_string="${TAB}${CANONICALIZED_MOUNTPOINT}${TAB}" ++ DEVICE=`list_mounts | grep "$match_string" | cut -d"$TAB" -f1` + if [ -b "$DEVICE" ]; then + blockdevice=yes + fi +@@ -329,7 +340,7 @@ determine_blockdevice() { + # Lists all filesystems potentially mounted under a given path, + # excluding the path itself. + list_submounts() { +- list_mounts | grep " $1/" | cut -d' ' -f2 | sort -r ++ list_mounts | grep "${TAB}${1}/" | cut -d"$TAB" -f2 | sort -r + } + + # kernels < 2.6.26 can't handle bind remounts +@@ -358,15 +369,15 @@ bind_mount() { + if is_bind_mount && [ "$options" != "-o bind" ] + then + bind_kernel_check +- bind_opts=`echo $options | sed 's/bind/remount/'` +- $MOUNT $bind_opts $MOUNTPOINT ++ bind_opts=`echo "$options" | sed 's/bind/remount/'` ++ $MOUNT $bind_opts "$MOUNTPOINT" + else + true # make sure to return OK + fi + } + + is_option() { +- echo $OCF_RESKEY_options | grep -w "$1" >/dev/null 2>&1 ++ echo "$OCF_RESKEY_options" | grep -w "$1" >/dev/null 2>&1 + } + + is_fsck_needed() { +@@ -374,7 +385,7 @@ is_fsck_needed() { + force) true;; + no) false;; + ""|auto) +- case $FSTYPE in ++ case "$FSTYPE" in + ext4|ext4dev|ext3|reiserfs|reiser4|nss|xfs|jfs|vfat|fat|nfs4|nfs|cifs|smbfs|ocfs2|gfs2|none|lustre|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs) + false;; + *) +@@ -403,7 +414,7 @@ fstype_supported() + fi + + # support fuse-filesystems (e.g. 
GlusterFS) +- case $FSTYPE in ++ case "$FSTYPE" in + fuse.*|glusterfs|rozofs) support="fuse";; + esac + +@@ -486,7 +497,8 @@ trigger_udev_rules_if_needed() + Filesystem_start() + { + # Check if there are any mounts mounted under the mountpoint +- if list_mounts | grep -q -E " $CANONICALIZED_MOUNTPOINT/\w+" >/dev/null 2>&1; then ++ match_string="${TAB}${CANONICALIZED_MOUNTPOINT}" ++ if list_mounts | grep -q -E "$match_string/\w+" >/dev/null 2>&1; then + ocf_log err "There is one or more mounts mounted under $MOUNTPOINT." + return $OCF_ERR_CONFIGURED + fi +@@ -514,9 +526,9 @@ Filesystem_start() + if is_fsck_needed; then + ocf_log info "Starting filesystem check on $DEVICE" + if [ -z "$FSTYPE" ]; then +- $FSCK -p $DEVICE ++ $FSCK -p "$DEVICE" + else +- $FSCK -t $FSTYPE -p $DEVICE ++ $FSCK -t "$FSTYPE" -p "$DEVICE" + fi + + # NOTE: if any errors at all are detected, it returns non-zero +@@ -529,20 +541,20 @@ Filesystem_start() + fi + + [ -d "$MOUNTPOINT" ] || +- ocf_run mkdir -p $MOUNTPOINT ++ ocf_run mkdir -p "$MOUNTPOINT" + if [ ! -d "$MOUNTPOINT" ] ; then + ocf_exit_reason "Couldn't find directory [$MOUNTPOINT] to use as a mount point" + exit $OCF_ERR_INSTALLED + fi + +- flushbufs $DEVICE ++ flushbufs "$DEVICE" + # Mount the filesystem. + case "$FSTYPE" in +- none) $MOUNT $options $DEVICE $MOUNTPOINT && ++ none) $MOUNT $options "$DEVICE" "$MOUNTPOINT" && + bind_mount + ;; +- "") $MOUNT $options $DEVICE $MOUNTPOINT ;; +- *) $MOUNT -t $FSTYPE $options $DEVICE $MOUNTPOINT ;; ++ "") $MOUNT $options "$DEVICE" "$MOUNTPOINT" ;; ++ *) $MOUNT -t "$FSTYPE" $options "$DEVICE" "$MOUNTPOINT" ;; + esac + + if [ $? -ne 0 ]; then +@@ -595,23 +607,23 @@ signal_processes() { + done + } + try_umount() { +- local SUB=$1 +- $UMOUNT $umount_force $SUB +- list_mounts | grep -q " $SUB " >/dev/null 2>&1 || { ++ local SUB="$1" ++ $UMOUNT $umount_force "$SUB" ++ list_mounts | grep -q "${TAB}${SUB}${TAB}" >/dev/null 2>&1 || { + ocf_log info "unmounted $SUB successfully" + return $OCF_SUCCESS + } + return $OCF_ERR_GENERIC + } + fs_stop() { +- local SUB=$1 timeout=$2 sig cnt ++ local SUB="$1" timeout=$2 sig cnt + for sig in TERM KILL; do + cnt=$((timeout/2)) # try half time with TERM + while [ $cnt -gt 0 ]; do +- try_umount $SUB && ++ try_umount "$SUB" && + return $OCF_SUCCESS + ocf_exit_reason "Couldn't unmount $SUB; trying cleanup with $sig" +- signal_processes $SUB $sig ++ signal_processes "$SUB" $sig + cnt=$((cnt-1)) + sleep 1 + done +@@ -633,7 +645,7 @@ Filesystem_stop() + # Wipe the status file, but continue with a warning if + # removal fails -- the file system might be read only + if [ $OCF_CHECK_LEVEL -eq 20 ]; then +- rm -f ${STATUSFILE} ++ rm -f "${STATUSFILE}" + if [ $? -ne 0 ]; then + ocf_log warn "Failed to remove status file ${STATUSFILE}." + fi +@@ -650,7 +662,7 @@ Filesystem_stop() + + # Umount all sub-filesystems mounted under $MOUNTPOINT/ too. + local timeout +- for SUB in `list_submounts $MOUNTPOINT` $MOUNTPOINT; do ++ while read SUB; do + ocf_log info "Trying to unmount $SUB" + if ocf_is_true "$FAST_STOP"; then + timeout=6 +@@ -658,15 +670,18 @@ Filesystem_stop() + timeout=${OCF_RESKEY_CRM_meta_timeout:="20000"} + timeout=$((timeout/1000)) + fi +- fs_stop $SUB $timeout ++ fs_stop "$SUB" $timeout + rc=$? + if [ $rc -ne $OCF_SUCCESS ]; then + ocf_exit_reason "Couldn't unmount $SUB, giving up!" 
+ fi +- done ++ done <<-EOF ++ $(list_submounts "$CANONICALIZED_MOUNTPOINT"; \ ++ echo $CANONICALIZED_MOUNTPOINT) ++ EOF + fi + +- flushbufs $DEVICE ++ flushbufs "$DEVICE" + + return $rc + } +@@ -677,7 +692,8 @@ Filesystem_stop() + # + Filesystem_status() + { +- if list_mounts | grep -q " $CANONICALIZED_MOUNTPOINT " >/dev/null 2>&1; then ++ match_string="${TAB}${CANONICALIZED_MOUNTPOINT}${TAB}" ++ if list_mounts | grep -q "$match_string" >/dev/null 2>&1; then + rc=$OCF_SUCCESS + msg="$MOUNTPOINT is mounted (running)" + else +@@ -712,7 +728,7 @@ Filesystem_monitor_10() + return $OCF_SUCCESS + fi + dd_opts="iflag=direct bs=4k count=1" +- err_output=`dd if=$DEVICE $dd_opts 2>&1 >/dev/null` ++ err_output=`dd if="$DEVICE" $dd_opts 2>&1 >/dev/null` + if [ $? -ne 0 ]; then + ocf_exit_reason "Failed to read device $DEVICE" + ocf_log err "dd said: $err_output" +@@ -733,20 +749,20 @@ Filesystem_monitor_20() + # to bypass caches. + dd_opts="oflag=direct,sync bs=4k conv=fsync,sync" + fi +- status_dir=`dirname $STATUSFILE` ++ status_dir=$(dirname "$STATUSFILE") + [ -d "$status_dir" ] || mkdir -p "$status_dir" +- err_output=`echo "${OCF_RESOURCE_INSTANCE}" | dd of=${STATUSFILE} $dd_opts 2>&1` ++ err_output=`echo "${OCF_RESOURCE_INSTANCE}" | dd of="${STATUSFILE}" $dd_opts 2>&1` + if [ $? -ne 0 ]; then + ocf_exit_reason "Failed to write status file ${STATUSFILE}" + ocf_log err "dd said: $err_output" + return $OCF_ERR_GENERIC + fi +- test -f ${STATUSFILE} ++ test -f "${STATUSFILE}" + if [ $? -ne 0 ]; then + ocf_exit_reason "Cannot stat the status file ${STATUSFILE}" + return $OCF_ERR_GENERIC + fi +- cat ${STATUSFILE} > /dev/null ++ cat "${STATUSFILE}" > /dev/null + if [ $? -ne 0 ]; then + ocf_exit_reason "Cannot read the status file ${STATUSFILE}" + return $OCF_ERR_GENERIC +@@ -791,9 +807,9 @@ Filesystem_validate_all() + # NOTE: Without inserting the $FSTYPE module, this step may be imprecise + # TODO: This is Linux specific crap. + if [ ! -z "$FSTYPE" -a "$FSTYPE" != none ]; then +- cut -f2 /proc/filesystems |grep -q ^$FSTYPE$ ++ cut -f2 /proc/filesystems |grep -q "^${FSTYPE}$" + if [ $? -ne 0 ]; then +- modpath=/lib/modules/`uname -r` ++ modpath=/lib/modules/`uname -r` + moddep=$modpath/modules.dep + # Do we have $FSTYPE in modules.dep? + cut -d' ' -f1 $moddep |grep -q "^$modpath.*$FSTYPE\.k\?o:$" +@@ -826,7 +842,7 @@ set_blockdevice_var() { + blockdevice=no + + # these are definitely not block devices +- case $FSTYPE in ++ case "$FSTYPE" in + nfs4|nfs|smbfs|cifs|none|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs) return;; + esac + +@@ -834,7 +850,7 @@ set_blockdevice_var() { + return + fi + +- case $DEVICE in ++ case "$DEVICE" in + -*) # Oh... An option to mount instead... Typically -U or -L + ;; + /dev/null) # Special case for BSC +@@ -863,7 +879,7 @@ if [ -n "${OCF_RESKEY_force_unmount}" ]; then + FORCE_UNMOUNT=$OCF_RESKEY_force_unmount + fi + +-DEVICE=$OCF_RESKEY_device ++DEVICE="$OCF_RESKEY_device" + FSTYPE=$OCF_RESKEY_fstype + if [ ! -z "$OCF_RESKEY_options" ]; then + options="-o $OCF_RESKEY_options" +@@ -899,10 +915,10 @@ if [ -z "$OCF_RESKEY_directory" ]; then + exit $OCF_ERR_CONFIGURED + fi + else +- MOUNTPOINT=$(echo $OCF_RESKEY_directory | sed 's/\/*$//') ++ MOUNTPOINT="$(echo "$OCF_RESKEY_directory" | sed 's/\/*$//')" + : ${MOUNTPOINT:=/} + if [ -e "$MOUNTPOINT" ] ; then +- CANONICALIZED_MOUNTPOINT=$(readlink -f "$MOUNTPOINT") ++ CANONICALIZED_MOUNTPOINT="$(readlink -f "$MOUNTPOINT")" + if [ $? 
-ne 0 ]; then + ocf_exit_reason "Could not canonicalize $MOUNTPOINT because readlink failed" + exit $OCF_ERR_GENERIC +@@ -947,7 +963,7 @@ CLUSTERSAFE=0 + is_option "ro" && + CLUSTERSAFE=2 + +-case $FSTYPE in ++case "$FSTYPE" in + nfs4|nfs|smbfs|cifs|none|gfs2|glusterfs|ceph|ocfs2|overlay|overlayfs|tmpfs|cvfs) + CLUSTERSAFE=1 # this is kind of safe too + ;; + +From eca9a96ad3356df3636bfa3187afe1b1954693b2 Mon Sep 17 00:00:00 2001 +From: Reid wahl +Date: Fri, 10 Jul 2020 16:38:04 -0700 +Subject: [PATCH 2/3] Filesystem: POSIX-compliant syntax for portability + +Updated to use POSIX `$()` instead of Bourne-shell backticks, and to +use `grep ... >/dev/null 2>&1` instead of `grep -q`. (Note: `grep -q` +only suppresses `stdout` anyway. `grep -q -s` would be required to +suppress both `stdout` and `stderr`.) +--- + heartbeat/Filesystem | 33 +++++++++++++++++---------------- + 1 file changed, 17 insertions(+), 16 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 9a52aa712..34ade20d7 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -90,7 +90,7 @@ fi + : ${OCF_RESKEY_force_unmount=${OCF_RESKEY_force_unmount_default}} + + # Variables used by multiple methods +-HOSTOS=`uname` ++HOSTOS=$(uname) + TAB=' ' + + # The status file is going to an extra directory, by default +@@ -100,7 +100,7 @@ prefix=${OCF_RESKEY_statusfile_prefix} + suffix="${OCF_RESOURCE_INSTANCE}" + [ "$OCF_RESKEY_CRM_meta_clone" ] && + suffix="${suffix}_$OCF_RESKEY_CRM_meta_clone" +-suffix="${suffix}_`uname -n`" ++suffix="${suffix}_$(uname -n)" + STATUSFILE="${OCF_RESKEY_directory}/$prefix$suffix" + + ####################################################################### +@@ -329,7 +329,7 @@ determine_blockdevice() { + : ;; + *) + match_string="${TAB}${CANONICALIZED_MOUNTPOINT}${TAB}" +- DEVICE=`list_mounts | grep "$match_string" | cut -d"$TAB" -f1` ++ DEVICE=$(list_mounts | grep "$match_string" | cut -d"$TAB" -f1) + if [ -b "$DEVICE" ]; then + blockdevice=yes + fi +@@ -354,7 +354,7 @@ bind_kernel_check() { + exit(1); + }' + [ $? -ne 0 ] && +- ocf_log warn "kernel `uname -r` cannot handle read only bind mounts" ++ ocf_log warn "kernel $(uname -r) cannot handle read only bind mounts" + } + + bind_root_mount_check() { +@@ -369,7 +369,7 @@ bind_mount() { + if is_bind_mount && [ "$options" != "-o bind" ] + then + bind_kernel_check +- bind_opts=`echo "$options" | sed 's/bind/remount/'` ++ bind_opts=$(echo "$options" | sed 's/bind/remount/') + $MOUNT $bind_opts "$MOUNTPOINT" + else + true # make sure to return OK +@@ -469,7 +469,7 @@ trigger_udev_rules_if_needed() + refresh_flag="yes" + fi + else +- tmp="`echo $DEVICE|awk '{$1=""; print substr($0,2)}'`" ++ tmp="$(echo $DEVICE|awk '{$1=""; print substr($0,2)}')" + case "$DEVICE" in + -U*|--uuid*) + tmp="/dev/disk/by-uuid/$tmp" +@@ -498,7 +498,7 @@ Filesystem_start() + { + # Check if there are any mounts mounted under the mountpoint + match_string="${TAB}${CANONICALIZED_MOUNTPOINT}" +- if list_mounts | grep -q -E "$match_string/\w+" >/dev/null 2>&1; then ++ if list_mounts | grep -E "$match_string/\w+" >/dev/null 2>&1; then + ocf_log err "There is one or more mounts mounted under $MOUNTPOINT." 
+ return $OCF_ERR_CONFIGURED + fi +@@ -602,14 +602,14 @@ signal_processes() { + return + fi + for pid in $pids; do +- ocf_log info "sending signal $sig to: `ps -f $pid | tail -1`" ++ ocf_log info "sending signal $sig to: $(ps -f $pid | tail -1)" + kill -s $sig $pid + done + } + try_umount() { + local SUB="$1" + $UMOUNT $umount_force "$SUB" +- list_mounts | grep -q "${TAB}${SUB}${TAB}" >/dev/null 2>&1 || { ++ list_mounts | grep "${TAB}${SUB}${TAB}" >/dev/null 2>&1 || { + ocf_log info "unmounted $SUB successfully" + return $OCF_SUCCESS + } +@@ -693,7 +693,7 @@ Filesystem_stop() + Filesystem_status() + { + match_string="${TAB}${CANONICALIZED_MOUNTPOINT}${TAB}" +- if list_mounts | grep -q "$match_string" >/dev/null 2>&1; then ++ if list_mounts | grep "$match_string" >/dev/null 2>&1; then + rc=$OCF_SUCCESS + msg="$MOUNTPOINT is mounted (running)" + else +@@ -728,7 +728,7 @@ Filesystem_monitor_10() + return $OCF_SUCCESS + fi + dd_opts="iflag=direct bs=4k count=1" +- err_output=`dd if="$DEVICE" $dd_opts 2>&1 >/dev/null` ++ err_output=$(dd if="$DEVICE" $dd_opts 2>&1 >/dev/null) + if [ $? -ne 0 ]; then + ocf_exit_reason "Failed to read device $DEVICE" + ocf_log err "dd said: $err_output" +@@ -751,7 +751,7 @@ Filesystem_monitor_20() + fi + status_dir=$(dirname "$STATUSFILE") + [ -d "$status_dir" ] || mkdir -p "$status_dir" +- err_output=`echo "${OCF_RESOURCE_INSTANCE}" | dd of="${STATUSFILE}" $dd_opts 2>&1` ++ err_output=$(echo "${OCF_RESOURCE_INSTANCE}" | dd of="${STATUSFILE}" $dd_opts 2>&1) + if [ $? -ne 0 ]; then + ocf_exit_reason "Failed to write status file ${STATUSFILE}" + ocf_log err "dd said: $err_output" +@@ -807,12 +807,13 @@ Filesystem_validate_all() + # NOTE: Without inserting the $FSTYPE module, this step may be imprecise + # TODO: This is Linux specific crap. + if [ ! -z "$FSTYPE" -a "$FSTYPE" != none ]; then +- cut -f2 /proc/filesystems |grep -q "^${FSTYPE}$" ++ cut -f2 /proc/filesystems | grep "^${FSTYPE}$" >/dev/null 2>&1 + if [ $? -ne 0 ]; then +- modpath=/lib/modules/`uname -r` ++ modpath=/lib/modules/$(uname -r) + moddep=$modpath/modules.dep + # Do we have $FSTYPE in modules.dep? +- cut -d' ' -f1 $moddep |grep -q "^$modpath.*$FSTYPE\.k\?o:$" ++ cut -d' ' -f1 $moddep \ ++ | grep "^${modpath}.*${FSTYPE}\.k\?o:$" >/dev/null 2>&1 + if [ $? -ne 0 ]; then + ocf_log info "It seems we do not have $FSTYPE support" + fi +@@ -846,7 +847,7 @@ set_blockdevice_var() { + nfs4|nfs|smbfs|cifs|none|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|lustre) return;; + esac + +- if `is_option "loop"`; then ++ if $(is_option "loop"); then + return + fi + + +From 5517712f4bb6e90b23cde6310c03509c9061cb36 Mon Sep 17 00:00:00 2001 +From: Reid wahl +Date: Fri, 10 Jul 2020 16:44:17 -0700 +Subject: [PATCH 3/3] Filesystem: Convert leading space characters to tabs + +A few lines started with spaces instead of tabs. Tabs are the +convention in this file. +--- + heartbeat/Filesystem | 16 ++++++++-------- + 1 file changed, 8 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index 34ade20d7..501e5a0d0 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -359,10 +359,10 @@ bind_kernel_check() { + + bind_root_mount_check() { + if [ "$(df -P "$1" | awk 'END{print $6}')" = "/" ]; then +- return 1 +- else +- return 0 +- fi ++ return 1 ++ else ++ return 0 ++ fi + } + + bind_mount() { +@@ -571,10 +571,10 @@ get_pids() + local procs + local mmap_procs + +- if is_bind_mount && ocf_is_true "$FORCE_UNMOUNT" && ! 
bind_root_mount_check "$DEVICE"; then +- ocf_log debug "Change force_umount from '$FORCE_UNMOUNT' to 'safe'" +- FORCE_UNMOUNT=safe +- fi ++ if is_bind_mount && ocf_is_true "$FORCE_UNMOUNT" && ! bind_root_mount_check "$DEVICE"; then ++ ocf_log debug "Change force_umount from '$FORCE_UNMOUNT' to 'safe'" ++ FORCE_UNMOUNT=safe ++ fi + + if ocf_is_true "$FORCE_UNMOUNT"; then + if [ "X${HOSTOS}" = "XOpenBSD" ];then diff --git a/SOURCES/bz1861001-sybaseASE-add-logfile-parameter.patch b/SOURCES/bz1861001-sybaseASE-add-logfile-parameter.patch new file mode 100644 index 0000000..b294584 --- /dev/null +++ b/SOURCES/bz1861001-sybaseASE-add-logfile-parameter.patch @@ -0,0 +1,53 @@ +From d62d8776df8aaa1da32e8452b3816505d1ea1f7f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 28 Oct 2020 15:06:47 +0100 +Subject: [PATCH] sybaseASE: add logfile parameter + +--- + heartbeat/sybaseASE.in | 19 ++++++++++++++++++- + 1 file changed, 18 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in +index 7ff30bd31..fef76474e 100755 +--- a/heartbeat/sybaseASE.in ++++ b/heartbeat/sybaseASE.in +@@ -115,6 +115,13 @@ fi + interfaces_file_default="${OCF_RESKEY_sybase_home}/interfaces" + : ${OCF_RESKEY_interfaces_file=${interfaces_file_default}} + ++if [ $__OCF_ACTION != "meta-data" ]; then ++ logfile_default="$OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ase/install/$OCF_RESKEY_server_name.log" ++else ++ logfile_default="detect" ++fi ++: ${OCF_RESKEY_logfile=${logfile_default}} ++ + export LD_POINTER_GUARD=0 + + ####################################################################################### +@@ -122,7 +129,7 @@ export LD_POINTER_GUARD=0 + ####################################################################################### + declare login_string="" + declare RUNSERVER_SCRIPT=$OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ase/install/RUN_$OCF_RESKEY_server_name +-declare CONSOLE_LOG=$OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ase/install/$OCF_RESKEY_server_name.log ++declare CONSOLE_LOG="$OCF_RESKEY_logfile" + + ################################################################################################## + # This function will be called by Pacemaker to get the meta data of resource agent "sybaseASE". 
# +@@ -223,6 +230,16 @@ meta_data() + + + ++ ++ ++ Logfile ++ ++ ++ Logfile ++ ++ ++ ++ + + + diff --git a/SOURCES/bz1872754-pgsqlms-new-ra.patch b/SOURCES/bz1872754-pgsqlms-new-ra.patch new file mode 100644 index 0000000..b3b314e --- /dev/null +++ b/SOURCES/bz1872754-pgsqlms-new-ra.patch @@ -0,0 +1,3338 @@ +diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am +--- a/doc/man/Makefile.am 2021-04-12 12:51:56.831835953 +0200 ++++ b/doc/man/Makefile.am 2021-04-13 13:38:14.198361848 +0200 +@@ -154,6 +154,7 @@ + ocf_heartbeat_ovsmonitor.7 \ + ocf_heartbeat_pgagent.7 \ + ocf_heartbeat_pgsql.7 \ ++ ocf_heartbeat_pgsqlms.7 \ + ocf_heartbeat_pingd.7 \ + ocf_heartbeat_podman.7 \ + ocf_heartbeat_portblock.7 \ +diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am +--- a/heartbeat/Makefile.am 2021-04-12 12:51:56.831835953 +0200 ++++ b/heartbeat/Makefile.am 2021-04-13 13:37:45.741292178 +0200 +@@ -149,6 +149,7 @@ + ovsmonitor \ + pgagent \ + pgsql \ ++ pgsqlms \ + pingd \ + podman \ + portblock \ +@@ -209,7 +210,10 @@ + mysql-common.sh \ + nfsserver-redhat.sh \ + findif.sh \ +- ocf.py ++ ocf.py \ ++ OCF_Directories.pm \ ++ OCF_Functions.pm \ ++ OCF_ReturnCodes.pm + + # Legacy locations + hbdir = $(sysconfdir)/ha.d +diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm +--- a/heartbeat/OCF_Directories.pm 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/OCF_Directories.pm 2021-04-13 13:37:35.621267404 +0200 +@@ -0,0 +1,139 @@ ++#!/usr/bin/perl ++# This program is open source, licensed under the PostgreSQL License. ++# For license terms, see the LICENSE file. ++# ++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault ++ ++=head1 NAME ++ ++OCF_Directories - Binaries and binary options for use in Resource Agents ++ ++=head1 SYNOPSIS ++ ++ use FindBin; ++ use lib "$FindBin::RealBin/../../lib/heartbeat/"; ++ ++ use OCF_Directories; ++ ++=head1 DESCRIPTION ++ ++This module has been ported from the ocf-directories shell script of the ++resource-agents project. See L. 
++ ++=head1 VARIABLES ++ ++Here are the variables exported by this module: ++ ++=over ++ ++=item $INITDIR ++ ++=item $HA_DIR ++ ++=item $HA_RCDIR ++ ++=item $HA_CONFDIR ++ ++=item $HA_CF ++ ++=item $HA_VARLIB ++ ++=item $HA_RSCTMP ++ ++=item $HA_RSCTMP_OLD ++ ++=item $HA_FIFO ++ ++=item $HA_BIN ++ ++=item $HA_SBIN_DIR ++ ++=item $HA_DATEFMT ++ ++=item $HA_DEBUGLOG ++ ++=item $HA_RESOURCEDIR ++ ++=item $HA_DOCDIR ++ ++=item $__SCRIPT_NAME ++ ++=item $HA_VARRUN ++ ++=item $HA_VARLOCK ++ ++=item $ocf_prefix ++ ++=item $ocf_exec_prefix ++ ++=back ++ ++=cut ++ ++package OCF_Directories; ++ ++use strict; ++use warnings; ++use 5.008; ++use File::Basename; ++ ++BEGIN { ++ use Exporter; ++ ++ ++ our $VERSION = 'v2.3.0'; ++ our @ISA = ('Exporter'); ++ our @EXPORT = qw( ++ $INITDIR ++ $HA_DIR ++ $HA_RCDIR ++ $HA_CONFDIR ++ $HA_CF ++ $HA_VARLIB ++ $HA_RSCTMP ++ $HA_RSCTMP_OLD ++ $HA_FIFO ++ $HA_BIN ++ $HA_SBIN_DIR ++ $HA_DATEFMT ++ $HA_DEBUGLOG ++ $HA_RESOURCEDIR ++ $HA_DOCDIR ++ $__SCRIPT_NAME ++ $HA_VARRUN ++ $HA_VARLOCK ++ $ocf_prefix ++ $ocf_exec_prefix ++ ); ++ our @EXPORT_OK = ( @EXPORT ); ++} ++ ++our $INITDIR = ( $ENV{'INITDIR'} || '/etc/init.d' ); ++our $HA_DIR = ( $ENV{'HA_DIR'} || '/etc/ha.d' ); ++our $HA_RCDIR = ( $ENV{'HA_RCDIR'} || '/etc/ha.d/rc.d' ); ++our $HA_CONFDIR = ( $ENV{'HA_CONFDIR'} || '/etc/ha.d/conf' ); ++our $HA_CF = ( $ENV{'HA_CF'} || '/etc/ha.d/ha.cf' ); ++our $HA_VARLIB = ( $ENV{'HA_VARLIB'} || '/var/lib/heartbeat' ); ++our $HA_RSCTMP = ( $ENV{'HA_RSCTMP'} || '/run/resource-agents' ); ++our $HA_RSCTMP_OLD = ( $ENV{'HA_RSCTMP_OLD'} || '/var/run/heartbeat/rsctmp' ); ++our $HA_FIFO = ( $ENV{'HA_FIFO'} || '/var/lib/heartbeat/fifo' ); ++our $HA_BIN = ( $ENV{'HA_BIN'} || '/usr/libexec/heartbeat' ); ++our $HA_SBIN_DIR = ( $ENV{'HA_SBIN_DIR'} || '/usr/sbin' ); ++our $HA_DATEFMT = ( $ENV{'HA_DATEFMT'} || '%b %d %T ' ); ++our $HA_DEBUGLOG = ( $ENV{'HA_DEBUGLOG'} || '/dev/null' ); ++our $HA_RESOURCEDIR = ( $ENV{'HA_RESOURCEDIR'}|| '/etc/ha.d/resource.d' ); ++our $HA_DOCDIR = ( $ENV{'HA_DOCDIR'} || '/usr/share/doc/heartbeat' ); ++our $__SCRIPT_NAME = ( $ENV{'__SCRIPT_NAME'} || fileparse($0) ); ++our $HA_VARRUN = ( $ENV{'HA_VARRUN'} || '/var/run' ); ++our $HA_VARLOCK = ( $ENV{'HA_VARLOCK'} || '/var/lock/subsys' ); ++our $ocf_prefix = '/usr'; ++our $ocf_exec_prefix = '/usr'; ++ ++1; ++ ++=head1 COPYRIGHT AND LICENSE ++ ++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault. ++ ++Licensed under the PostgreSQL License. ++ +diff --color -uNr a/heartbeat/OCF_Functions.pm b/heartbeat/OCF_Functions.pm +--- a/heartbeat/OCF_Functions.pm 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/OCF_Functions.pm 2021-04-13 13:37:35.621267404 +0200 +@@ -0,0 +1,631 @@ ++#!/usr/bin/perl ++# This program is open source, licensed under the PostgreSQL License. ++# For license terms, see the LICENSE file. ++# ++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault ++ ++=head1 NAME ++ ++OCF_Functions - helper subroutines for OCF agent ++ ++=head1 SYNOPSIS ++ ++ use FindBin; ++ use lib "$FindBin::RealBin/../../lib/heartbeat/"; ++ ++ use OCF_Functions; ++ ++=head1 DESCRIPTION ++ ++This module has been ported from the ocf-shellfuncs shell script of the ++resource-agents project. See L. ++ ++=head1 VARIABLE ++ ++The only variable exported by this module is C<__OCF_ACTION>. 
++ ++=head1 SUBROUTINES ++ ++Here are the subroutines ported from ocf-shellfuncs and exported by this module: ++ ++=over ++ ++=item ha_debug ++ ++=item ha_log ++ ++=item hadate ++ ++=item ocf_is_clone ++ ++=item ocf_is_ms ++ ++=item ocf_is_probe ++ ++=item ocf_is_root ++ ++=item ocf_is_true ++ ++=item ocf_is_ver ++ ++=item ocf_local_nodename ++ ++=item ocf_log ++ ++=item ocf_exit_reason ++ ++=item ocf_maybe_random ++ ++=item ocf_ver2num ++ ++=item ocf_ver_complete_level ++ ++=item ocf_ver_level ++ ++=item ocf_version_cmp ++ ++=item set_logtag ++ ++=back ++ ++Here are the subroutines only existing in the perl module but not in the ++ocf-shellfuncs script: ++ ++=over ++ ++=item ocf_notify_env ++ ++=back ++ ++=cut ++ ++package OCF_Functions; ++ ++use strict; ++use warnings; ++use 5.008; ++use POSIX qw( strftime setlocale LC_ALL ); ++use English; ++ ++use FindBin; ++use lib "$FindBin::RealBin/../../lib/heartbeat/"; ++ ++use OCF_ReturnCodes; ++use OCF_Directories; ++ ++BEGIN { ++ use Exporter; ++ ++ our $VERSION = 'v2.3.0'; ++ our @ISA = ('Exporter'); ++ our @EXPORT = qw( ++ $__OCF_ACTION ++ ocf_is_root ++ ocf_maybe_random ++ ocf_is_true ++ hadate ++ set_logtag ++ ha_log ++ ha_debug ++ ocf_log ++ ocf_exit_reason ++ ocf_is_probe ++ ocf_is_clone ++ ocf_is_ms ++ ocf_is_ver ++ ocf_ver2num ++ ocf_ver_level ++ ocf_ver_complete_level ++ ocf_version_cmp ++ ocf_local_nodename ++ ocf_notify_env ++ ); ++ our @EXPORT_OK = ( @EXPORT ); ++} ++ ++our $__OCF_ACTION; ++ ++sub ocf_is_root { ++ return $EUID == 0; ++} ++ ++sub ocf_maybe_random { ++ return int( rand( 32767 ) ); ++} ++ ++sub ocf_is_true { ++ my $v = shift; ++ return ( defined $v and $v =~ /^(?:yes|true|1|YES|TRUE|ja|on|ON)$/ ); ++} ++ ++sub hadate { ++ return strftime( $HA_DATEFMT, localtime ); ++} ++ ++sub set_logtag { ++ ++ return if defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne ''; ++ ++ if ( defined $ENV{'OCF_RESOURCE_INSTANCE'} and $ENV{'OCF_RESOURCE_INSTANCE'} ne '' ) { ++ $ENV{'HA_LOGTAG'} = "$__SCRIPT_NAME($ENV{'OCF_RESOURCE_INSTANCE'})[$PID]"; ++ } ++ else { ++ $ENV{'HA_LOGTAG'}="${__SCRIPT_NAME}[$PID]"; ++ } ++} ++ ++sub __ha_log { ++ my $ignore_stderr = 0; ++ my $loglevel = ''; ++ ++ if ( $_[0] eq '--ignore-stderr' ) { ++ $ignore_stderr = 1; ++ shift; ++ } ++ ++ $ENV{'HA_LOGFACILITY'} = '' if not defined $ENV{'HA_LOGFACILITY'} ++ or $ENV{'HA_LOGFACILITY'} eq 'none'; ++ ++ # if we're connected to a tty, then output to stderr ++ if ( -t STDERR ) { ++ # FIXME ++ # T.N.: this was ported with the bug on $loglevel being empty ++ # and never set before the test here... ++ if ( defined $ENV{'HA_debug'} ++ and $ENV{'HA_debug'} == 0 ++ and $loglevel eq 'debug' ++ ) { ++ return 0; ++ } ++ elsif ( $ignore_stderr ) { ++ # something already printed this error to stderr, so ignore ++ return 0; ++ } ++ if ( defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne '' ) { ++ printf STDERR "%s: %s\n", $ENV{'HA_LOGTAG'}, join ' ', @ARG; ++ } ++ else { ++ printf STDERR "%s\n", join ' ', @ARG; ++ } ++ return 0; ++ } ++ ++ set_logtag(); ++ ++ if ( defined $ENV{'HA_LOGD'} and $ENV{'HA_LOGD'} eq 'yes' ) { ++ system 'ha_logger', '-t', $ENV{'HA_LOGTAG'}, @ARG; ++ return 0 if ( $? 
>> 8 ) == 0; ++ } ++ ++ unless ( $ENV{'HA_LOGFACILITY'} eq '' ) { ++ # logging through syslog ++ # loglevel is unknown, use 'notice' for now ++ $loglevel = 'notice'; ++ for ( "@ARG" ) { ++ if ( /ERROR/ ) { ++ $loglevel = 'err'; ++ } ++ elsif ( /WARN/ ) { ++ $loglevel = 'warning'; ++ } ++ elsif (/INFO|info/ ) { ++ $loglevel = 'info'; ++ } ++ } ++ ++ system 'logger', '-t', $ENV{'HA_LOGTAG'}, '-p', ++ "$ENV{'HA_LOGFACILITY'}.$loglevel", @ARG; ++ } ++ ++ if ( defined $ENV{'HA_LOGFILE'} and $ENV{'HA_LOGFILE'} ne '' ) { ++ # appending to $HA_LOGFILE ++ open my $logfile, '>>', $ENV{'HA_LOGFILE'}; ++ printf $logfile "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(), ++ join (' ', @ARG); ++ close $logfile; ++ } ++ ++ # appending to stderr ++ printf STDERR "%s %s\n", hadate(), join ' ', @ARG ++ if (not defined $ENV{'HA_LOGFACILITY'} or $ENV{'HA_LOGFACILITY'} eq '') ++ and (not defined $ENV{'HA_LOGFILE'} or $ENV{'HA_LOGFILE'} eq '' ) ++ and not $ignore_stderr; ++ ++ if ( defined $ENV{'HA_DEBUGLOG'} and $ENV{'HA_DEBUGLOG'} ne '' ++ and $ENV{'HA_LOGFILE'} ne $ENV{'HA_DEBUGLOG'} ++ ) { ++ # appending to $HA_DEBUGLOG ++ open my $logfile, '>>', $ENV{'HA_DEBUGLOG'}; ++ printf $logfile "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(), ++ join (' ', @ARG); ++ close $logfile; ++ } ++} ++ ++sub ha_log { ++ return __ha_log( @ARG ); ++} ++ ++sub ha_debug { ++ ++ return 0 if defined $ENV{'HA_debug'} and $ENV{'HA_debug'} == 0; ++ ++ if ( -t STDERR ) { ++ if ( defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne '' ) { ++ printf STDERR "%s: %s\n", $ENV{'HA_LOGTAG'}, join ' ', @ARG; ++ } ++ else { ++ printf STDERR "%s\n", join ' ', @ARG; ++ } ++ ++ return 0; ++ } ++ ++ set_logtag(); ++ ++ if ( defined $ENV{'HA_LOGD'} and $ENV{'HA_LOGD'} eq 'yes' ) { ++ system 'ha_logger', '-t', $ENV{'HA_LOGTAG'}, '-D', 'ha-debug', @ARG; ++ return 0 if ( $? >> 8 ) == 0; ++ } ++ ++ $ENV{'HA_LOGFACILITY'} = '' if not defined $ENV{'HA_LOGFACILITY'} ++ or $ENV{'HA_LOGFACILITY'} eq 'none'; ++ ++ unless ( $ENV{'HA_LOGFACILITY'} eq '' ) { ++ # logging through syslog ++ ++ system 'logger', '-t', $ENV{'HA_LOGTAG'}, '-p', ++ "$ENV{'HA_LOGFACILITY'}.debug", @ARG; ++ } ++ ++ if ( defined $ENV{'HA_DEBUGLOG'} and -f $ENV{'HA_DEBUGLOG'} ) { ++ my $logfile; ++ # appending to $HA_DEBUGLOG ++ open $logfile, '>>', $ENV{'HA_DEBUGLOG'}; ++ printf $logfile "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(), ++ join (' ', @ARG); ++ close $logfile; ++ } ++ ++ # appending to stderr ++ printf STDERR "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(), join ' ', @ARG ++ if (not defined $ENV{'HA_LOGFACILITY'} or $ENV{'HA_LOGFACILITY'} eq '') ++ and (not defined $ENV{'HA_DEBUGLOG'} or $ENV{'HA_DEBUGLOG'} eq '' ); ++} ++ ++# ++# ocf_log: log messages from the resource agent ++# This function is slightly different from its equivalent in ocf-shellfuncs.in ++# as it behaves like printf. ++# Arguments: ++# * __OCF_PRIO: log level ++# * __OCF_MSG: printf-like format string ++# * all other arguments are values for the printf-like format string ++# ++sub ocf_log { ++ my $__OCF_PRIO; ++ my $__OCF_MSG; ++ ++ # TODO: Revisit and implement internally. 
++ if ( scalar @ARG < 2 ) { ++ ocf_log ( 'err', "Not enough arguments [%d] to ocf_log", scalar @ARG ); ++ } ++ ++ $__OCF_PRIO = shift; ++ $__OCF_MSG = shift; ++ $__OCF_MSG = sprintf $__OCF_MSG, @ARG; ++ ++ for ( $__OCF_PRIO ) { ++ if ( /crit/ ) { $__OCF_PRIO = 'CRIT' } ++ elsif ( /err/ ) { $__OCF_PRIO = 'ERROR' } ++ elsif ( /warn/ ) { $__OCF_PRIO = 'WARNING' } ++ elsif ( /info/ ) { $__OCF_PRIO = 'INFO' } ++ elsif ( /debug/ ) { $__OCF_PRIO = 'DEBUG' } ++ else { $__OCF_PRIO =~ tr/[a-z]/[A-Z]/ } ++ } ++ ++ if ( $__OCF_PRIO eq 'DEBUG' ) { ++ ha_debug( "$__OCF_PRIO: $__OCF_MSG"); ++ } ++ else { ++ ha_log( "$__OCF_PRIO: $__OCF_MSG"); ++ } ++} ++ ++ ++# ++# ocf_exit_reason: print exit error string to stderr and log ++# Usage: Allows the OCF script to provide a string ++# describing why the exit code was returned. ++# Arguments: reason - required, The string that represents ++# why the error occured. ++# ++sub ocf_exit_reason { ++ my $cookie = $ENV{'OCF_EXIT_REASON_PREFIX'} || 'ocf-exit-reason:'; ++ my $fmt; ++ my $msg; ++ ++ # No argument is likely not intentional. ++ # Just one argument implies a printf format string of just "%s". ++ # "Least surprise" in case some interpolated string from variable ++ # expansion or other contains a percent sign. ++ # More than one argument: first argument is going to be the format string. ++ ocf_log ( 'err', 'Not enough arguments [%d] to ocf_exit_reason', ++ scalar @ARG ) if scalar @ARG < 1; ++ ++ $fmt = shift; ++ $msg = sprintf $fmt, @ARG; ++ ++ print STDERR "$cookie$msg\n"; ++ __ha_log( '--ignore-stderr', "ERROR: $msg" ); ++} ++ ++# returns true if the CRM is currently running a probe. A probe is ++# defined as a monitor operation with a monitoring interval of zero. ++sub ocf_is_probe { ++ return ( $__OCF_ACTION eq 'monitor' ++ and $ENV{'OCF_RESKEY_CRM_meta_interval'} == 0 ); ++} ++ ++# returns true if the resource is configured as a clone. This is ++# defined as a resource where the clone-max meta attribute is present, ++# and set to greater than zero. ++sub ocf_is_clone { ++ return ( defined $ENV{'OCF_RESKEY_CRM_meta_clone_max'} ++ and $ENV{'OCF_RESKEY_CRM_meta_clone_max'} > 0 ); ++} ++ ++# returns true if the resource is configured as a multistate ++# (master/slave) resource. This is defined as a resource where the ++# master-max meta attribute is present, and set to greater than zero. ++sub ocf_is_ms { ++ return ( defined $ENV{'OCF_RESKEY_CRM_meta_master_max'} ++ and $ENV{'OCF_RESKEY_CRM_meta_master_max'} > 0 ); ++} ++ ++# version check functions ++# allow . 
and - to delimit version numbers ++# max version number is 999 ++# letters and such are effectively ignored ++# ++sub ocf_is_ver { ++ return $ARG[0] =~ /^[0-9][0-9.-]*[0-9]$/; ++} ++ ++sub ocf_ver2num { ++ my $v = 0; ++ ++ $v = $v * 1000 + $1 while $ARG[0] =~ /(\d+)/g; ++ ++ return $v; ++} ++ ++sub ocf_ver_level { ++ my $v = () = $ARG[0] =~ /(\d+)/g; ++ return $v; ++} ++ ++sub ocf_ver_complete_level { ++ my $ver = shift; ++ my $level = shift; ++ my $i = 0; ++ ++ for ( my $i = 0; $i < $level; $i++ ) { ++ $ver .= "$ver.0"; ++ } ++ ++ return $ver; ++} ++ ++# usage: ocf_version_cmp VER1 VER2 ++# version strings can contain digits, dots, and dashes ++# must start and end with a digit ++# returns: ++# 0: VER1 smaller (older) than VER2 ++# 1: versions equal ++# 2: VER1 greater (newer) than VER2 ++# 3: bad format ++sub ocf_version_cmp { ++ my $v1 = shift; ++ my $v2 = shift; ++ my $v1_level; ++ my $v2_level; ++ my $level_diff; ++ ++ return 3 unless ocf_is_ver( $v1 ); ++ return 3 unless ocf_is_ver( $v2 ); ++ ++ $v1_level = ocf_ver_level( $v1 ); ++ $v2_level = ocf_ver_level( $v2 ); ++ ++ if ( $v1_level < $v2_level ) { ++ $level_diff = $v2_level - $v1_level; ++ $v1 = ocf_ver_complete_level( $v1, $level_diff ); ++ } ++ elsif ( $v1_level > $v2_level ) { ++ $level_diff = $v1_level - $v2_level; ++ $v2 = ocf_ver_complete_level( $v2, $level_diff ); ++ } ++ ++ $v1 = ocf_ver2num( $v1 ); ++ $v2 = ocf_ver2num( $v2 ); ++ ++ if ( $v1 == $v2 ) { return 1; } ++ elsif ( $v1 < $v2 ) { return 0; } ++ ++ return 2; # -1 would look funny in shell ;-) ( T.N. not in perl ;) ) ++} ++ ++sub ocf_local_nodename { ++ # use crm_node -n for pacemaker > 1.1.8 ++ my $nodename; ++ ++ qx{ which pacemakerd > /dev/null 2>&1 }; ++ if ( $? == 0 ) { ++ my $version; ++ my $ret = qx{ pacemakerd -\$ }; ++ ++ $ret =~ /Pacemaker ([\d.]+)/; ++ $version = $1; ++ ++ if ( ocf_version_cmp( $version, '1.1.8' ) == 2 ) { ++ qx{ which crm_node > /dev/null 2>&1 }; ++ $nodename = qx{ crm_node -n } if $? == 0; ++ } ++ } ++ else { ++ # otherwise use uname -n ++ $nodename = qx { uname -n }; ++ } ++ ++ chomp $nodename; ++ return $nodename; ++} ++ ++# Parse and returns the notify environment variables in a convenient structure ++# Returns undef if the action is not a notify ++# Returns undef if the resource is neither a clone or a multistate one ++sub ocf_notify_env { ++ my $i; ++ my %notify_env; ++ ++ return undef unless $__OCF_ACTION eq 'notify'; ++ ++ return undef unless ocf_is_clone() or ocf_is_ms(); ++ ++ %notify_env = ( ++ 'type' => $ENV{'OCF_RESKEY_CRM_meta_notify_type'} || '', ++ 'operation' => $ENV{'OCF_RESKEY_CRM_meta_notify_operation'} || '', ++ 'active' => [ ], ++ 'inactive' => [ ], ++ 'start' => [ ], ++ 'stop' => [ ], ++ ); ++ ++ for my $action ( qw{ active start stop } ) { ++ next unless ++ defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"} ++ and defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"}; ++ ++ $i = 0; ++ $notify_env{ $action }[$i++]{'rsc'} = $_ foreach split /\s+/ => ++ $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"}; ++ ++ $i = 0; ++ $notify_env{ $action }[$i++]{'uname'} = $_ foreach split /\s+/ => ++ $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"}; ++ } ++ ++ # notify_nactive_uname doesn't exists. 
See:
++    # http://lists.clusterlabs.org/pipermail/developers/2017-January/000406.html
++    if ( defined $ENV{"OCF_RESKEY_CRM_meta_notify_inactive_resource"} ) {
++        $i = 0;
++        $notify_env{'inactive'}[$i++]{'rsc'} = $_ foreach split /\s+/ =>
++            $ENV{"OCF_RESKEY_CRM_meta_notify_inactive_resource"};
++    }
++
++    # exit if the resource is not a multistate one
++    return %notify_env unless ocf_is_ms();
++
++    for my $action ( qw{ master slave promote demote } ) {
++        $notify_env{ $action } = [ ];
++
++        next unless
++            defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"}
++            and defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"};
++
++        $i = 0;
++        $notify_env{ $action }[$i++]{'rsc'} = $_ foreach split /\s+/ =>
++            $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"};
++
++        $i = 0;
++        $notify_env{ $action }[$i++]{'uname'} = $_ foreach split /\s+/ =>
++            $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"};
++    }
++
++    # Fix active and inactive fields for Pacemaker version < 1.1.16
++    # ie. crm_feature_set < 3.0.11
++    # See http://lists.clusterlabs.org/pipermail/developers/2016-August/000265.html
++    # and git commit a6713c5d40327eff8549e7f596501ab1785b8765
++    if (
++        ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.11' ) == 0
++    ) {
++        $notify_env{ 'active' } = [
++            @{ $notify_env{ 'master' } },
++            @{ $notify_env{ 'slave' } }
++        ];
++    }
++
++    return %notify_env;
++}
++
++$__OCF_ACTION = $ARGV[0];
++
++# Return to sanity for the agents...
++
++undef $ENV{'LC_ALL'};
++$ENV{'LC_ALL'} = 'C';
++setlocale( LC_ALL, 'C' );
++undef $ENV{'LANG'};
++undef $ENV{'LANGUAGE'};
++
++$ENV{'OCF_ROOT'} = '/usr/lib/ocf'
++    unless defined $ENV{'OCF_ROOT'} and $ENV{'OCF_ROOT'} ne '';
++
++# old
++undef $ENV{'OCF_FUNCTIONS_DIR'}
++    if defined $ENV{'OCF_FUNCTIONS_DIR'}
++    and $ENV{'OCF_FUNCTIONS_DIR'} eq "$ENV{'OCF_ROOT'}/resource.d/heartbeat";
++
++# Define OCF_RESKEY_CRM_meta_interval in case it isn't already set,
++# to make sure that ocf_is_probe() always works
++$ENV{'OCF_RESKEY_CRM_meta_interval'} = 0
++    unless defined $ENV{'OCF_RESKEY_CRM_meta_interval'};
++
++# Strip the OCF_RESKEY_ prefix from this particular parameter
++if ( defined $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'}
++    and $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'} ne ''
++) {
++    $ENV{'OCF_CHECK_LEVEL'} = $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'};
++}
++else {
++    $ENV{'OCF_CHECK_LEVEL'} = 0;
++}
++
++unless ( -d $ENV{'OCF_ROOT'} ) {
++    ha_log( "ERROR: OCF_ROOT points to non-directory $ENV{'OCF_ROOT'}." );
++    $! = $OCF_ERR_GENERIC;
++    die;
++}
++
++$ENV{'OCF_RESOURCE_TYPE'} = $__SCRIPT_NAME
++    unless defined $ENV{'OCF_RESOURCE_TYPE'}
++    and $ENV{'OCF_RESOURCE_TYPE'} ne '';
++
++unless ( defined $ENV{'OCF_RA_VERSION_MAJOR'}
++    and $ENV{'OCF_RA_VERSION_MAJOR'} ne ''
++) {
++    # We are being invoked as an init script.
++    # Fill in some things with reasonable values.
++    $ENV{'OCF_RESOURCE_INSTANCE'} = 'default';
++    return 1;
++}
++
++$ENV{'OCF_RESOURCE_INSTANCE'} = "undef" if $__OCF_ACTION eq 'meta-data';
++
++unless ( defined $ENV{'OCF_RESOURCE_INSTANCE'}
++    and $ENV{'OCF_RESOURCE_INSTANCE'} ne ''
++) {
++    ha_log( "ERROR: Need to tell us our resource instance name." );
++    $! = $OCF_ERR_ARGS;
++    die;
++}
++
++1;
++
++
++=head1 COPYRIGHT AND LICENSE
++
++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++Licensed under the PostgreSQL License.
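For orientation, here is a minimal sketch of how an agent typically consumes the helpers above (the version strings, resource and node names are hypothetical and for illustration only):

    use OCF_Functions;

    # ocf_version_cmp() returns 0 (VER1 older), 1 (equal), 2 (VER1 newer)
    # or 3 (bad format):
    ocf_version_cmp( '9.6.2', '10' );       # 0: 9.6.2 is older than 10
    ocf_version_cmp( '1.1.8', '1.1.8' );    # 1: versions are equal
    ocf_version_cmp( '2.0.1', '1.1.24' );   # 2: 2.0.1 is newer than 1.1.24
    ocf_version_cmp( 'v2.3', '1.0' );       # 3: leading letter fails ocf_is_ver()

    # During a notify action on a clone or multistate resource,
    # ocf_notify_env() returns a hash such as:
    #   type      => 'pre',
    #   operation => 'promote',
    #   master    => [ { rsc => 'pgsqld:0', uname => 'srv1' } ],
    #   promote   => [ { rsc => 'pgsqld:1', uname => 'srv2' } ],
    #   ...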
+diff --color -uNr a/heartbeat/OCF_ReturnCodes.pm b/heartbeat/OCF_ReturnCodes.pm
+--- a/heartbeat/OCF_ReturnCodes.pm	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/OCF_ReturnCodes.pm	2021-04-13 13:37:35.621267404 +0200
+@@ -0,0 +1,97 @@
++#!/usr/bin/perl
++# This program is open source, licensed under the PostgreSQL License.
++# For license terms, see the LICENSE file.
++#
++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault
++
++=head1 NAME
++
++OCF_ReturnCodes - Common variables for the OCF Resource Agents supplied by
++heartbeat.
++
++=head1 SYNOPSIS
++
++  use FindBin;
++  use lib "$FindBin::RealBin/../../lib/heartbeat/";
++
++  use OCF_ReturnCodes;
++
++=head1 DESCRIPTION
++
++This module has been ported from the ocf-returncodes shell script of the
++resource-agents project. See L<https://github.com/ClusterLabs/resource-agents/>.
++
++=head1 VARIABLES
++
++Here are the variables exported by this module:
++
++=over
++
++=item $OCF_SUCCESS
++
++=item $OCF_ERR_GENERIC
++
++=item $OCF_ERR_ARGS
++
++=item $OCF_ERR_UNIMPLEMENTED
++
++=item $OCF_ERR_PERM
++
++=item $OCF_ERR_INSTALLED
++
++=item $OCF_ERR_CONFIGURED
++
++=item $OCF_NOT_RUNNING
++
++=item $OCF_RUNNING_MASTER
++
++=item $OCF_FAILED_MASTER
++
++=back
++
++=cut
++
++package OCF_ReturnCodes;
++
++use strict;
++use warnings;
++use 5.008;
++
++BEGIN {
++    use Exporter;
++
++    our $VERSION   = 'v2.3.0';
++    our @ISA       = ('Exporter');
++    our @EXPORT    = qw(
++        $OCF_SUCCESS
++        $OCF_ERR_GENERIC
++        $OCF_ERR_ARGS
++        $OCF_ERR_UNIMPLEMENTED
++        $OCF_ERR_PERM
++        $OCF_ERR_INSTALLED
++        $OCF_ERR_CONFIGURED
++        $OCF_NOT_RUNNING
++        $OCF_RUNNING_MASTER
++        $OCF_FAILED_MASTER
++    );
++    our @EXPORT_OK = ( @EXPORT );
++}
++
++our $OCF_SUCCESS           = 0;
++our $OCF_ERR_GENERIC       = 1;
++our $OCF_ERR_ARGS          = 2;
++our $OCF_ERR_UNIMPLEMENTED = 3;
++our $OCF_ERR_PERM          = 4;
++our $OCF_ERR_INSTALLED     = 5;
++our $OCF_ERR_CONFIGURED    = 6;
++our $OCF_NOT_RUNNING       = 7;
++our $OCF_RUNNING_MASTER    = 8;
++our $OCF_FAILED_MASTER     = 9;
++
++1;
++
++=head1 COPYRIGHT AND LICENSE
++
++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++Licensed under the PostgreSQL License.
+diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
+--- a/heartbeat/pgsqlms	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/pgsqlms	2021-04-13 13:37:40.934280411 +0200
+@@ -0,0 +1,2308 @@
++#!/usr/bin/perl
++# This program is open source, licensed under the PostgreSQL License.
++# For license terms, see the LICENSE file.
++#
++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault
++
++=head1 NAME
++
++ocf_heartbeat_pgsqlms - A PostgreSQL multi-state resource agent for Pacemaker
++
++=head1 SYNOPSIS
++
++B<pgsqlms> [start | stop | monitor | promote | demote | notify | reload | methods | meta-data | validate-all]
++
++=head1 DESCRIPTION
++
++Resource script for PostgreSQL in replication. It manages PostgreSQL servers using streaming replication as an HA resource.
++
++=cut
++
++use strict;
++use warnings;
++use 5.008;
++
++use POSIX qw(locale_h);
++use Scalar::Util qw(looks_like_number);
++use File::Spec;
++use File::Temp;
++use Data::Dumper;
++
++my $OCF_FUNCTIONS_DIR;
++BEGIN {
++    $OCF_FUNCTIONS_DIR = defined $ENV{'OCF_FUNCTIONS_DIR'} ?
"$ENV{'OCF_FUNCTIONS_DIR'}" : "$ENV{'OCF_ROOT'}/lib/heartbeat"; ++} ++use lib "$OCF_FUNCTIONS_DIR"; ++ ++use OCF_ReturnCodes; ++use OCF_Directories; ++use OCF_Functions; ++ ++our $VERSION = 'v2.3.0'; ++our $PROGRAM = 'pgsqlms'; ++ ++# OCF environment ++my $OCF_RESOURCE_INSTANCE = $ENV{'OCF_RESOURCE_INSTANCE'}; ++my $OCF_RUNNING_SLAVE = $OCF_SUCCESS; ++my %OCF_NOTIFY_ENV = ocf_notify_env() if $__OCF_ACTION eq 'notify'; ++ ++# Default parameters values ++my $system_user_default = "postgres"; ++my $bindir_default = "/usr/bin"; ++my $pgdata_default = "/var/lib/pgsql/data"; ++my $pghost_default = "/tmp"; ++my $pgport_default = 5432; ++my $start_opts_default = ""; ++my $maxlag_default = "0"; ++ ++# Set default values if not found in environment ++my $system_user = $ENV{'OCF_RESKEY_system_user'} || $system_user_default; ++my $bindir = $ENV{'OCF_RESKEY_bindir'} || $bindir_default; ++my $pgdata = $ENV{'OCF_RESKEY_pgdata'} || $pgdata_default; ++my $datadir = $ENV{'OCF_RESKEY_datadir'} || $pgdata; ++my $pghost = $ENV{'OCF_RESKEY_pghost'} || $pghost_default; ++my $pgport = $ENV{'OCF_RESKEY_pgport'} || $pgport_default; ++my $start_opts = $ENV{'OCF_RESKEY_start_opts'} || $start_opts_default; ++my $maxlag = $ENV{'OCF_RESKEY_maxlag'} || $maxlag_default; ++my $recovery_tpl = $ENV{'OCF_RESKEY_recovery_template'} ++ || "$pgdata/recovery.conf.pcmk"; ++ ++ ++# PostgreSQL commands path ++my $POSTGRES = "$bindir/postgres"; ++my $PGCTL = "$bindir/pg_ctl"; ++my $PGPSQL = "$bindir/psql"; ++my $PGCTRLDATA = "$bindir/pg_controldata"; ++my $PGISREADY = "$bindir/pg_isready"; ++my $PGWALDUMP = "$bindir/pg_waldump"; ++ ++# pacemaker commands path ++my $CRM_MASTER = "$HA_SBIN_DIR/crm_master --lifetime forever"; ++my $CRM_NODE = "$HA_SBIN_DIR/crm_node"; ++my $CRM_RESOURCE = "$HA_SBIN_DIR/crm_resource"; ++my $ATTRD_PRIV = "$HA_SBIN_DIR/attrd_updater --private --lifetime reboot"; ++ ++# Global vars ++my $nodename; ++my $exit_code = 0; ++# numeric pgsql versions ++my $PGVERNUM; ++my $PGVER_93 = 90300; ++my $PGVER_10 = 100000; ++my $PGVER_12 = 120000; ++ ++# Run a query using psql. ++# ++# This function returns an array with psql return code as first element and ++# the result as second one. ++# ++sub _query { ++ my $query = shift; ++ my $res = shift; ++ my $connstr = "dbname=postgres"; ++ my $RS = chr(30); # ASCII RS (record separator) ++ my $FS = chr(3); # ASCII ETX (end of text) ++ my $postgres_uid = getpwnam( $system_user ); ++ my $oldeuid = $>; ++ my $tmpfile; ++ my @res; ++ my $ans; ++ my $pid; ++ my $rc; ++ ++ unless ( defined $res and defined $query and $query ne '' ) { ++ ocf_log( 'debug', '_query: wrong parameters!' ); ++ return -1; ++ } ++ ++ unless ( $tmpfile = File::Temp->new( ++ TEMPLATE => 'pgsqlms-XXXXXXXX', ++ DIR => $HA_RSCTMP ++ ) ) ++ { ++ ocf_exit_reason( 'Could not create or write in a temp file' ); ++ exit $OCF_ERR_INSTALLED; ++ } ++ ++ print $tmpfile $query; ++ chmod 0644, $tmpfile; ++ ++ ocf_log( 'debug', '_query: %s', $query ); ++ ++ # Change the effective user to the given system_user so after forking ++ # the given uid to the process should allow psql to connect w/o password ++ $> = $postgres_uid; ++ ++ # Forking + piping ++ $pid = open(my $KID, "-|"); ++ ++ if ( $pid == 0 ) { # child ++ exec $PGPSQL, '--set', 'ON_ERROR_STOP=1', '-qXAtf', $tmpfile, ++ '-R', $RS, '-F', $FS, '--port', $pgport, '--host', $pghost, ++ $connstr; ++ } ++ ++ # parent ++ $> = $oldeuid; ++ ++ { ++ local $/; ++ $ans = <$KID>; ++ } ++ ++ close $KID; ++ $rc = $? 
>> 8; ++ ++ ocf_log( 'debug', '_query: psql return code: %d', $rc ); ++ ++ if ( defined $ans ) { ++ chop $ans; ++ ++ push @{ $res }, [ split(chr(3) => $_, -1) ] ++ foreach split (chr(30) => $ans, -1); ++ ++ ocf_log( 'debug', '_query: @res: %s', ++ Data::Dumper->new( [ $res ] )->Terse(1)->Dump ); ++ } ++ ++ # Possible return codes: ++ # -1: wrong parameters ++ # 0: OK ++ # 1: failed to get resources (memory, missing file, ...) ++ # 2: unable to connect ++ # 3: query failed ++ return $rc; ++} ++ ++# Get the last received location on a standby ++# if the first argument is true, returns the value as decimal ++# if the first argument is false, returns the value as LSN ++# Returns undef if query failed ++sub _get_last_received_lsn { ++ my ( $dec ) = @_; ++ my $pg_last_wal_receive_lsn = 'pg_last_wal_receive_lsn()'; ++ my $pg_wal_lsn_diff = 'pg_wal_lsn_diff'; ++ my $query; ++ my $rc; ++ my @rs; ++ ++ if ( $PGVERNUM < $PGVER_10 ) { ++ $pg_last_wal_receive_lsn = 'pg_last_xlog_receive_location()'; ++ $pg_wal_lsn_diff = 'pg_xlog_location_diff'; ++ } ++ ++ if ( $dec ) { ++ $query = "SELECT $pg_wal_lsn_diff( $pg_last_wal_receive_lsn, '0/0' )"; ++ } ++ else { ++ $query = "SELECT $pg_last_wal_receive_lsn"; ++ } ++ ++ $rc = _query( $query, \@rs ); ++ ++ return $rs[0][0] if $rc == 0 and $rs[0][0]; ++ ++ ocf_log( 'err', 'Could not query last received LSN (%s)', $rc ) if $rc != 0; ++ ocf_log( 'err', 'No values for last received LSN' ) ++ if $rc == 0 and not $rs[0][0]; ++ ++ return undef; ++} ++ ++# Get the master score for each connected standby ++# Returns directly the result set of the query or exit with an error. ++# Exits with OCF_ERR_GENERIC if the query failed ++sub _get_lag_scores { ++ my $pg_current_wal_lsn = 'pg_current_wal_lsn()'; ++ my $pg_wal_lsn_diff = 'pg_wal_lsn_diff'; ++ my $write_lsn = 'write_lsn'; ++ my $query; ++ my $rc; ++ my @rs; ++ ++ if ( $PGVERNUM < $PGVER_10 ) { ++ $pg_current_wal_lsn = 'pg_current_xlog_location()'; ++ $pg_wal_lsn_diff = 'pg_xlog_location_diff'; ++ $write_lsn = 'write_location'; ++ } ++ ++ # We check locations of connected standbies by querying the ++ # "pg_stat_replication" view. ++ # The row_number applies on the result set ordered on write_location ASC so ++ # the highest row_number should be given to the closest node from the ++ # master, then the lowest node name (alphanumeric sort) in case of equality. ++ # The result set itself is order by priority DESC to process best known ++ # candidate first. 
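++    # For illustration: with three healthy standbies the query below yields
++    # one row per standby with priority 1000, 990 or 980, following its
++    # row_number in the window function above. When maxlag is set to a value
++    # greater than 0, a standby lagging by more than maxlag bytes gets its
++    # priority negated (eg. -1000); _check_locations() later applies these
++    # priorities as master scores. Standbies in state 'startup' or 'backup'
++    # are ranked in their own partition and are excluded from promotion by
++    # _check_locations() anyway.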
++ $query = qq{ ++ SELECT application_name, priority, location, state, current_lag ++ FROM ( ++ SELECT application_name, ++ (1000 - ( ++ row_number() OVER ( ++ PARTITION BY state IN ('startup', 'backup') ++ ORDER BY location ASC, application_name ASC ++ ) - 1 ++ ) * 10 ++ ) * CASE WHEN ( $maxlag > 0 ++ AND current_lag > $maxlag) ++ THEN -1 ++ ELSE 1 ++ END AS priority, ++ location, state, current_lag ++ FROM ( ++ SELECT application_name, $write_lsn AS location, state, ++ $pg_wal_lsn_diff($pg_current_wal_lsn, $write_lsn) AS current_lag ++ FROM pg_stat_replication ++ ) AS s2 ++ ) AS s1 ++ ORDER BY priority DESC ++ }; ++ ++ $rc = _query( $query, \@rs ); ++ ++ if ( $rc != 0 ) { ++ ocf_exit_reason( 'Query to get standby locations failed (%d)', $rc ); ++ exit $OCF_ERR_GENERIC; ++ } ++ ++ return \@rs; ++} ++ ++# get the timeout for the current action given from environment var ++# Returns timeout as integer ++# undef if unknown ++sub _get_action_timeout { ++ my $timeout = $ENV{'OCF_RESKEY_CRM_meta_timeout'} / 1000; ++ ++ ocf_log( 'debug', '_get_action_timeout: known timeout: %s', ++ defined $timeout ? $timeout : 'undef' ); ++ ++ return $timeout if defined $timeout and $timeout =~ /^\d+$/; ++ ++ return undef; ++} ++ ++# Get, parse and return the value of the given private attribute name ++# Returns an empty string if not found. ++sub _get_priv_attr { ++ my ( $name, $node ) = @_; ++ my $val = ''; ++ my $node_arg = ''; ++ my $ans; ++ ++ $node = '' unless defined $node; ++ $name = "$name-$OCF_RESOURCE_INSTANCE"; ++ ++ $node_arg= "--node $node" if $node ne ''; ++ ++ $ans = qx{ $ATTRD_PRIV --name "$name" --query $node_arg }; ++ ++ $ans =~ m/^name=".*" host=".*" value="(.*)"$/; ++ ++ $val = $1 if defined $1; ++ ++ ocf_log( 'debug', '_get_priv_attr: value of "%s"%s is "%s"', $name, ++ ( $node ? " on \"$node\"": ""), ++ $val ); ++ ++ return $val; ++} ++ ++# Set the given private attribute name to the given value ++# As setting an attribute is asynchronous, this will return as soon as the ++# attribute is really set by attrd and available. ++sub _set_priv_attr { ++ my ( $name, $val ) = @_; ++ my $name_instance = "$name-$OCF_RESOURCE_INSTANCE"; ++ ++ ocf_log( 'debug', '_set_priv_attr: set "%s=%s"...', $name_instance, $val ); ++ ++ qx{ $ATTRD_PRIV --name "$name_instance" --update "$val" }; ++ ++ # give attr name without the resource instance name as _get_priv_attr adds ++ # it as well ++ while ( _get_priv_attr( $name ) ne $val ) { ++ ocf_log( 'debug', '_set_priv_attr: waiting attrd ack for "%s"...', $name_instance ); ++ select( undef, undef, undef, 0.1 ); ++ } ++ ++ return; ++} ++ ++# Delete the given private attribute. ++# As setting an attribute is asynchronous, this will return as soon as the ++# attribute is really deleted by attrd. ++sub _delete_priv_attr { ++ my ( $name ) = @_; ++ my $name_instance = "$name-$OCF_RESOURCE_INSTANCE"; ++ ++ ocf_log( 'debug', '_delete_priv_attr: delete "%s"...', $name_instance ); ++ ++ qx{ $ATTRD_PRIV --name "$name_instance" --delete }; ++ ++ # give attr name without the resource instance name as _get_priv_attr adds ++ # it as well ++ while ( _get_priv_attr( $name ) ne '' ) { ++ ocf_log( 'debug', '_delete_priv_attr: waiting attrd ack for "%s"...', ++ $name_instance ); ++ select( undef, undef, undef, 0.1 ); ++ } ++ ++ return; ++} ++ ++# Get, parse and return the resource master score on given node. ++# Returns an empty string if not found. 
++# Returns undef on crm_master error
++sub _get_master_score {
++    my ( $node ) = @_;
++    my $node_arg = '';
++    my $score;
++
++    $node_arg = sprintf '--node "%s"', $node if defined $node and $node ne '';
++
++    $score = qx{ $CRM_MASTER --quiet --get-value $node_arg 2> /dev/null };
++
++    return '' unless $? == 0 and defined $score;
++
++    chomp $score;
++
++    return $score;
++}
++
++# Set the master score of the local node or the optionally given node.
++# As setting an attribute is asynchronous, this will return as soon as the
++# attribute is really set by attrd and available everywhere.
++sub _set_master_score {
++    my ( $score, $node ) = @_;
++    my $node_arg = '';
++    my $tmp;
++
++    $node_arg = sprintf '--node "%s"', $node if defined $node and $node ne '';
++
++    qx{ $CRM_MASTER $node_arg --quiet --update "$score" };
++
++    while ( ( $tmp = _get_master_score( $node ) ) ne $score ) {
++        ocf_log( 'debug',
++            '_set_master_score: waiting to set score to "%s" (currently "%s")...',
++            $score, $tmp );
++        select(undef, undef, undef, 0.1);
++    }
++
++    return;
++}
++
++# _master_score_exists
++# This subroutine checks if a master score is set for one of the clones of
++# the resource in the cluster, with a score greater than or equal to 0.
++# Returns 1 if at least one master score >= 0 is found.
++# Returns 0 otherwise
++sub _master_score_exists {
++    my @partition_nodes = split /\s+/ => qx{ $CRM_NODE --partition };
++
++    foreach my $node ( @partition_nodes ) {
++        my $score = _get_master_score( $node );
++
++        return 1 if defined $score and $score ne '' and $score > -1;
++    }
++
++    return 0;
++}
++
++# Check if the current transition is a recovery of a master clone on the given node.
++sub _is_master_recover {
++    my ( $n ) = @_;
++
++    return (
++        scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'master'} }
++        and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'promote'} }
++    );
++}
++
++# Check if the current transition is a recovery of a slave clone on the given node.
++sub _is_slave_recover {
++    my ( $n ) = @_;
++
++    return (
++        scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'slave'} }
++        and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'start'} }
++    );
++}
++
++# Check if the current transition is a switchover to the given node.
++sub _is_switchover {
++    my ( $n ) = @_;
++    my $old = $OCF_NOTIFY_ENV{'master'}[0]{'uname'};
++
++    return 0 if scalar @{ $OCF_NOTIFY_ENV{'master'} } != 1
++        or scalar @{ $OCF_NOTIFY_ENV{'demote'} } != 1
++        or scalar @{ $OCF_NOTIFY_ENV{'promote'} } != 1;
++
++    return (
++        scalar grep { $_->{'uname'} eq $old } @{ $OCF_NOTIFY_ENV{'demote'} }
++        and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'slave'} }
++        and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'promote'} }
++        and not scalar grep { $_->{'uname'} eq $old } @{ $OCF_NOTIFY_ENV{'stop'} }
++    );
++}
++
++# Run the given command as the "system_user" given as parameter.
++# It basically forks and seteuid/setuid away from root.
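++# In the forked child, the group list is set first: $) receives the primary
++# group of system_user plus any supplementary groups found via getgrent(),
++# then the real and effective UIDs are dropped to system_user before exec'ing
++# the command. The parent simply waits for the child and returns its exit code.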
++# ++sub _runas { ++ my $rc; ++ my $pid; ++ my @cmd = @_; ++ my (undef, undef, $postgres_uid, $postgres_gid ) = getpwnam( $system_user ); ++ ++ $pid = fork; ++ ++ if ( $pid == 0 ) { # in child ++ $) = "$postgres_gid $postgres_gid"; ++ while ( my ( undef, undef, $gid, $members ) = getgrent ) { ++ $) .= " $gid" if grep { $system_user eq $_ } split /\s+/, $members ++ } ++ $( = $postgres_gid; ++ ++ $< = $> = $postgres_uid; ++ ++ exec @cmd; ++ } ++ ++ ocf_log( 'debug', '_runas: launching as "%s" command "%s"', $system_user, ++ join(' ', @cmd) ); ++ ++ waitpid $pid, 0; ++ $rc = $? >> 8; ++ ++ return $rc; ++} ++ ++# Check if instance is listening on the given host/port. ++# ++sub _pg_isready { ++ # Add 60s to the timeout or use a 24h timeout fallback to make sure ++ # Pacemaker will give up before us and take decisions ++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60; ++ my $rc = _runas( $PGISREADY, '-h', $pghost, '-p', $pgport, '-d', 'postgres', '-t', $timeout ); ++ ++ # Possible error codes: ++ # 1: ping rejected (usually when instance is in startup, in crash ++ # recovery, in warm standby, or when a shutdown is in progress) ++ # 2: no response, usually means the instance is down ++ # 3: no attempt, probably a syntax error, should not happen ++ return $rc; ++} ++ ++# Check the postmaster.pid file and the postmaster process. ++# WARNING: we do not distinguish the scenario where postmaster.pid does not ++# exist from the scenario where the process is still alive. It should be ok ++# though, as this is considered a hard error from monitor. ++# ++sub _pg_ctl_status { ++ my $rc = _runas( $PGCTL, '--pgdata', $pgdata, 'status' ); ++ ++ # Possible error codes: ++ # 3: postmaster.pid file does not exist OR it does but the process ++ # with the PID found in the file is not alive ++ return $rc; ++} ++ ++# Start the local instance using pg_ctl ++# ++sub _pg_ctl_start { ++ # Add 60s to the timeout or use a 24h timeout fallback to make sure ++ # Pacemaker will give up before us and take decisions ++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60; ++ ++ my @cmd = ( $PGCTL, '--pgdata', $pgdata, '-w', '--timeout', $timeout, 'start' ); ++ ++ push @cmd => ( '-o', $start_opts ) if $start_opts ne ''; ++ ++ return _runas( @cmd ); ++} ++ ++# Enable the Standby mode. ++# ++# Up to v11, creates the recovery.conf file based on the given template. ++# Since v12, creates standby.signal. ++sub _enable_recovery { ++ my $fh; ++ my $content = ''; ++ my $standby_file = "$datadir/standby.signal"; ++ my (undef, undef, $uid, $gid) = getpwnam($system_user); ++ ++ if ( $PGVERNUM < $PGVER_12 ) { ++ $standby_file = "$datadir/recovery.conf"; ++ ++ ocf_log( 'debug', ++ '_enable_recovery: get replication configuration from the template file "%s"', ++ $recovery_tpl ); ++ ++ # Create the recovery.conf file to start the instance as a secondary. ++ # NOTE: the recovery.conf is supposed to be set up so the secondary can ++ # connect to the primary instance, eg. using a virtual IP address. ++ # As there is no primary instance available at startup, secondaries will ++ # complain about failing to connect. ++ # As we can not reload a recovery.conf file on a standby without restarting ++ # it, we will leave with this. ++ # FIXME how would the reload help us in this case ? ++ unless ( defined open( $fh, '<', $recovery_tpl ) ) { ++ ocf_exit_reason( 'Could not open file "%s": %s', $recovery_tpl, $! 
);
++            exit $OCF_ERR_CONFIGURED;
++        }
++
++        # Copy all parameters from the template file
++        while (my $line = <$fh>) {
++            chomp $line;
++            $content .= "$line\n";
++        }
++        close $fh;
++    }
++
++    ocf_log( 'debug', '_enable_recovery: write the standby file "%s"', $standby_file );
++
++    unless ( open( $fh, '>', $standby_file ) ) {
++        ocf_exit_reason( 'Could not open file "%s": %s', $standby_file, $! );
++        exit $OCF_ERR_CONFIGURED;
++    }
++
++    # Write the standby file. For v11 and below its content is copied from
++    # the template file; for v12 and above, standby.signal stays empty.
++    print $fh $content;
++
++    close $fh;
++
++    unless ( chown $uid, $gid, $standby_file ) {
++        ocf_exit_reason( 'Could not set owner of "%s"', $standby_file );
++        exit $OCF_ERR_CONFIGURED;
++    };
++}
++
++# Parse and return various information about the local PostgreSQL instance as
++# reported by its controldata file.
++#
++# WARNING: the status is NOT updated in case of crash.
++#
++# This sub exits the script with an error on failure
++sub _get_controldata {
++    my %controldata;
++    my $ans;
++
++    $ans = qx{ $PGCTRLDATA "$datadir" 2>/dev/null };
++
++    # Parse the output of pg_controldata.
++    # This output is quite stable between pg versions, but we might need to sort
++    # it at some point if things are moving in there...
++    $ans =~ m{
++        # get the current state
++        ^\QDatabase cluster state\E:\s+(.*?)\s*$
++        .*
++        # Get the latest known REDO location
++        ^\QLatest checkpoint's REDO location\E:\s+([/0-9A-F]+)\s*$
++        .*
++        # Get the latest known TL
++        ^\QLatest checkpoint's TimeLineID\E:\s+(\d+)\s*$
++        .*
++        # Get the wal level
++        # NOTE: pg_controldata output changed with PostgreSQL 9.5, so we need to
++        # account for both syntaxes
++        ^(?:\QCurrent \E)?\Qwal_level setting\E:\s+(.*?)\s*$
++    }smx;
++
++    $controldata{'state'}     = $1 if defined $1;
++    $controldata{'redo'}      = $2 if defined $2;
++    $controldata{'tl'}        = $3 if defined $3;
++    $controldata{'wal_level'} = $4 if defined $4;
++
++    ocf_log( 'debug',
++        "_get_controldata: found: %s",
++        Data::Dumper->new( [ \%controldata ] )->Terse(1)->Dump );
++
++    return %controldata if defined $controldata{'state'}
++        and defined $controldata{'tl'}
++        and defined $controldata{'redo'}
++        and defined $controldata{'wal_level'};
++
++    ocf_exit_reason( 'Could not read all data from the controldata file for "%s"',
++        $datadir );
++
++    ocf_log( 'debug',
++        "_get_controldata: controldata file: %s",
++        Data::Dumper->new( [ \%controldata ] )->Terse(1)->Dump, $ans );
++
++    exit $OCF_ERR_ARGS;
++}
++
++# Read the major version from datadir/PG_VERSION and return it as a numeric version
++sub _get_pg_version {
++    my $fh;
++    my $PGVERSION;
++    my $PGVERNUM;
++
++    # check PG_VERSION
++    if ( ! -s "$datadir/PG_VERSION" ) {
++        ocf_exit_reason( 'PG_VERSION does not exist in "%s"', $datadir );
++        exit $OCF_ERR_ARGS;
++    }
++
++    unless ( open( $fh, '<', "$datadir/PG_VERSION" ) ) {
++        ocf_exit_reason( "Could not open file \"$datadir/PG_VERSION\": $!" );
++        exit $OCF_ERR_ARGS;
++    }
++
++    read( $fh, $PGVERSION, 32 );
++    close $fh;
++
++    chomp $PGVERSION;
++
++    $PGVERSION =~ /^(\d+)(?:\.(\d+))?$/;
++    $PGVERNUM  = $1 * 10000;
++    $PGVERNUM += $2 * 100 if $1 < 10; # no 2nd num in the major version from v10
++
++    return $PGVERNUM;
++}
++
++# Use pg_controldata to check the state of the PostgreSQL server. This
++# function returns codes depending on this state, so we can find whether the
++# instance is a primary or a secondary, or use it to detect any inconsistency
++# that could indicate the instance has crashed.
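++# As a quick reference, the mapping implemented below is:
++#   "in production"                       => OCF_RUNNING_MASTER
++#   "in archive recovery"                 => OCF_SUCCESS
++#   "shut down" / "shut down in recovery" => OCF_NOT_RUNNING
++#   transitional states ("starting up", "shutting down",
++#     "in crash recovery", ...)           => wait one second, then check again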
++#
++sub _controldata_to_ocf {
++    my %cdata = _get_controldata();
++
++    while ( 1 ) {
++        ocf_log( 'debug', '_controldata: instance "%s" state is "%s"',
++            $OCF_RESOURCE_INSTANCE, $cdata{'state'} );
++
++        # Instance should be running as a primary.
++        return $OCF_RUNNING_MASTER if $cdata{'state'} eq "in production";
++
++        # Instance should be running as a secondary.
++        # This state includes warm standby (rejects connections attempts,
++        # including pg_isready)
++        return $OCF_SUCCESS if $cdata{'state'} eq "in archive recovery";
++
++
++        # The instance should be stopped.
++        # We don't care if it was a primary or secondary before, because we
++        # always start instances as secondaries, and then promote if necessary.
++        return $OCF_NOT_RUNNING if $cdata{'state'} eq "shut down"
++            or $cdata{'state'} eq "shut down in recovery";
++
++        # The state is "in crash recovery", "starting up" or "shutting down".
++        # This state should be transitional, so we wait and loop to check if
++        # it changes.
++        # If it does not, pacemaker will eventually abort with a timeout.
++        ocf_log( 'debug',
++            '_controldata: waiting for transitional state "%s" to finish',
++            $cdata{'state'} );
++        sleep 1;
++        %cdata = _get_controldata();
++    }
++
++    # If we reach this point, something went really wrong with this code or
++    # pg_controldata.
++    ocf_exit_reason( 'Unable to get instance "%s" state using pg_controldata',
++        $OCF_RESOURCE_INSTANCE );
++
++    return $OCF_ERR_INSTALLED ;
++}
++
++# Check the write_location of all secondaries, and adapt their master score so
++# that the instance closest to the master will be the selected candidate should
++# a promotion be triggered.
++# NOTE: This is only a hint to pacemaker! The selected candidate for promotion
++# actually re-checks that it is the best candidate and forces a re-election by
++# failing if a better one exists. This avoids a race condition between the call
++# of the monitor action and the promotion, where another slave might have
++# caught up faster with the master.
++# NOTE: we cannot directly use the write_location, nor an lsn_diff value, as
++# the promotion score, as Pacemaker considers any value greater than 1,000,000
++# as INFINITY.
++#
++# This sub must be executed from a master monitor action.
++#
++sub _check_locations {
++    my $partition_nodes;
++    my $node_score;
++    my $row_num;
++    my $row;
++    my @rs;
++
++    # Set the master score if not already done
++    $node_score = _get_master_score();
++    _set_master_score( '1001' ) unless $node_score eq '1001';
++
++    # Ask crm_node what nodes are present in our current cluster partition
++    $partition_nodes = qx{ $CRM_NODE --partition };
++
++    @rs = @{ _get_lag_scores() };
++
++    $row_num = scalar @rs;
++
++    # If no lag is reported at this point, it means that there is no
++    # secondary instance connected.
++    ocf_log( 'warning', 'No secondary connected to the master' )
++        if $row_num == 0;
++
++    # For each standby connected, set their master score based on the following
++    # rule: the first known node/application, with the highest priority and
++    # an acceptable state.
++    while ( $row = shift @rs ) {
++
++        if ( $partition_nodes !~ /$row->[0]/ ) {
++            ocf_log( 'info', 'Ignoring unknown application_name/node "%s"',
++                $row->[0] );
++            next;
++        }
++
++        if ( $row->[0] eq $nodename ) {
++            ocf_log( 'warning', 'Streaming replication with myself!' );
++            next;
++        }
++
++        $node_score = _get_master_score( $row->[0] );
++
++        if ( $row->[3] =~ /^\s*(?:startup|backup)\s*$/ ) {
++            # We exclude any standby being in state backup (pg_basebackup) or
++            # startup (new standby or failing standby)
++            ocf_log( 'info', 'Forbidding promotion on "%s" in state "%s"',
++                $row->[0], $row->[3] );
++
++            _set_master_score( '-1', $row->[0] ) unless $node_score eq '-1';
++        }
++        else {
++            ocf_log( 'debug',
++                '_check_locations: checking "%s" promotion ability (current_score: %s, priority: %s, location: %s, lag: %s)',
++                $row->[0], $node_score, $row->[1], $row->[2], $row->[4] );
++
++            if ( $node_score ne $row->[1] ) {
++                if ( $row->[1] < -1 ) {
++                    ocf_log( 'info', 'Update score of "%s" from %s to %s because replication lag (%s) is higher than given maxlag (%s).',
++                        $row->[0], $node_score, $row->[1], $row->[4], $maxlag );
++                }
++                else {
++                    ocf_log( 'info', 'Update score of "%s" from %s to %s because of a change in the replication lag (%s).',
++                        $row->[0], $node_score, $row->[1], $row->[4] );
++                }
++                _set_master_score( $row->[1], $row->[0] );
++            }
++            else {
++                ocf_log( 'debug',
++                    '_check_locations: "%s" keeps its current score of %s',
++                    $row->[0], $row->[1] );
++            }
++        }
++
++        # Remove this node from the known nodes list.
++        $partition_nodes =~ s/(?:^|\s)$row->[0](?:\s|$)/ /g;
++    }
++
++    $partition_nodes =~ s/(?:^\s+)|(?:\s+$)//g;
++
++    # If there are still nodes in "partition_nodes", it means there is no
++    # corresponding line in "pg_stat_replication".
++    # Exclude these nodes that are not part of the cluster at this
++    # point.
++    foreach my $node (split /\s+/ => $partition_nodes) {
++        # Exclude the current node.
++        next if $node eq $nodename;
++
++        # do not warn if the master score is already set to -1000.
++        # this avoids log flooding (gh #138)
++        $node_score = _get_master_score( $node );
++        next if $node_score eq '-1000';
++
++        ocf_log( 'warning', '"%s" is not connected to the primary', $node );
++        _set_master_score( '-1000', $node );
++    }
++
++    return $OCF_SUCCESS;
++}
++
++# _check_switchover
++# check if the pgsql switchover to the local node is safe.
++# This is supposed to be called **after** the master has been stopped or demoted.
++# This sub checks if the local standby received the shutdown checkpoint from the
++# old master to make sure it can take over the master role and the old master
++# will be able to catch up as a standby afterwards.
++#
++# Returns 0 if switchover is safe
++# Returns 1 if switchover is not safe
++# Returns 2 for internal error
++sub _check_switchover {
++    my $has_sht_chk = 0;
++    my $last_redo;
++    my $last_lsn;
++    my $ans;
++    my $rc;
++    my $tl;
++    my %cdata;
++
++    $PGWALDUMP = "$bindir/pg_xlogdump" if $PGVERNUM < $PGVER_10;
++
++    ocf_log( 'info', 'Switchover in progress from "%s" to "%s".'
++        .' Need to check the last record in WAL',
++        $OCF_NOTIFY_ENV{'demote'}[0]{'uname'}, $nodename );
++
++    # check if we received the shutdown checkpoint of the master during its
++    # demote process.
++    # We need the last local checkpoint LSN and the last received LSN from
++    # master to check in the WAL between these addresses if we have a
++    # "checkpoint shutdown" using pg_xlogdump/pg_waldump.
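++    # For reference, a shutdown checkpoint shows up in the pg_waldump output
++    # as a line similar to the following (illustrative only, fields vary
++    # across PostgreSQL versions):
++    #   rmgr: XLOG ... desc: CHECKPOINT_SHUTDOWN redo 0/5000028; tli 1; ...; shutdown
++    # which is what the regexp below matches.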
++    #
++    # Force a checkpoint to make sure the controldata shows the very last TL
++    # and the master's shutdown checkpoint
++    _query( q{ CHECKPOINT }, {} );
++    %cdata     = _get_controldata();
++    $tl        = $cdata{'tl'};
++    $last_redo = $cdata{'redo'};
++
++    # Get the last received LSN from master
++    $last_lsn = _get_last_received_lsn();
++
++    unless ( defined $last_lsn ) {
++        ocf_exit_reason( 'Could not fetch last received LSN!' );
++
++        return 2;
++    }
++
++    $ans = qx{ $PGWALDUMP --path "$datadir" --timeline "$tl" \\
++        --start "$last_redo" --end "$last_lsn" 2>&1 };
++    $rc = $?;
++
++    ocf_log( 'debug',
++        '_check_switchover: %s rc: "%s", tl: "%s", last_chk: %s, last_lsn: %s, output: "%s"',
++        $PGWALDUMP, $rc, $tl, $last_redo, $last_lsn, $ans
++    );
++
++    if ( $rc == 0 and
++        $ans =~ m{^rmgr: XLOG.*desc: (?i:checkpoint)(?::|_SHUTDOWN) redo [0-9A-F/]+; tli $tl;.*; shutdown$}m
++    ) {
++        ocf_log( 'info', 'Slave received the shutdown checkpoint' );
++        return 0;
++    }
++
++    ocf_exit_reason(
++        'Did not receive the shutdown checkpoint from the old master!' );
++
++    return 1;
++}
++
++# Check to confirm if the instance is really started as _pg_isready stated and
++# check if the instance is primary or secondary.
++#
++sub _confirm_role {
++    my $is_in_recovery;
++    my $rc;
++    my @rs;
++
++    $rc = _query( "SELECT pg_is_in_recovery()", \@rs );
++
++    $is_in_recovery = $rs[0][0];
++
++    if ( $rc == 0 ) {
++        # The query was executed, check the result.
++        if ( $is_in_recovery eq 't' ) {
++            # The instance is a secondary.
++            ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a secondary");
++            return $OCF_SUCCESS;
++        }
++        elsif ( $is_in_recovery eq 'f' ) {
++            # The instance is a primary.
++            ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a primary");
++            # Check lsn diff with current slaves if any
++            _check_locations() if $__OCF_ACTION eq 'monitor';
++            return $OCF_RUNNING_MASTER;
++        }
++
++        # This should not happen, raise a hard configuration error.
++        ocf_exit_reason(
++            'Unexpected result from query to check if "%s" is a primary or a secondary: "%s"',
++            $OCF_RESOURCE_INSTANCE, $is_in_recovery );
++
++        return $OCF_ERR_CONFIGURED;
++    }
++    elsif ( $rc == 1 or $rc == 2 ) {
++        # psql could not connect to the instance.
++        # As pg_isready reported the instance was listening, this error
++        # could be a max_connections saturation. Just report a soft error.
++        ocf_exit_reason( 'psql could not connect to instance "%s"',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_ERR_GENERIC;
++    }
++
++    # The query failed (rc: 3) or bad parameters (rc: -1).
++    # This should not happen, raise a hard configuration error.
++    ocf_exit_reason(
++        'The query to check if instance "%s" is a primary or a secondary failed (rc: %d)',
++        $OCF_RESOURCE_INSTANCE, $rc );
++
++    return $OCF_ERR_CONFIGURED;
++}
++
++
++# Check to confirm if the instance is really stopped as _pg_isready stated
++# and if it was properly shut down.
++#
++sub _confirm_stopped {
++    my $pgctlstatus_rc;
++    my $controldata_rc;
++
++    # Check the postmaster process status.
++    $pgctlstatus_rc = _pg_ctl_status();
++
++    if ( $pgctlstatus_rc == 0 ) {
++        # The PID file exists and the process is available.
++        # That should not be the case, return an error.
++        ocf_exit_reason(
++            'Instance "%s" is not listening, but the process referenced in postmaster.pid exists',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_ERR_GENERIC;
++    }
++
++    # The PID file does not exist or the process is not available.
++    ocf_log( 'debug',
++        '_confirm_stopped: no postmaster process found for instance "%s"',
++        $OCF_RESOURCE_INSTANCE );
++
++    if ( -f "$datadir/backup_label" ) {
++        # We are probably on a freshly built secondary that was not started yet.
++        ocf_log( 'debug',
++            '_confirm_stopped: backup_label file exists: probably on a never started secondary',
++        );
++        return $OCF_NOT_RUNNING;
++    }
++
++    # Continue the check with pg_controldata.
++    $controldata_rc = _controldata_to_ocf();
++    if ( $controldata_rc == $OCF_RUNNING_MASTER ) {
++        # The controldata has not been updated to "shutdown".
++        # It should mean we had a crash on a primary instance.
++        ocf_exit_reason(
++            'Instance "%s" controldata indicates a running primary instance, the instance has probably crashed',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_FAILED_MASTER;
++    }
++    elsif ( $controldata_rc == $OCF_SUCCESS ) {
++        # The controldata has not been updated to "shutdown in recovery".
++        # It should mean we had a crash on a secondary instance.
++        # There is no "FAILED_SLAVE" return code, so we return a generic error.
++        ocf_exit_reason(
++            'Instance "%s" controldata indicates a running secondary instance, the instance has probably crashed',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_ERR_GENERIC;
++    }
++    elsif ( $controldata_rc == $OCF_NOT_RUNNING ) {
++        # The controldata state is consistent, the instance was probably
++        # properly shut down.
++        ocf_log( 'debug',
++            '_confirm_stopped: instance "%s" controldata indicates that the instance was properly shut down',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_NOT_RUNNING;
++    }
++
++    # Something went wrong with the controldata check.
++    ocf_exit_reason(
++        'Could not get instance "%s" status from controldata (returned: %d)',
++        $OCF_RESOURCE_INSTANCE, $controldata_rc );
++
++    return $OCF_ERR_GENERIC;
++}
++
++############################################################
++#### OCF FUNCS
++
++
++
++=head1 SUPPORTED PARAMETERS
++
++=over
++
++=item B<pgdata>
++
++Location of the PGDATA of your instance
++
++(optional, string, default "/var/lib/pgsql/data")
++
++=item B<pghost>
++
++The socket directory or IP address to use to connect to the local instance
++
++(optional, string, default "/tmp")
++
++=item B<pgport>
++
++The port to connect to the local instance
++
++(optional, integer, default "5432")
++
++=item B<bindir>
++
++Location of the PostgreSQL binaries.
++
++(optional, string, default "/usr/bin")
++
++=item B<system_user>
++
++The system owner of your instance's process
++
++(optional, string, default "postgres")
++
++=item B<recovery_template>
++
++B<ONLY> for PostgreSQL 11 and below.
++
++The local template that will be copied as the C<recovery.conf> file.
++This template file must exist on all nodes.
++
++With PostgreSQL 12 and higher, the cluster will refuse to start if this
++parameter is set or a template file is found.
++
++(optional, string, default "$PGDATA/recovery.conf.pcmk")
++
++=item B<maxlag>
++
++Maximum lag allowed on a standby before we set a negative master score on it.
++The calculation is based on the difference between the current xlog location on
++the master and the write location on the standby.
++
++(optional, integer, default "0", which disables this feature)
++
++=item B<datadir>
++
++Path to the directory set in C<data_directory> from your postgresql.conf file.
++This parameter has the same default as PostgreSQL itself: the C<pgdata>
++parameter value.
++
++Unless you have a special PostgreSQL setup and you understand this parameter,
++B<ignore it>
++
++(optional, string, default to the value of C<pgdata>)
++
++=item B<start_opts>
++
++Additional arguments given to the postgres process on startup.
See ++"postgres --help" for available options. Useful when the postgresql.conf file ++is not in the data directory (PGDATA), eg.: ++ ++ -c config_file=/etc/postgresql/9.3/main/postgresql.conf ++ ++(optinal, string, default "") ++ ++=back ++ ++=cut ++ ++sub ocf_meta_data { ++ print qq{ ++ ++ ++ 1.0 ++ ++ ++ Resource script for PostgreSQL in replication. It manages PostgreSQL servers using streaming replication as an HA resource. ++ ++ Manages PostgreSQL servers in replication ++ ++ ++ ++ System user account used to run the PostgreSQL server ++ ++ PostgreSQL system User ++ ++ ++ ++ ++ ++ Path to the directory storing the PostgreSQL binaries. The agent uses psql, pg_isready, pg_controldata and pg_ctl. ++ ++ Path to the PostgreSQL binaries ++ ++ ++ ++ ++ ++ Path to the data directory, e.g. PGDATA ++ ++ Path to the data directory ++ ++ ++ ++ ++ ++ Path to the directory set in data_directory from your postgresql.conf file. This parameter ++ has the same default than PostgreSQL itself: the pgdata parameter value. Unless you have a ++ special PostgreSQL setup and you understand this parameter, ignore it. ++ ++ Path to the directory set in data_directory from your postgresql.conf file ++ ++ ++ ++ ++ ++ Host IP address or unix socket folder the instance is listening on. ++ ++ Instance IP or unix socket folder ++ ++ ++ ++ ++ ++ Port the instance is listening on. ++ ++ Instance port ++ ++ ++ ++ ++ ++ Maximum lag allowed on a standby before we set a negative master score on it. The calculation ++ is based on the difference between the current LSN on the master and the LSN ++ written on the standby. ++ This parameter must be a valid positive number as described in PostgreSQL documentation. ++ See: https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS-NUMERIC ++ ++ Maximum write lag before we mark a standby as inappropriate to promote ++ ++ ++ ++ ++ ++ Path to the recovery.conf template. This file is simply copied to \$PGDATA ++ before starting the instance as slave. ++ ONLY for PostgreSQL 11 and bellow. This parameter is IGNORED for ++ PostgreSQL 12 and higher. The cluster will refuse to start if a template ++ file is found. ++ ++ Path to the recovery.conf template for PostgreSQL 11 and older. ++ ++ ++ ++ ++ ++ Additionnal arguments given to the postgres process on startup. ++ See "postgres --help" for available options. Usefull when the ++ postgresql.conf file is not in the data directory (PGDATA), eg.: ++ "-c config_file=/etc/postgresql/9.3/main/postgresql.conf". ++ ++ Additionnal arguments given to the postgres process on startup. ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ }; ++ return $OCF_SUCCESS; ++} ++ ++ ++=head1 SUPPORTED ACTIONS ++ ++This resource agent supports the following actions (operations): ++ ++=over ++ ++=item B ++ ++Starts the resource. Suggested minimum timeout: 60. ++ ++=item B ++ ++Stops the resource. Suggested minimum timeout: 60. ++ ++=item B ++ ++Suggested minimum timeout: 20. ++ ++=item B ++ ++Promotes the resource to the Master role. Suggested minimum timeout: 30. ++ ++=item B ++ ++Demotes the resource to the Slave role. Suggested minimum timeout: 120. ++ ++=item B ++ ++Performs a detailed status check. Suggested minimum timeout: 10. ++Suggested interval: 15. ++ ++=item B ++ ++Performs a detailed status check. Suggested minimum timeout: 10. ++Suggested interval: 16. ++ ++=item B ++ ++Suggested minimum timeout: 60 ++ ++=item B ++ ++Retrieves resource agent metadata (internal use only). ++Suggested minimum timeout: 5. 
++ ++=item B ++ ++Suggested minimum timeout: 5. ++ ++=item B ++ ++Performs a validation of the resource configuration. ++Suggested minimum timeout: 5. ++ ++=back ++ ++=cut ++ ++sub ocf_methods { ++ print q{ ++ start ++ stop ++ reload ++ promote ++ demote ++ monitor ++ notify ++ methods ++ meta-data ++ validate-all ++ }; ++ ++ return $OCF_SUCCESS; ++} ++ ++############################################################ ++#### RA FUNCS ++ ++sub pgsql_validate_all { ++ my $fh; ++ my $ans = ''; ++ my %cdata; ++ ++ unless ( ++ ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.9' ) == 2 ++ ) { ++ ocf_exit_reason( ++ 'PAF %s is compatible with Pacemaker 1.1.13 and greater', ++ $VERSION ++ ); ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ # check notify=true ++ $ans = qx{ $CRM_RESOURCE --resource "$OCF_RESOURCE_INSTANCE" \\ ++ --meta --get-parameter notify 2>/dev/null }; ++ chomp $ans; ++ unless ( lc($ans) =~ /^true$|^on$|^yes$|^y$|^1$/ ) { ++ ocf_exit_reason( ++ 'You must set meta parameter notify=true for your master resource' ++ ); ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ # check master-max=1 ++ unless ( ++ defined $ENV{'OCF_RESKEY_CRM_meta_master_max'} ++ and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1' ++ ) { ++ ocf_exit_reason( ++ 'You must set meta parameter master-max=1 for your master resource' ++ ); ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ if ( $PGVERNUM >= $PGVER_12 ) { ++ # check PostgreSQL setup: checks related to v12 and after ++ my $guc; ++ ++ # recovery.conf template must not exists ++ if ( -f $recovery_tpl ) { ++ ocf_exit_reason( ++ 'Recovery template file "%s" is forbidden for PostgreSQL 12 and above', ++ $recovery_tpl ); ++ exit $OCF_ERR_ARGS; ++ } ++ ++ # WARNING: you MUST put -C as first argument to bypass the root check ++ $guc = qx{ $POSTGRES -C recovery_target_timeline -D "$pgdata" $start_opts}; ++ chomp $guc; ++ unless ( $guc eq 'latest' ) { ++ ocf_exit_reason( ++ q{Parameter "recovery_target_timeline" MUST be set to 'latest'. } . ++ q{It is currently set to '%s'}, $guc ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ $guc = qx{ $POSTGRES -C primary_conninfo -D "$pgdata" $start_opts}; ++ unless ($guc =~ /\bapplication_name='?$nodename'?\b/) { ++ ocf_exit_reason( ++ q{Parameter "primary_conninfo" MUST contain 'application_name=%s'. }. ++ q{It is currently set to '%s'}, $nodename, $guc ); ++ return $OCF_ERR_ARGS; ++ } ++ } ++ else { ++ my @content; ++ ++ # check recovery template ++ if ( ! -f $recovery_tpl ) { ++ ocf_exit_reason( 'Recovery template file "%s" does not exist', ++ $recovery_tpl ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ # check content of the recovery template file ++ unless ( open( $fh, '<', $recovery_tpl ) ) { ++ ocf_exit_reason( 'Could not open file "%s": %s', $recovery_tpl, $! 
); ++ return $OCF_ERR_ARGS; ++ } ++ @content = <$fh>; ++ close $fh; ++ ++ ++ unless ( grep /^\s*standby_mode\s*=\s*'?on'?\s*$/, @content ) { ++ ocf_exit_reason( ++ 'Recovery template file must contain "standby_mode = on"' ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ unless ( grep /^\s*recovery_target_timeline\s*=\s*'?latest'?\s*$/, @content ) { ++ ocf_exit_reason( ++ "Recovery template file must contain \"recovery_target_timeline = 'latest'\"" ++ ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ unless ( ++ grep /^\s*primary_conninfo\s*=.*['\s]application_name=$nodename['\s]/, ++ @content ++ ) { ++ ocf_exit_reason( ++ 'Recovery template file must contain in primary_conninfo parameter "application_name=%s"', ++ $nodename ); ++ return $OCF_ERR_ARGS; ++ } ++ } ++ ++ unless ( looks_like_number($maxlag) ) { ++ ocf_exit_reason( 'maxlag is not a number: "%s"', $maxlag ); ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ # check system user ++ unless ( defined getpwnam $system_user ) { ++ ocf_exit_reason( 'System user "%s" does not exist', $system_user ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ # require 9.3 minimum ++ if ( $PGVERNUM < $PGVER_93 ) { ++ ocf_exit_reason( "Require 9.3 and more" ); ++ return $OCF_ERR_INSTALLED; ++ } ++ ++ # check binaries ++ unless ( -x $PGCTL and -x $PGPSQL and -x $PGCTRLDATA and -x $PGISREADY ++ and ( -x $PGWALDUMP or -x "$bindir/pg_xlogdump") ++ ) { ++ ocf_exit_reason( ++ "Missing one or more binary. Check following path: %s, %s, %s, %s, %s or %s", ++ $PGCTL, $PGPSQL, $PGCTRLDATA, $PGISREADY, $PGWALDUMP, "$bindir/pg_xlogdump" ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ # require wal_level >= hot_standby ++ %cdata = _get_controldata(); ++ unless ( $cdata{'wal_level'} =~ m{hot_standby|logical|replica} ) { ++ ocf_exit_reason( ++ 'wal_level must be one of "hot_standby", "logical" or "replica"' ); ++ return $OCF_ERR_ARGS; ++ } ++ ++ return $OCF_SUCCESS; ++} ++ ++ ++# Start the PostgreSQL instance as a *secondary* ++# ++sub pgsql_start { ++ my $rc = pgsql_monitor(); ++ my %cdata = _get_controldata(); ++ my $prev_state = $cdata{'state'}; ++ ++ # Instance must be running as secondary or being stopped. ++ # Anything else is an error. ++ if ( $rc == $OCF_SUCCESS ) { ++ ocf_log( 'info', 'Instance "%s" already started', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_SUCCESS; ++ } ++ elsif ( $rc != $OCF_NOT_RUNNING ) { ++ ocf_exit_reason( 'Unexpected state for instance "%s" (returned %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # ++ # From here, the instance is NOT running for sure. ++ # ++ ++ ocf_log( 'debug', ++ 'pgsql_start: instance "%s" is not running, starting it as a secondary', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ # Must start as a standby, so enable recovery. ++ _enable_recovery(); ++ ++ # Start the instance as a secondary. ++ $rc = _pg_ctl_start(); ++ ++ if ( $rc == 0 ) { ++ ++ # Wait for the start to finish. ++ sleep 1 while ( $rc = pgsql_monitor() ) == $OCF_NOT_RUNNING; ++ ++ if ( $rc == $OCF_SUCCESS ) { ++ ocf_log( 'info', 'Instance "%s" started', $OCF_RESOURCE_INSTANCE ); ++ ++ # Check if a master score exists in the cluster. ++ # During the very first start of the cluster, no master score will ++ # exists on any of the existing slaves, unless an admin designated ++ # one of them using crm_master. If no master exists the cluster will ++ # not promote a master among the slaves. ++ # To solve this situation, we check if there is at least one master ++ # score existing on one node in the cluster. 
Do nothing if at least ++ # one master score is found among the clones of the resource. If no ++ # master score exists, set a score of 1 only if the resource was a ++ # shut downed master before the start. ++ if ( $prev_state eq "shut down" and not _master_score_exists() ) { ++ ocf_log( 'info', 'No master score around. Set mine to 1' ); ++ ++ _set_master_score( '1' ); ++ } ++ ++ return $OCF_SUCCESS; ++ } ++ ++ ocf_exit_reason( ++ 'Instance "%s" is not running as a slave (returned %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ ++ return $OCF_ERR_GENERIC; ++ } ++ ++ ocf_exit_reason( 'Instance "%s" failed to start (rc: %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ ++ return $OCF_ERR_GENERIC; ++} ++ ++# Stop the PostgreSQL instance ++# ++sub pgsql_stop { ++ my $rc; ++ my $state; ++ my $pidfile = "$datadir/postmaster.pid"; ++ # Add 60s to the timeout or use a 24h timeout fallback to make sure ++ # Pacemaker will give up before us and take decisions ++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60; ++ ++ # Instance must be running as secondary or primary or being stopped. ++ # Anything else is an error. ++ $rc = pgsql_monitor(); ++ if ( $rc == $OCF_NOT_RUNNING ) { ++ ocf_log( 'info', 'Instance "%s" already stopped', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_SUCCESS; ++ } ++ elsif ( $rc != $OCF_SUCCESS and $rc != $OCF_RUNNING_MASTER ) { ++ ocf_exit_reason( 'Unexpected state for instance "%s" (returned %d)', ++ $OCF_RESOURCE_INSTANCE, $rc ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # ++ # From here, the instance is running for sure. ++ # ++ ++ ocf_log( 'debug', 'pgsql_stop: instance "%s" is running, stopping it', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ # Try to quit with proper shutdown. ++ ++ ++ $rc = _runas( $PGCTL, '--pgdata', $pgdata, '-w', '--timeout', $timeout, ++ '-m', 'fast', 'stop' ); ++ ++ if ( $rc == 0 ) { ++ # Wait for the stop to finish. ++ sleep 1 while ( $rc = pgsql_monitor() ) != $OCF_NOT_RUNNING ; ++ ++ ocf_log( 'info', 'Instance "%s" stopped', $OCF_RESOURCE_INSTANCE ); ++ ++ return $OCF_SUCCESS; ++ } ++ ++ ocf_exit_reason( 'Instance "%s" failed to stop', $OCF_RESOURCE_INSTANCE ); ++ return $OCF_ERR_GENERIC; ++} ++ ++# Monitor the PostgreSQL instance ++# ++sub pgsql_monitor { ++ my $pgisready_rc; ++ my $controldata_rc; ++ ++ ocf_log( 'debug', 'pgsql_monitor: monitor is a probe' ) if ocf_is_probe(); ++ ++ # First check, verify if the instance is listening. ++ $pgisready_rc = _pg_isready(); ++ ++ if ( $pgisready_rc == 0 ) { ++ # The instance is listening. ++ # We confirm that the instance is up and return if it is a primary or a ++ # secondary ++ ocf_log( 'debug', 'pgsql_monitor: instance "%s" is listening', ++ $OCF_RESOURCE_INSTANCE ); ++ return _confirm_role(); ++ } ++ ++ if ( $pgisready_rc == 1 ) { ++ # The attempt was rejected. ++ # This could happen in several cases: ++ # - at startup ++ # - during shutdown ++ # - during crash recovery ++ # - if instance is a warm standby ++ # Except for the warm standby case, this should be a transitional state. ++ # We try to confirm using pg_controldata. ++ ocf_log( 'debug', ++ 'pgsql_monitor: instance "%s" rejects connections - checking again...', ++ $OCF_RESOURCE_INSTANCE ); ++ $controldata_rc = _controldata_to_ocf(); ++ ++ if ( $controldata_rc == $OCF_RUNNING_MASTER ++ or $controldata_rc == $OCF_SUCCESS ++ ) { ++ # This state indicates that pg_isready check should succeed. ++ # We check again. 
++            ocf_log( 'debug',
++                'pgsql_monitor: instance "%s" controldata shows a running status',
++                $OCF_RESOURCE_INSTANCE );
++
++            $pgisready_rc = _pg_isready();
++            if ( $pgisready_rc == 0 ) {
++                # Consistent with pg_controldata output.
++                # We can check if the instance is primary or secondary
++                ocf_log( 'debug', 'pgsql_monitor: instance "%s" is listening',
++                    $OCF_RESOURCE_INSTANCE );
++                return _confirm_role();
++            }
++
++            # Still not consistent, raise an error.
++            # NOTE: if the instance is a warm standby, we end here.
++            # TODO raise a hard error here?
++            ocf_exit_reason(
++                'Instance "%s" controldata is not consistent with pg_isready (returned: %d)',
++                $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++            ocf_log( 'info',
++                'If this instance is in warm standby, this resource agent only supports hot standby',
++                $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++
++            return $OCF_ERR_GENERIC;
++        }
++
++        if ( $controldata_rc == $OCF_NOT_RUNNING ) {
++            # This state indicates that the pg_isready check should fail with rc 2.
++            # We check again.
++            $pgisready_rc = _pg_isready();
++            if ( $pgisready_rc == 2 ) {
++                # Consistent with pg_controldata output.
++                # We check the process status using pg_ctl status and check
++                # if it was properly shut down using pg_controldata.
++                ocf_log( 'debug',
++                    'pgsql_monitor: instance "%s" is not listening',
++                    $OCF_RESOURCE_INSTANCE );
++                return _confirm_stopped();
++            }
++            # Still not consistent, raise an error.
++            # TODO raise a hard error here?
++            ocf_exit_reason(
++                'Instance "%s" controldata is not consistent with pg_isready (returned: %d)',
++                $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++
++            return $OCF_ERR_GENERIC;
++        }
++
++        # Something went wrong with the controldata check, hard fail.
++        ocf_exit_reason(
++            'Could not get instance "%s" status from controldata (returned: %d)',
++            $OCF_RESOURCE_INSTANCE, $controldata_rc );
++
++        return $OCF_ERR_INSTALLED;
++    }
++
++    elsif ( $pgisready_rc == 2 ) {
++        # The instance is not listening.
++        # We check the process status using pg_ctl status and check
++        # if it was properly shut down using pg_controldata.
++        ocf_log( 'debug', 'pgsql_monitor: instance "%s" is not listening',
++            $OCF_RESOURCE_INSTANCE );
++        return _confirm_stopped();
++    }
++
++    elsif ( $pgisready_rc == 3 ) {
++        # No attempt was made, probably a syntax error.
++        # Hard configuration error, we don't want to retry or failover here.
++        ocf_exit_reason(
++            'Unknown error while checking if instance "%s" is listening (returned %d)',
++            $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++
++        return $OCF_ERR_CONFIGURED;
++    }
++
++    ocf_exit_reason( 'Unexpected result when checking instance "%s" status',
++        $OCF_RESOURCE_INSTANCE );
++
++    return $OCF_ERR_GENERIC;
++}
++
++
++# Demote the PostgreSQL instance from primary to secondary
++# To demote a PostgreSQL instance, we must:
++#   * stop it gracefully
++#   * create recovery.conf with standby_mode = on
++#   * start it
++#
++sub pgsql_demote {
++    my $rc;
++
++    $rc = pgsql_monitor();
++
++    # Running as primary. Normal, expected behavior.
++    if ( $rc == $OCF_RUNNING_MASTER ) {
++        ocf_log( 'debug', 'pgsql_demote: "%s" currently running as a primary',
++            $OCF_RESOURCE_INSTANCE ) ;
++    }
++    elsif ( $rc == $OCF_SUCCESS ) {
++        # Already running as secondary. Nothing to do.
++        ocf_log( 'debug',
++            'pgsql_demote: "%s" currently running as a secondary',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_SUCCESS;
++    }
++    elsif ( $rc == $OCF_NOT_RUNNING ) {
++        # Instance is stopped. Nothing to do.
++        ocf_log( 'debug', 'pgsql_demote: "%s" currently shut down',
++            $OCF_RESOURCE_INSTANCE );
++    }
++    elsif ( $rc == $OCF_ERR_CONFIGURED ) {
++        # We actually prefer raising a hard or fatal error instead of leaving
++        # the CRM aborting its transition for a new one because of a soft error.
++        # The hard error will force the CRM to move the resource immediately.
++        return $OCF_ERR_CONFIGURED;
++    }
++    else {
++        return $OCF_ERR_GENERIC;
++    }
++
++    # TODO we need to make sure at least one slave is connected!!
++
++    # WARNING if the resource state is stopped instead of master, the OCF RA
++    # dev guide advises to return OCF_ERR_GENERIC, misleading the CRM in a loop
++    # where it computes transitions of demote(failing)->stop->start->promote
++    # actions until failcount == migration-threshold.
++    # This is a really ugly trick to keep going with the demote action if the
++    # rsc is already stopped gracefully.
++    # See discussion "CRM trying to demote a stopped resource" on
++    # developers@clusterlabs.org
++    unless ( $rc == $OCF_NOT_RUNNING ) {
++        # Add 60s to the timeout or use a 24h timeout fallback to make sure
++        # Pacemaker will give up before us and take decisions
++        my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60;
++
++        # WARNING the instance **MUST** be stopped gracefully.
++        # Do **not** use pg_stop() or service or systemctl here as these
++        # commands might force-stop the PostgreSQL instance using immediate
++        # mode after some timeout and return success, which is misleading.
++
++        $rc = _runas( $PGCTL, '--pgdata', $pgdata, '--mode', 'fast', '-w',
++            '--timeout', $timeout, 'stop' );
++
++        # No need to wait for stop to complete, this is handled in pg_ctl
++        # using -w option.
++        unless ( $rc == 0 ) {
++            ocf_exit_reason( 'Failed to stop "%s" using pg_ctl (returned %d)',
++                $OCF_RESOURCE_INSTANCE, $rc );
++            return $OCF_ERR_GENERIC;
++        }
++
++        # Double check that the instance is stopped correctly.
++        $rc = pgsql_monitor();
++        unless ( $rc == $OCF_NOT_RUNNING ) {
++            ocf_exit_reason(
++                'Unexpected "%s" state: monitor status (%d) disagrees with pg_ctl return code',
++                $OCF_RESOURCE_INSTANCE, $rc );
++            return $OCF_ERR_GENERIC;
++        }
++    }
++
++    #
++    # At this point, the instance **MUST** be stopped gracefully.
++    #
++
++    # Note: We do not need to handle the recovery.conf file here as pgsql_start
++    # deals with that itself. Equally, no need to wait for the start to complete
++    # here, handled in pgsql_start.
++    $rc = pgsql_start();
++    if ( $rc == $OCF_SUCCESS ) {
++        ocf_log( 'info', 'pgsql_demote: "%s" started as a secondary',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_SUCCESS;
++    }
++
++    # NOTE: No need to double check the instance state as pgsql_start already
++    # uses pgsql_monitor to check the state before returning.
++
++    ocf_exit_reason( 'Starting "%s" as a standby failed (returned %d)',
++        $OCF_RESOURCE_INSTANCE, $rc );
++    return $OCF_ERR_GENERIC;
++}
++
++
++# Promote the secondary instance to primary
++#
++sub pgsql_promote {
++    my $rc;
++    my $cancel_switchover;
++
++    $rc = pgsql_monitor();
++
++    if ( $rc == $OCF_SUCCESS ) {
++        # Running as slave. Normal, expected behavior.
++        ocf_log( 'debug', 'pgsql_promote: "%s" currently running as a standby',
++            $OCF_RESOURCE_INSTANCE );
++    }
++    elsif ( $rc == $OCF_RUNNING_MASTER ) {
++        # Already a master. Unexpected, but not a problem.
++        ocf_log( 'info', '"%s" already running as a primary',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_SUCCESS;
++    }
++    elsif ( $rc == $OCF_NOT_RUNNING ) { # INFO this is not supposed to happen.
++ # Currently not running. Need to start before promoting. ++ ocf_log( 'info', '"%s" currently not running, starting it', ++ $OCF_RESOURCE_INSTANCE ); ++ ++ $rc = pgsql_start(); ++ if ( $rc != $OCF_SUCCESS ) { ++ ocf_exit_reason( 'Failed to start the instance "%s"', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_ERR_GENERIC; ++ } ++ } ++ else { ++ ocf_exit_reason( 'Unexpected error, cannot promote "%s"', ++ $OCF_RESOURCE_INSTANCE ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ # ++ # At this point, the instance **MUST** be started as a secondary. ++ # ++ ++ # Cancel the switchover if it has been considered not safe during the ++ # pre-promote action ++ $cancel_switchover = _get_priv_attr('cancel_switchover'); ++ if ( $cancel_switchover ) { # if not empty or not 0 ++ ocf_exit_reason( 'Switchover has been canceled from pre-promote action' ); ++ ++ _delete_priv_attr( 'cancel_switchover' ); ++ ++ return $OCF_ERR_GENERIC if $cancel_switchover eq '1'; ++ return $OCF_ERR_ARGS; # ban the resource from the node if we have an ++ # internal error during _check_switchover ++ } ++ ++ # Do not check for a better candidate if we try to recover the master ++ # Recover of a master is detected during the pre-promote action. It sets the ++ # private attribute 'recover_master' to '1' if this is a master recover. ++ if ( _get_priv_attr( 'recover_master' ) eq '1' ) { ++ ocf_log( 'info', 'Recovering old master, no election needed'); ++ } ++ else { ++ ++ # The promotion is occurring on the best known candidate (highest ++ # master score), as chosen by pacemaker during the last working monitor ++ # on previous master (see pgsql_monitor/_check_locations subs). ++ # To avoid any race condition between the last monitor action on the ++ # previous master and the **real** most up-to-date standby, we ++ # set each standby location during the "pre-promote" action, and stored ++ # them using the "lsn_location" resource attribute. ++ # ++ # The best standby to promote would have the highest known LSN. If the ++ # current resource is not the best one, we need to modify the master ++ # scores accordingly, and abort the current promotion. ++ ocf_log( 'debug', ++ 'pgsql_promote: checking if current node is the best candidate for promotion' ); ++ ++ # Exclude nodes that are known to be unavailable (not in the current ++ # partition) using the "crm_node" command ++ my @active_nodes = split /\s+/ => _get_priv_attr( 'nodes' ); ++ my $node_to_promote = ''; ++ my $ans; ++ my $max_tl; ++ my $max_lsn; ++ my $node_tl; ++ my $node_lsn; ++ my $wal_num; ++ my $wal_off; ++ ++ # Get the "lsn_location" attribute value for the current node, as set ++ # during the "pre-promote" action. ++ # It should be the greatest among the secondary instances. ++ $ans = _get_priv_attr( 'lsn_location' ); ++ ++ if ( $ans eq '' ) { ++ # This should not happen as the "lsn_location" attribute should have ++ # been updated during the "pre-promote" action. ++ ocf_exit_reason( 'Can not get current node LSN location' ); ++ return $OCF_ERR_GENERIC; ++ } ++ ++ chomp $ans; ++ ( $max_tl, $max_lsn ) = split /#/, $ans; ++ ++ ocf_log( 'debug', 'pgsql_promote: current node TL#LSN location: %s#%s', ++ $max_tl, $max_lsn ); ++ ++ # Now we compare with the other available nodes. ++ foreach my $node ( @active_nodes ) { ++ # We exclude the current node from the check. ++ next if $node eq $nodename; ++ ++ # Get the "lsn_location" attribute value for the node, as set during ++ # the "pre-promote" action. 
++            # This is implemented as a loop as private attributes are
++            # asynchronously available from other nodes.
++            # see: https://github.com/ClusterLabs/PAF/issues/131
++            # NOTE: if a node did not set its lsn_location for some reason,
++            # this will end with a timeout and the whole promotion will start
++            # again.
++            WAIT_FOR_LSN: {
++                $ans = _get_priv_attr( 'lsn_location', $node );
++                if ( $ans eq '' ) {
++                    ocf_log( 'info', 'pgsql_promote: waiting for LSN from %s', $node );
++                    select( undef, undef, undef, 0.1 );
++                    redo WAIT_FOR_LSN;
++                }
++            }
++
++            chomp $ans;
++            ( $node_tl, $node_lsn ) = split /#/, $ans;
++
++            ocf_log( 'debug',
++                'pgsql_promote: comparing with "%s": TL#LSN is %s#%s',
++                $node, $node_tl, $node_lsn );
++
++            # If the node has a higher LSN, select it as the best candidate
++            # for promotion and keep looping to check the TL/LSN of the other
++            # nodes.
++            if ( $node_tl > $max_tl
++                or ( $node_tl == $max_tl and $node_lsn > $max_lsn )
++            ) {
++                ocf_log( 'debug',
++                    'pgsql_promote: "%s" is a better candidate to promote (%s#%s > %s#%s)',
++                    $node, $node_tl, $node_lsn, $max_tl, $max_lsn );
++                $node_to_promote = $node;
++                $max_tl = $node_tl;
++                $max_lsn = $node_lsn;
++            }
++        }
++
++        # If any node has been selected, we adapt the master scores
++        # accordingly and abort the current promotion.
++        if ( $node_to_promote ne '' ) {
++            ocf_exit_reason(
++                '%s is the best candidate to promote, aborting current promotion',
++                $node_to_promote );
++
++            # Reset current node master score.
++            _set_master_score( '1' );
++
++            # Set promotion candidate master score.
++            _set_master_score( '1000', $node_to_promote );
++
++            # We fail the promotion to trigger another promotion transition
++            # with the new scores.
++            return $OCF_ERR_GENERIC;
++        }
++
++        # Else, we will keep on promoting the current node.
++    }
++
++    unless (
++        # Promote the instance on the current node.
++        _runas( $PGCTL, '--pgdata', $pgdata, '-w', 'promote' ) == 0 )
++    {
++        ocf_exit_reason( 'Error during promotion command' );
++        return $OCF_ERR_GENERIC;
++    }
++
++    # The instance promotion is asynchronous, so we need to wait for this
++    # process to complete.
++    while ( pgsql_monitor() != $OCF_RUNNING_MASTER ) {
++        ocf_log( 'info', 'Waiting for the promote to complete' );
++        sleep 1;
++    }
++
++    ocf_log( 'info', 'Promote complete' );
++
++    return $OCF_SUCCESS;
++}
++
++# This action is called **before** the actual promotion, when a failing master
++# is considered unreclaimable or recoverable, or when a new master must be
++# promoted (switchover or first start).
++# Like every "notify" action, it is executed almost simultaneously on all
++# available nodes.
++sub pgsql_notify_pre_promote {
++    my $rc;
++    my $node_tl;
++    my $node_lsn;
++    my %cdata;
++    my %active_nodes;
++    my $attr_nodes;
++
++    ocf_log( 'info', 'Promoting instance on node "%s"',
++        $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} );
++
++    # No need to do an election between slaves if this is a recovery of the
++    # master.
++    if ( _is_master_recover( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ) ) {
++        ocf_log( 'warning', 'This is a master recovery!' );
++
++        _set_priv_attr( 'recover_master', '1' )
++            if $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename;
++
++        return $OCF_SUCCESS;
++    }
++
++    # Environment cleanup!
++    _delete_priv_attr( 'lsn_location' );
++    _delete_priv_attr( 'recover_master' );
++    _delete_priv_attr( 'nodes' );
++    _delete_priv_attr( 'cancel_switchover' );
++
++    # Check for the last received entry of WAL from the master if we are
++    # the designated slave to promote.
++    if ( _is_switchover( $nodename ) and scalar
++        grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'promote'} }
++    ) {
++        $rc = _check_switchover();
++
++        unless ( $rc == 0 ) {
++            # Shortcut the election process as the switchover will be
++            # canceled.
++            _set_priv_attr( 'cancel_switchover', $rc );
++            return $OCF_SUCCESS; # return code is ignored during notify
++        }
++
++        # If the sub keeps going, that means the switchover is safe.
++        # Keep going with the election process in case the switchover was
++        # instructed to the wrong node.
++        # FIXME: should we allow a switchover to a lagging slave?
++    }
++
++    # We need to trigger an election between the existing slaves to promote
++    # the best one based on its current LSN location. Each node sets a
++    # private attribute "lsn_location" with its TL and LSN location.
++    #
++    # During the following promote action, the designated standby for
++    # promotion uses these attributes to check if the instance to be promoted
++    # is the best one, so we can avoid a race condition between the last
++    # successful monitor on the previous master and the current promotion.
++
++    # As we cannot break the transition from a notification action, we check
++    # during the promotion that each node's TL and LSN are valid.
++
++    # Force a checkpoint to make sure the controldata shows the very last TL.
++    _query( q{ CHECKPOINT }, {} );
++    %cdata = _get_controldata();
++    $node_lsn = _get_last_received_lsn( 'in decimal' );
++
++    unless ( defined $node_lsn ) {
++        ocf_log( 'warning', 'Unknown current node LSN' );
++        # Return codes are ignored during notifications...
++        return $OCF_SUCCESS;
++    }
++
++    $node_lsn = "$cdata{'tl'}#$node_lsn";
++
++    ocf_log( 'info', 'Current node TL#LSN: %s', $node_lsn );
++
++    # Set the "lsn_location" attribute value for this node so we can use it
++    # during the following "promote" action.
++    _set_priv_attr( 'lsn_location', $node_lsn );
++
++    ocf_log( 'warning', 'Could not set the current node LSN' )
++        if $? != 0;
++
++    # If this node is the future master, keep track of the slaves that
++    # received the same notification to compare our LSN with them during
++    # promotion.
++    if ( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename ) {
++        # Build the list of active nodes:
++        #   master + slave + start - stop
++        # FIXME: deal with rsc started during the same transition but
++        # **after** the promotion?
++        $active_nodes{ $_->{'uname'} }++ foreach @{ $OCF_NOTIFY_ENV{'active'} },
++            @{ $OCF_NOTIFY_ENV{'start'} };
++        $active_nodes{ $_->{'uname'} }-- foreach @{ $OCF_NOTIFY_ENV{'stop'} };
++
++        $attr_nodes = join " "
++            => grep { $active_nodes{$_} > 0 } keys %active_nodes;
++
++        _set_priv_attr( 'nodes', $attr_nodes );
++    }
++
++    return $OCF_SUCCESS;
++}
++
++# This action is called after a promote action.
++sub pgsql_notify_post_promote {
++
++    # We have a new master (or the previous one recovered).
++    # Environment cleanup!
++    _delete_priv_attr( 'lsn_location' );
++    _delete_priv_attr( 'recover_master' );
++    _delete_priv_attr( 'nodes' );
++    _delete_priv_attr( 'cancel_switchover' );
++
++    return $OCF_SUCCESS;
++}
++
++# This is called before a demote occurs.
++sub pgsql_notify_pre_demote {
++    my $rc;
++    my %cdata;
++
++    # do nothing if the local node will not be demoted
++    return $OCF_SUCCESS unless scalar
++        grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'demote'} };
++
++    $rc = pgsql_monitor();
++
++    # do nothing if this is not a master recovery
++    return $OCF_SUCCESS unless _is_master_recover( $nodename )
++        and $rc == $OCF_FAILED_MASTER;
++
++    # In case of a master crash, we need to detect if the CRM tries to
++    # recover the master clone. The usual transition is to do:
++    #   demote->stop->start->promote
++    #
++    # There are multiple flaws with this transition:
++    #   * the 1st and 2nd actions will fail because the instance is in the
++    #     OCF_FAILED_MASTER state
++    #   * the usual start action is dangerous as the instance will start with
++    #     a recovery.conf instead of entering a normal recovery process
++    #
++    # To avoid this, we try to start the instance in recovery from here.
++    # If it succeeds, at least it will be demoted correctly with a normal
++    # status. If it fails, it will be caught in the next steps.
++
++    ocf_log( 'info', 'Trying to start failing master "%s"...',
++        $OCF_RESOURCE_INSTANCE );
++
++    # Either the instance managed to start or it couldn't.
++    # We rely on the pg_ctl '-w' switch to take care of this. If it couldn't
++    # start, this error will be caught later during the various checks.
++    _pg_ctl_start();
++
++    %cdata = _get_controldata();
++
++    ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} );
++
++    return $OCF_SUCCESS;
++}
++
++# This is called before a stop occurs.
++sub pgsql_notify_pre_stop {
++    my $rc;
++    my %cdata;
++
++    # do nothing if the local node will not be stopped
++    return $OCF_SUCCESS unless scalar
++        grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'stop'} };
++
++    $rc = _controldata_to_ocf();
++
++    # do nothing if this is not a slave recovery
++    return $OCF_SUCCESS unless _is_slave_recover( $nodename )
++        and $rc == $OCF_RUNNING_SLAVE;
++
++    # In case of a slave crash, we need to detect if the CRM tries to recover
++    # the slave clone. The usual transition is to do: stop->start
++    #
++    # This transition cannot work because the instance is in the
++    # OCF_ERR_GENERIC state. So the stop action will fail, most probably
++    # leading to a fencing action.
++    #
++    # To avoid this, we try to start the instance in recovery from here.
++    # If it succeeds, at least it will be stopped correctly with a normal
++    # status. If it fails, it will be caught in the next steps.
++
++    ocf_log( 'info', 'Trying to start failing slave "%s"...',
++        $OCF_RESOURCE_INSTANCE );
++
++    # Either the instance managed to start or it couldn't.
++    # We rely on the pg_ctl '-w' switch to take care of this. If it couldn't
++    # start, this error will be caught later during the various checks.
++    _pg_ctl_start();
++
++    %cdata = _get_controldata();
++
++    ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} );
++
++    return $OCF_SUCCESS;
++}
++
++# Notify type actions, called on all available nodes before (pre) and after
++# (post) other actions, like promote, start, ...
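++#
++# As an illustration (a sketch derived only from the accesses made in this
++# file, not a complete reference), %OCF_NOTIFY_ENV roughly looks like:
++#
++#   type      => 'pre' or 'post',
++#   operation => 'promote', 'demote', 'start', 'stop', ...
++#   promote   => [ { uname => 'srv1' }, ... ],  # same layout for demote,
++#   active    => [ { uname => 'srv1' }, ... ],  # start and stop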
++# ++sub pgsql_notify { ++ my $type_op; ++ ++ ocf_log( 'debug', "pgsql_notify: environment variables: %s", ++ Data::Dumper->new( [ \%OCF_NOTIFY_ENV ] )->Sortkeys(1)->Terse(1)->Dump ); ++ ++ return unless %OCF_NOTIFY_ENV; ++ ++ $type_op = "$OCF_NOTIFY_ENV{'type'}-$OCF_NOTIFY_ENV{'operation'}"; ++ ++ for ( $type_op ) { ++ if ( /^pre-promote$/ ) { return pgsql_notify_pre_promote() } ++ elsif ( /^post-promote$/ ) { return pgsql_notify_post_promote() } ++ elsif ( /^pre-demote$/ ) { return pgsql_notify_pre_demote() } ++ elsif ( /^pre-stop$/ ) { return pgsql_notify_pre_stop() } ++ } ++ ++ return $OCF_SUCCESS; ++} ++ ++# Action used to allow for online modification of resource parameters value. ++# ++sub pgsql_reload { ++ ++ # No action necessary, the action declaration is enough to inform pacemaker ++ # that the modification of any non-unique parameter can be applied without ++ # having to restart the resource. ++ ocf_log( 'info', 'Instance "%s" reloaded', $OCF_RESOURCE_INSTANCE ); ++ return $OCF_SUCCESS; ++ ++} ++ ++############################################################ ++#### MAIN ++ ++exit ocf_meta_data() if $__OCF_ACTION eq 'meta-data'; ++exit ocf_methods() if $__OCF_ACTION eq 'methods'; ++ ++# Avoid "could not change directory" when executing commands as "system-user". ++chdir File::Spec->tmpdir(); ++ ++# mandatory sanity checks ++# check pgdata ++if ( ! -d $pgdata ) { ++ ocf_exit_reason( 'PGDATA "%s" does not exist', $pgdata ); ++ exit $OCF_ERR_ARGS; ++} ++ ++# check datadir ++if ( ! -d $datadir ) { ++ ocf_exit_reason( 'data_directory "%s" does not exist', $datadir ); ++ exit $OCF_ERR_ARGS; ++} ++ ++# Set PostgreSQL version ++$PGVERNUM = _get_pg_version(); ++ ++# Set current node name. ++$nodename = ocf_local_nodename(); ++ ++$exit_code = pgsql_validate_all(); ++ ++exit $exit_code if $exit_code != $OCF_SUCCESS or $__OCF_ACTION eq 'validate-all'; ++ ++# Run action ++for ( $__OCF_ACTION ) { ++ if ( /^start$/ ) { $exit_code = pgsql_start() } ++ elsif ( /^stop$/ ) { $exit_code = pgsql_stop() } ++ elsif ( /^monitor$/ ) { $exit_code = pgsql_monitor() } ++ elsif ( /^promote$/ ) { $exit_code = pgsql_promote() } ++ elsif ( /^demote$/ ) { $exit_code = pgsql_demote() } ++ elsif ( /^notify$/ ) { $exit_code = pgsql_notify() } ++ elsif ( /^reload$/ ) { $exit_code = pgsql_reload() } ++ else { $exit_code = $OCF_ERR_UNIMPLEMENTED } ++} ++ ++exit $exit_code; ++ ++ ++=head1 EXAMPLE CRM SHELL ++ ++The following is an example configuration for a pgsqlms resource using the ++crm(8) shell: ++ ++ primitive pgsqld pgsqlms \ ++ params pgdata="/var/lib/postgresql/9.6/main" \ ++ bindir="/usr/lib/postgresql/9.6/bin" \ ++ pghost="/var/run/postgresql" \ ++ recovery_template="/etc/postgresql/9.6/main/recovery.conf.pcmk" \ ++ start_opts="-c config_file=/etc/postgresql/9.6/main/postgresql.conf" \ ++ op start timeout=60s \ ++ op stop timeout=60s \ ++ op promote timeout=30s \ ++ op demote timeout=120s \ ++ op monitor interval=15s timeout=10s role="Master" \ ++ op monitor interval=16s timeout=10s role="Slave" \ ++ op notify timeout=60s ++ ++ ms pgsql-ha pgsqld meta notify=true ++ ++ ++=head1 EXAMPLE PCS ++ ++The following is an example configuration for a pgsqlms resource using pcs(8): ++ ++ pcs resource create pgsqld ocf:heartbeat:pgsqlms \ ++ bindir=/usr/pgsql-9.6/bin pgdata=/var/lib/pgsql/9.6/data \ ++ op start timeout=60s \ ++ op stop timeout=60s \ ++ op promote timeout=30s \ ++ op demote timeout=120s \ ++ op monitor interval=15s timeout=10s role="Master" \ ++ op monitor interval=16s timeout=10s role="Slave" \ ++ op 
notify timeout=60s --master notify=true
++
++=head1 SEE ALSO
++
++http://clusterlabs.org/
++
++=head1 AUTHOR
++
++Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++=cut
+diff --color -uNr a/paf_LICENSE b/paf_LICENSE
+--- a/paf_LICENSE	1970-01-01 01:00:00.000000000 +0100
++++ b/paf_LICENSE	2021-04-14 09:16:39.083555835 +0200
+@@ -0,0 +1,19 @@
++Copyright (c) 2016-2020, Jehan-Guillaume de Rorthais, Mael Rimbault.
++
++Permission to use, copy, modify, and distribute this software and its
++documentation for any purpose, without fee, and without a written agreement
++is hereby granted, provided that the above copyright notice and this
++paragraph and the following two paragraphs appear in all copies.
++
++IN NO EVENT SHALL THE AUTHOR OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR
++DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
++LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
++DOCUMENTATION, EVEN IF THE AUTHOR OR DISTRIBUTORS HAVE BEEN ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGE.
++
++THE AUTHOR AND DISTRIBUTORS SPECIFICALLY DISCLAIMS ANY WARRANTIES,
++INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
++AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
++ON AN "AS IS" BASIS, AND THE AUTHOR AND DISTRIBUTORS HAS NO OBLIGATIONS TO
++PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
++
+diff --color -uNr a/paf_README.md b/paf_README.md
+--- a/paf_README.md	1970-01-01 01:00:00.000000000 +0100
++++ b/paf_README.md	2021-04-14 09:18:57.450968048 +0200
+@@ -0,0 +1,86 @@
++# PostgreSQL Automatic Failover
++
++High-Availability for Postgres, based on the industry references Pacemaker and
++Corosync.
++
++## Description
++
++Pacemaker is nowadays the industry reference for High Availability. In the same
++fashion as for systemd, all Linux distributions have moved (or are moving) to
++this unique Pacemaker+Corosync stack, removing all other existing high
++availability stacks (CMAN, RGManager, OpenAIS, ...). It is able to detect
++failures of various services and automatically decides to fail the failing
++resource over to another node when possible.
++
++To be able to manage a specific service resource, Pacemaker interacts with it
++through a so-called "Resource Agent". Resource agents must comply with the OCF
++specification, which defines what they must implement (start, stop, promote,
++etc.), how they should behave, and how to inform Pacemaker of their results.
++
++PostgreSQL Automatic Failover is a new OCF resource agent dedicated to
++PostgreSQL. Its original goal is to keep a clear boundary between Pacemaker
++administration and PostgreSQL administration, to keep things simple,
++documented and yet powerful.
++
++Once your PostgreSQL cluster is built using internal streaming replication,
++PAF is able to expose to Pacemaker the current status of the PostgreSQL
++instance on each node: master, slave, stopped, catching up, etc. Should a
++failure occur on the master, Pacemaker will try to recover it by default.
++Should the failure be non-recoverable, PAF allows the slaves to elect the best
++of them (the closest one to the old master) and promote it as the new master.
++All of this thanks to the robust, feature-rich and, most importantly,
++experienced project: Pacemaker.
++
++For information about how to install this agent, see `INSTALL.md`.
++
++## Setup and requirements
++
++PAF supports PostgreSQL 9.3 and higher. It has been extensively tested under
++CentOS 6 and 7 in various scenarios.
++
++PAF has been written to give the administrator maximum control over their
++PostgreSQL configuration and architecture. Thus, you are 100% responsible for
++the master/slave creation and setup. The agent will NOT edit your setup. It
++only requires you to follow these prerequisites:
++
++ * slaves __must__ be in hot_standby (accept read-only connections);
++ * the following parameters __must__ be configured in the appropriate place:
++   * `standby_mode = on` (for PostgreSQL 11 and before)
++   * `recovery_target_timeline = 'latest'`
++   * `primary_conninfo` with `application_name` set to the node name as seen
++     in Pacemaker.
++ * these last parameters have been merged into the instance configuration
++   file with PostgreSQL 12. For PostgreSQL 11 and before, you __must__
++   provide a `recovery.conf` template file (see the example template at the
++   end of this document).
++
++When setting up the resource in Pacemaker, here are the available parameters
++you can set:
++
++ * `bindir`: location of the PostgreSQL binaries (default: `/usr/bin`)
++ * `pgdata`: location of the PGDATA of your instance (default:
++   `/var/lib/pgsql/data`)
++ * `datadir`: path to the directory set in `data_directory` from your
++   postgresql.conf file. This parameter has the same default as PostgreSQL
++   itself: the `pgdata` parameter value. Unless you have a special PostgreSQL
++   setup and you understand this parameter, __ignore it__
++ * `pghost`: the socket directory or IP address to use to connect to the
++   local instance (default: `/tmp`, or `/var/run/postgresql` on Debian)
++ * `pgport`: the port to connect to the local instance (default: `5432`)
++ * `recovery_template`: __only__ for PostgreSQL 11 and before. The local
++   template that will be copied as the `PGDATA/recovery.conf` file. This
++   file must not exist on any node for PostgreSQL 12 and after.
++   (default: `$PGDATA/recovery.conf.pcmk`)
++ * `start_opts`: additional arguments given to the postgres process on
++   startup. See "postgres --help" for available options. Useful when the
++   postgresql.conf file is not in the data directory (PGDATA), e.g.:
++   `-c config_file=/etc/postgresql/9.3/main/postgresql.conf`
++ * `system_user`: the system owner of your instance's process (default:
++   `postgres`)
++ * `maxlag`: maximum lag allowed on a standby before we set a negative master
++   score on it. The calculation is based on the difference between the current
++   xlog location on the master and the write location on the standby.
++   (default: 0, which disables this feature)
++
++For a demonstration of how to set up a cluster, see
++[http://clusterlabs.github.io/PAF/documentation.html](http://clusterlabs.github.io/PAF/documentation.html).
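++
++As an illustration, here is a minimal sketch of a `recovery.conf.pcmk`
++template for PostgreSQL 11 and before, built from the prerequisites above.
++The host, port, user and `application_name` values are examples only and must
++be adapted to your own setup:
++
++    standby_mode = on
++    recovery_target_timeline = 'latest'
++    primary_conninfo = 'host=192.168.122.50 port=5432 user=postgres application_name=srv1'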
++ diff --git a/SOURCES/bz1872999-aws-vpc-move-ip-add-region-parameter.patch b/SOURCES/bz1872999-aws-vpc-move-ip-add-region-parameter.patch new file mode 100644 index 0000000..4fef3d5 --- /dev/null +++ b/SOURCES/bz1872999-aws-vpc-move-ip-add-region-parameter.patch @@ -0,0 +1,81 @@ +--- ClusterLabs-resource-agents-e711383f/heartbeat/aws-vpc-move-ip 2020-09-23 11:57:38.855067216 +0200 ++++ aws-vpc-move-ip.tmp 2020-09-23 11:57:17.993045991 +0200 +@@ -37,13 +37,17 @@ + # Defaults + OCF_RESKEY_awscli_default="/usr/bin/aws" + OCF_RESKEY_profile_default="default" ++OCF_RESKEY_region_default="" + OCF_RESKEY_routing_table_role_default="" + OCF_RESKEY_monapi_default="false" + + : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} + : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} ++: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}} + : ${OCF_RESKEY_routing_table_role=${OCF_RESKEY_routing_table_role_default}} + : ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}} ++ ++[ -n "$OCF_RESKEY_region" ] && region_opt="--region $OCF_RESKEY_region" + ####################################################################### + + +@@ -87,6 +91,14 @@ + + + ++ ++ ++Valid AWS region name (e.g., 'us-west-2') ++ ++region name ++ ++ ++ + + + VPC private IP address +@@ -151,7 +163,7 @@ + execute_cmd_as_role(){ + cmd=$1 + role=$2 +- output="$($OCF_RESKEY_awscli sts assume-role --role-arn $role --role-session-name AWSCLI-RouteTableUpdate --profile $OCF_RESKEY_profile --output=text)" ++ output="$($OCF_RESKEY_awscli sts assume-role --role-arn $role --role-session-name AWSCLI-RouteTableUpdate --profile $OCF_RESKEY_profile $region_opt --output=text)" + export AWS_ACCESS_KEY_ID="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $5}')" + export AWS_SECRET_ACCESS_KEY="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $7}')" + export AWS_SESSION_TOKEN="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $8}')" +@@ -198,11 +210,11 @@ + for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do + ocf_log info "monitor: check routing table (API call) - $rtb" + if [[ -z "${OCF_RESKEY_routing_table_role}" ]]; then +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" + ocf_log debug "executing command: $cmd" + ROUTE_TO_INSTANCE="$($cmd)" + else +- cmd="$OCF_RESKEY_awscli --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" ++ cmd="$OCF_RESKEY_awscli $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" + ROUTE_TO_INSTANCE="$(execute_cmd_as_role "$cmd" $OCF_RESKEY_routing_table_role)" + fi + ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" +@@ -283,11 +295,11 @@ + + for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do + if [[ -z "${OCF_RESKEY_routing_table_role}" ]]; then +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" ++ cmd="$OCF_RESKEY_awscli --profile 
$OCF_RESKEY_profile $region_opt --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID"
+ ocf_log debug "executing command: $cmd"
+ $cmd
+ else
+- cmd="$OCF_RESKEY_awscli --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID"
++ cmd="$OCF_RESKEY_awscli $region_opt --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID"
+ update_response="$(execute_cmd_as_role "$cmd" $OCF_RESKEY_routing_table_role)"
+ fi
+ rc=$?
+@@ -397,7 +409,7 @@
+ ec2ip_monitor;;
+ validate-all)
+ exit $?;;
+- *)
++ *)
+ echo $USAGE
+ exit $OCF_ERR_UNIMPLEMENTED
+ ;;
diff --git a/SOURCES/bz1881114-galera-recover-joining-non-existing-cluster.patch b/SOURCES/bz1881114-galera-recover-joining-non-existing-cluster.patch
new file mode 100644
index 0000000..17b823a
--- /dev/null
+++ b/SOURCES/bz1881114-galera-recover-joining-non-existing-cluster.patch
@@ -0,0 +1,51 @@
+From 028bd6aab181104fe68166c8ec9c0485e12f9376 Mon Sep 17 00:00:00 2001
+From: Damien Ciabrini
+Date: Fri, 18 Sep 2020 18:34:22 +0200
+Subject: [PATCH] galera: recover from joining a non existing cluster
+
+galera being an M/S resource, the resource agent decides
+when and how to promote a resource based on the current
+state of the galera cluster. If there's no cluster,
+a resource is promoted as the bootstrap node. Otherwise
+it is promoted as a joiner node.
+
+There can be some time between the moment when a node is
+promoted and when the promote operation effectively
+takes place. So if a node is promoted to join a cluster,
+but all the running galera nodes are stopped before the
+promote operation starts, the joining node won't be able
+to join the cluster, and it can't bootstrap a new one
+either because it doesn't have the most recent copy of
+the DB.
+
+In that case, do not make the promotion fail, and force
+a demotion instead. This ensures that a normal bootstrap
+election will take place eventually, without blocking
+the joining node due to a failed promotion.
+---
+ heartbeat/galera | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/galera b/heartbeat/galera
+index 74f11d8c5..d2f4faa86 100755
+--- a/heartbeat/galera
++++ b/heartbeat/galera
+@@ -727,9 +727,16 @@ galera_promote()
+ ocf_log info "Node <${NODENAME}> is bootstrapping the cluster"
+ extra_opts="--wsrep-cluster-address=gcomm://"
+ else
+- ocf_exit_reason "Failure, Attempted to promote Master instance of $OCF_RESOURCE_INSTANCE before bootstrap node has been detected."
+- clear_last_commit
+- return $OCF_ERR_GENERIC
++ # We are being promoted without having the bootstrap
++ # attribute in the CIB, which means we are supposed to
++ # join a cluster; however if we end up here, there is no
++ # Master remaining right now, which means there is no
++ # cluster to join anymore. So force a demotion, and
++ # let the RA decide later which node should be the next
++ # bootstrap node.
++ ocf_log warn "There is no running cluster to join, demoting ourself" ++ clear_master_score ++ return $OCF_SUCCESS + fi + fi + diff --git a/SOURCES/bz1886262-podman-recover-from-killed-conmon.patch b/SOURCES/bz1886262-podman-recover-from-killed-conmon.patch new file mode 100644 index 0000000..3fa5934 --- /dev/null +++ b/SOURCES/bz1886262-podman-recover-from-killed-conmon.patch @@ -0,0 +1,63 @@ +From 3aa0dda4e0c2a3b801d65aeacc4fdfd713a604f2 Mon Sep 17 00:00:00 2001 +From: Damien Ciabrini +Date: Tue, 27 Oct 2020 18:01:36 +0100 +Subject: [PATCH] podman: recover from killed conmon side process + +When podman containers are created by the resource-agent, the podman +runtime spawns a side process (conmon) to monitor the container and +record the exit status. + +If the conmon process dies unexpectedly (e.g. kill -9), the podman +container can still be stopped, even if the cli returns a generic +error. + +Try to distinguish this specific failure condition and make the stop +operation resilient; when it happens, just log a warning and finish +the usual stop actions. +--- + heartbeat/podman | 18 +++++++++++++++--- + 1 file changed, 15 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index 81b00ee6f..9f8c2a091 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -419,6 +419,7 @@ podman_start() + podman_stop() + { + local timeout=60 ++ local rc + podman_simple_status + if [ $? -eq $OCF_NOT_RUNNING ]; then + remove_container +@@ -434,16 +435,27 @@ podman_stop() + + if ocf_is_true "$OCF_RESKEY_force_kill"; then + ocf_run podman kill $CONTAINER ++ rc=$? + else + ocf_log debug "waiting $timeout second[s] before killing container" + ocf_run podman stop -t=$timeout $CONTAINER ++ rc=$? + # on stop, systemd will automatically delete any transient + # drop-in conf that has been created earlier + fi + +- if [ $? -ne 0 ]; then +- ocf_exit_reason "Failed to stop container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}." +- return $OCF_ERR_GENERIC ++ if [ $rc -ne 0 ]; then ++ # If the stop failed, it could be because the controlling conmon ++ # process died unexpectedly. If so, a generic error code is returned ++ # but the associated container exit code is -1. If that's the case, ++ # assume there's no failure and continue with the rm as usual. ++ if [ $rc -eq 125 ] && \ ++ podman inspect --format '{{.State.Status}}:{{.State.ExitCode}}' $CONTAINER | grep -wq "stopped:-1"; then ++ ocf_log warn "Container ${CONTAINER} had an unexpected stop outcome. Trying to remove it anyway." ++ else ++ ocf_exit_reason "Failed to stop container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}." 
++ return $OCF_ERR_GENERIC ++ fi + fi + + remove_container diff --git a/SOURCES/bz1890068-gcp-pd-move-fix-partially-matched-disk_name.patch b/SOURCES/bz1890068-gcp-pd-move-fix-partially-matched-disk_name.patch new file mode 100644 index 0000000..83aef93 --- /dev/null +++ b/SOURCES/bz1890068-gcp-pd-move-fix-partially-matched-disk_name.patch @@ -0,0 +1,58 @@ +From 2927279ba1677e9dda202121176a8245a7ef76ca Mon Sep 17 00:00:00 2001 +From: tositaka77 <45960626+tositaka77@users.noreply.github.com> +Date: Wed, 14 Oct 2020 22:22:56 +0900 +Subject: [PATCH] fixes and improvements + +- Fixed "regional" PD functionality in attach_disk() +- Improve to exact match disk_name with disks.source in detach_disk() +--- + heartbeat/gcp-pd-move.in | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +index f82bd25e5..e99cc71f8 100644 +--- a/heartbeat/gcp-pd-move.in ++++ b/heartbeat/gcp-pd-move.in +@@ -49,6 +49,7 @@ else: + CONN = None + PROJECT = None + ZONE = None ++REGION = None + LIST_DISK_ATTACHED_INSTANCES = None + INSTANCE_NAME = None + +@@ -148,6 +149,7 @@ def populate_vars(): + global INSTANCE_NAME + global PROJECT + global ZONE ++ global REGION + global LIST_DISK_ATTACHED_INSTANCES + + # Populate global vars +@@ -175,6 +177,7 @@ def populate_vars(): + PROJECT = get_metadata('project/project-id') + if PARAMETERS['disk_scope'] in ['detect', 'regional']: + ZONE = get_metadata('instance/zone').split('/')[-1] ++ REGION = ZONE[:-2] + else: + ZONE = PARAMETERS['disk_scope'] + LIST_DISK_ATTACHED_INSTANCES = get_disk_attached_instances( +@@ -255,7 +258,7 @@ def detach_disk(instance, disk_name): + + device_name = None + for disk in response['disks']: +- if disk_name in disk['source']: ++ if disk_name == re.sub('.*disks/',"",disk['source']): + device_name = disk['deviceName'] + break + +@@ -273,6 +276,9 @@ def detach_disk(instance, disk_name): + + def attach_disk(instance, disk_name): + location = 'zones/%s' % ZONE ++ if PARAMETERS['disk_scope'] == 'regional': ++ location = 'regions/%s' % REGION ++ + prefix = 'https://www.googleapis.com/compute/v1' + body = { + 'source': '%(prefix)s/projects/%(project)s/%(location)s/disks/%(disk)s' % { diff --git a/SOURCES/bz1891835-galera-set-bootstrap-attribute-before-promote.patch b/SOURCES/bz1891835-galera-set-bootstrap-attribute-before-promote.patch new file mode 100644 index 0000000..7dfdc48 --- /dev/null +++ b/SOURCES/bz1891835-galera-set-bootstrap-attribute-before-promote.patch @@ -0,0 +1,36 @@ +From ac213f158ff851422d78ae8f56b022e8e30751bc Mon Sep 17 00:00:00 2001 +From: Damien Ciabrini +Date: Mon, 26 Oct 2020 14:54:05 +0100 +Subject: [PATCH] galera: set bootstrap attribute before promote + +When the master detection takes place, the node chosen for +becoming the master is given two attributes in the CIB: +a master score and a bootstrap flag. The master score makes +pacemaker schedule a promote operation, and the bootstrap flag +drives how the galera server is started. + +The order in which we set the attributes is racy; it may happen +that a promote operation is started before the current master +detection function has set the bootstrap flag, in which case +the promotion will fail. + +Reverse the order in which we set the attributes on a bootstrap +node to close the race. 
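+
+As a sketch of the ordering (illustration only; the agent uses its own
+set_bootstrap_node/set_master_score helpers, and the attribute names below
+assume a resource named "galera"), the bootstrap node now gets its two node
+attributes in this order:
+
+    crm_attribute -N $best_node -l reboot --name galera-bootstrap -v true
+    crm_attribute -N $best_node -l reboot --name master-galera -v 100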
+---
+ heartbeat/galera | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/galera b/heartbeat/galera
+index d2f4faa86..b4d7e187d 100755
+--- a/heartbeat/galera
++++ b/heartbeat/galera
+@@ -587,8 +587,8 @@ detect_first_master()
+ fi
+
+ ocf_log info "Promoting $best_node to be our bootstrap node"
+- set_master_score $best_node
+ set_bootstrap_node $best_node
++ set_master_score $best_node
+ }
+
+ detect_safe_to_bootstrap()
diff --git a/SOURCES/bz1891855-galera-recover-2-node-cluster.patch b/SOURCES/bz1891855-galera-recover-2-node-cluster.patch
new file mode 100644
index 0000000..22ab885
--- /dev/null
+++ b/SOURCES/bz1891855-galera-recover-2-node-cluster.patch
@@ -0,0 +1,80 @@
+--- a/heartbeat/galera	2020-10-28 16:28:48.125700714 +0100
++++ b/heartbeat/galera	2020-10-28 16:31:14.932820752 +0100
+@@ -81,6 +81,11 @@
+ . /etc/default/clustercheck
+ fi
+
++# Parameter defaults
++
++OCF_RESKEY_two_node_mode_default="false"
++: ${OCF_RESKEY_two_node_mode=${OCF_RESKEY_two_node_mode_default}}
++
+ #######################################################################
+
+ usage() {
+@@ -249,6 +254,16 @@
+
+
+
++<parameter name="two_node_mode" unique="0" required="0">
++<longdesc lang="en">
++If running in a 2-node pacemaker cluster, rely on pacemaker quorum
++to allow automatic recovery even when the other node is unreachable.
++Use it with caution! (and fencing)
++</longdesc>
++<shortdesc lang="en">Special recovery when running on a 2-node cluster</shortdesc>
++<content type="boolean" default="${OCF_RESKEY_two_node_mode_default}"/>
++</parameter>
++
+
+
+
+@@ -400,6 +415,27 @@
+ return 0
+ }
+
++is_two_node_mode_active()
++{
++	# crm_node or corosync-quorumtool cannot access various corosync
++	# flags when running inside a bundle, so only count the cluster
++	# members
++	ocf_is_true "$OCF_RESKEY_two_node_mode" && ${HA_SBIN_DIR}/crm_mon -1X | xmllint --xpath "count(//nodes/node[@type='member'])" - | grep -q -w 2
++}
++
++is_last_node_in_quorate_partition()
++{
++	# when a network split occurs in a 2-node cluster, pacemaker
++	# fences the other node and tries to retain quorum. So until
++	# the fencing is resolved (and the status of the peer node
++	# is clean), we shouldn't consider ourselves quorate.
++	local partition_members=$(${HA_SBIN_DIR}/crm_node -p | wc -w)
++	local quorate=$(${HA_SBIN_DIR}/crm_node -q)
++	local clean_members=$(${HA_SBIN_DIR}/crm_mon -1X | xmllint --xpath 'count(//nodes/node[@type="member" and @unclean="false"])' -)
++
++	[ "$partition_members" = 1 ] && [ "$quorate" = 1 ] && [ "$clean_members" = 2 ]
++}
++
+ master_exists()
+ {
+ if [ "$__OCF_ACTION" = "demote" ]; then
+@@ -518,8 +554,20 @@
+ done
+
+ for node in $nodes_recovered $nodes; do
++ # On clean shutdown, galera sets the last stopped node as 'safe to bootstrap',
++ # so use this hint when we can
+ safe_to_bootstrap=$(get_safe_to_bootstrap $node)
+
++ # Special case for 2-node clusters: during a network split, rely on
++ # pacemaker's quorum to check whether we can restart galera
++ if [ "$safe_to_bootstrap" != "1" ] && [ "$node" = "$NODENAME" ] && is_two_node_mode_active; then
++ is_last_node_in_quorate_partition
++ if [ $? -eq 0 ]; then
++ ocf_log warn "Survived a split in a 2-node cluster, considering ourselves safe to bootstrap"
++ safe_to_bootstrap=1
++ fi
++ fi
++
+ if [ "$safe_to_bootstrap" = "1" ]; then
+ # Galera marked the node as safe to bootstrap during shutdown. Let's just
+ # pick it as our bootstrap node.
diff --git a/SOURCES/bz1891883-ethmonitor-vlan-fix.patch b/SOURCES/bz1891883-ethmonitor-vlan-fix.patch new file mode 100644 index 0000000..ffe74d1 --- /dev/null +++ b/SOURCES/bz1891883-ethmonitor-vlan-fix.patch @@ -0,0 +1,25 @@ +From 7f7ca75100a846242ff1510fd9bcf299cd3d00eb Mon Sep 17 00:00:00 2001 +From: Aleksei Burlakov +Date: Mon, 26 Oct 2020 13:25:45 +0100 +Subject: [PATCH] ethmonitor: is_interface: RE matches vlan names + +Vlan names end not with : but are suffixed with the @devices-name +--- + heartbeat/ethmonitor | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor +index e791fbe9d..cf0321ab4 100755 +--- a/heartbeat/ethmonitor ++++ b/heartbeat/ethmonitor +@@ -230,8 +230,8 @@ is_interface() { + # + # List interfaces but exclude FreeS/WAN ipsecN virtual interfaces + # +- local iface=`$IP2UTIL -o -f inet addr show | grep " $1 " \ +- | cut -d ' ' -f2 | sort -u | grep -v '^ipsec[0-9][0-9]*$'` ++ local iface=`$IP2UTIL -o -f link addr show | grep -e " $1[:@]" \ ++ | cut -d ' ' -f2 | tr -d ':' | cut -d '@' -f1 | sort -u | grep -v '^ipsec[0-9][0-9]*$'` + [ "$iface" != "" ] + } + diff --git a/SOURCES/bz1895811-aws-vpc-move-ip-dont-warn-for-expected-scenarios.patch b/SOURCES/bz1895811-aws-vpc-move-ip-dont-warn-for-expected-scenarios.patch new file mode 100644 index 0000000..385e034 --- /dev/null +++ b/SOURCES/bz1895811-aws-vpc-move-ip-dont-warn-for-expected-scenarios.patch @@ -0,0 +1,84 @@ +From 8d459216c9718269303b9caf227a971d73ec4df9 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Thu, 27 Aug 2020 01:34:01 -0700 +Subject: [PATCH] aws-vpc-move-ip: Don't warn for expected scenarios + +Make log levels more appropriate to the situation. Before this patch, a +normal start looked like this: + +Operation start for aws-vip (ocf:heartbeat:aws-vpc-move-ip) returned: 'ok' (0) + > stderr: Nov 09 02:38:20 INFO: EC2: Moving IP address 10.0.1.65 to this host by adjusting routing table rtb-01b4ea1ae0ec0a4d9 + > stderr: Nov 09 02:38:20 INFO: monitor: check routing table (API call) - rtb-01b4ea1ae0ec0a4d9 + > stderr: Nov 09 02:38:22 WARNING: IP 10.0.1.65 not assigned to running interface + > stderr: Nov 09 02:38:22 INFO: EC2: Adjusting routing table and locally configuring IP address + > stderr: RTNETLINK answers: Cannot assign requested address + > stderr: Nov 09 02:38:24 WARNING: command failed, rc 2 + > stderr: Nov 09 02:38:24 INFO: monitor: check routing table (API call) - rtb-01b4ea1ae0ec0a4d9 + +Now it looks like this: + +Operation start for aws-vip (ocf:heartbeat:aws-vpc-move-ip) returned: 'ok' (0) + > stderr: Nov 09 02:40:43 INFO: EC2: Moving IP address 10.0.1.65 to this host by adjusting routing table rtb-01b4ea1ae0ec0a4d9 + > stderr: Nov 09 02:40:43 INFO: monitor: check routing table (API call) - rtb-01b4ea1ae0ec0a4d9 + > stderr: Nov 09 02:40:44 INFO: IP 10.0.1.65 not assigned to running interface + > stderr: Nov 09 02:40:44 INFO: EC2: Adjusting routing table and locally configuring IP address + > stderr: Nov 09 02:40:46 INFO: monitor: check routing table (API call) - rtb-01b4ea1ae0ec0a4d9 + +Under normal circumstances, the call to `ec2ip_drop()` within +`ec2ip_get_and_configure` should not be required at all. The drop +function deletes the address before the get_and_configure function +immediately re-adds the address. This call could probably be removed +altogether. Instead, I left the call and silenced its output just in +case of unexpected edge cases. 
+ +Resolves: RHBZ#1895811 + +Signed-off-by: Reid Wahl +--- + heartbeat/aws-vpc-move-ip | 23 ++++++++++++++++++++--- + 1 file changed, 20 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index a5b28ad92..72a89ecb1 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -270,7 +270,13 @@ ec2ip_monitor() { + ocf_log debug "executing command: $cmd" + RESULT=$($cmd | grep "$OCF_RESKEY_ip") + if [ -z "$RESULT" ]; then +- ocf_log warn "IP $OCF_RESKEY_ip not assigned to running interface" ++ if [ "$__OCF_ACTION" = "monitor" ] && ! ocf_is_probe; then ++ level="error" ++ else ++ level="info" ++ fi ++ ++ ocf_log "$level" "IP $OCF_RESKEY_ip not assigned to running interface" + return $OCF_NOT_RUNNING + fi + +@@ -282,11 +288,22 @@ ec2ip_monitor() { + ec2ip_drop() { + cmd="ip addr delete ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface" + ocf_log debug "executing command: $cmd" +- $cmd ++ output=$($cmd 2>&1) + rc=$? ++ + if [ "$rc" -gt 0 ]; then +- ocf_log warn "command failed, rc $rc" ++ if [ "$__OCF_ACTION" = "start" ]; then ++ # expected to fail during start ++ level="debug" ++ else ++ level="warn" ++ fi ++ ++ ocf_log "$level" "command failed, rc $rc" ++ ocf_log "$level" "output/error: $output" + return $OCF_ERR_GENERIC ++ else ++ ocf_log debug "output/error: $output" + fi + + # delete remaining route-entries if any diff --git a/SOURCES/bz1897570-aws-add-imdsv2-support.patch b/SOURCES/bz1897570-aws-add-imdsv2-support.patch new file mode 100644 index 0000000..09772cc --- /dev/null +++ b/SOURCES/bz1897570-aws-add-imdsv2-support.patch @@ -0,0 +1,97 @@ +From 8f10d0eb1e33d38ab6e89015a903620c54edd7c1 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 13 Nov 2020 16:36:20 +0100 +Subject: [PATCH] AWS agents: add support for IMDSv2 + +--- + heartbeat/aws-vpc-move-ip | 5 +++-- + heartbeat/aws-vpc-route53.in | 3 ++- + heartbeat/awseip | 9 +++++---- + heartbeat/awsvip | 7 ++++--- + 4 files changed, 14 insertions(+), 10 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 72a89ecb1..cbb629b00 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -215,7 +215,8 @@ ec2ip_validate() { + return $OCF_ERR_CONFIGURED + fi + +- EC2_INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)" ++ TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++ EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") + + if [ -z "${EC2_INSTANCE_ID}" ]; then + ocf_exit_reason "Instance ID not found. Is this a EC2 instance?" +@@ -329,7 +330,7 @@ ec2ip_get_instance_eni() { + fi + ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + +- cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id" ++ cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id -H \"X-aws-ec2-metadata-token: $TOKEN\"" + ocf_log debug "executing command: $cmd" + EC2_NETWORK_INTERFACE_ID="$(eval $cmd)" + rc=$? 
+diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index b06b93726..4fb17019b 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -347,7 +347,8 @@ r53_monitor() { + _get_ip() { + case $OCF_RESKEY_ip in + local|public) +- IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4)";; ++ TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++ IPADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4 -H "X-aws-ec2-metadata-token: $TOKEN");; + *.*.*.*) + IPADDRESS="${OCF_RESKEY_ip}";; + esac +diff --git a/heartbeat/awseip b/heartbeat/awseip +index 445a03666..de1967774 100755 +--- a/heartbeat/awseip ++++ b/heartbeat/awseip +@@ -149,12 +149,12 @@ awseip_start() { + awseip_monitor && return $OCF_SUCCESS + + if [ -n "${PRIVATE_IP_ADDRESS}" ]; then +- NETWORK_INTERFACES_MACS="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/)" ++ NETWORK_INTERFACES_MACS=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/ -H "X-aws-ec2-metadata-token: $TOKEN") + for MAC in ${NETWORK_INTERFACES_MACS}; do +- curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/local-ipv4s | ++ curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/local-ipv4s -H "X-aws-ec2-metadata-token: $TOKEN" | + grep -q "^${PRIVATE_IP_ADDRESS}$" + if [ $? -eq 0 ]; then +- NETWORK_ID="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id)" ++ NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN") + fi + done + $AWSCLI --profile $OCF_RESKEY_profile ec2 associate-address \ +@@ -244,7 +244,8 @@ AWSCLI="${OCF_RESKEY_awscli}" + ELASTIC_IP="${OCF_RESKEY_elastic_ip}" + ALLOCATION_ID="${OCF_RESKEY_allocation_id}" + PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}" +-INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)" ++TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") + + case $__OCF_ACTION in + start) +diff --git a/heartbeat/awsvip b/heartbeat/awsvip +index 3eb31e6ae..8050107e8 100755 +--- a/heartbeat/awsvip ++++ b/heartbeat/awsvip +@@ -206,9 +206,10 @@ esac + + AWSCLI="${OCF_RESKEY_awscli}" + SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}" +-INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)" +-MAC_ADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/mac)" +-NETWORK_ID="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id)" ++TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") ++MAC_ADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/mac -H "X-aws-ec2-metadata-token: $TOKEN") ++NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN") + + case $__OCF_ACTION in + start) diff --git a/SOURCES/bz1898690-crypt-make-key_file-crypt_type_not-unique.patch 
b/SOURCES/bz1898690-crypt-make-key_file-crypt_type_not-unique.patch new file mode 100644 index 0000000..8cecc16 --- /dev/null +++ b/SOURCES/bz1898690-crypt-make-key_file-crypt_type_not-unique.patch @@ -0,0 +1,31 @@ +From 16236f76d086187f6ae6202153519c1eb2fe4f87 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 24 Nov 2020 10:49:14 +0100 +Subject: [PATCH] crypt: make key_file and crypt_type parameters not unique + +--- + heartbeat/crypt | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/crypt b/heartbeat/crypt +index 0e49b6c2d..7d0a5607c 100755 +--- a/heartbeat/crypt ++++ b/heartbeat/crypt +@@ -86,7 +86,7 @@ The resulting block device path is /dev/mapper/name. + + + +- ++ + + Key file path containing the encryption passphrase + (aka key; see cryptsetup(8)). For LUKS, the passphrase as of the key_file +@@ -96,7 +96,7 @@ parameter is used to decrypt a randomly selected key when the device was created + + + +- ++ + + Encryption (device) type (e.g. "luks" or "luks2"). + diff --git a/SOURCES/bz1899551-NovaEvacuate-fix-delay_evacuate-unset.patch b/SOURCES/bz1899551-NovaEvacuate-fix-delay_evacuate-unset.patch new file mode 100644 index 0000000..7af35cc --- /dev/null +++ b/SOURCES/bz1899551-NovaEvacuate-fix-delay_evacuate-unset.patch @@ -0,0 +1,33 @@ +From 11ac2db8f55aa3e6858d6c1b2ab29ee36b612f03 Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Tue, 17 Nov 2020 15:16:29 +0100 +Subject: [PATCH] Fix delay_evacuate being unset + +In Ie2fe784202d754eda38092479b1ab3ff4d02136a we added an additional +parameter to allow for setting a delay on the evacuation. +While it was tested with a specific delay, the case with a delay +being unset was missed. +Since OCF does not set the defaults from the metadata specification +for a parameter, we need to manually set it ourselves. + +This fixes the following error: +Nov 17 13:00:21 database-1.foo.local pacemaker-execd [185805] (log_op_output) notice: nova-evacuate_monitor_10000[1038417] error output [ /usr/lib/ocf/resource.d/openstack/NovaEvacuate: line 228: [: !=: unary operator expected ] + +Change-Id: I0b7aacd67b77bc44c67fe7da4c494807abbbb4f3 +--- + +diff --git a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate +index 596f520..8aa778c 100644 +--- a/heartbeat/NovaEvacuate ++++ b/heartbeat/NovaEvacuate +@@ -359,6 +359,10 @@ + fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}" + fi + ++ if [ -z "${OCF_RESKEY_evacuate_delay}" ]; then ++ OCF_RESKEY_evacuate_delay=0 ++ fi ++ + if [ $rc != $OCF_SUCCESS ]; then + exit $rc + fi diff --git a/SOURCES/bz1900015-podman-recover-from-storage-out-of-sync.patch b/SOURCES/bz1900015-podman-recover-from-storage-out-of-sync.patch new file mode 100644 index 0000000..e022612 --- /dev/null +++ b/SOURCES/bz1900015-podman-recover-from-storage-out-of-sync.patch @@ -0,0 +1,64 @@ +From 52d09b57a499ed7b3757e0e2954c2783198d5b23 Mon Sep 17 00:00:00 2001 +From: Damien Ciabrini +Date: Mon, 9 Nov 2020 20:42:19 +0100 +Subject: [PATCH] podman: recover from podman's storage being out of sync + +If a system crash while podman is stopping a container (e.g. a fencing action +took place), it might happen that on reboot, podman is not able to recreate +a container as requested by the resource agent. + +When such a start operation fails, it might be because the internal storage +layer still references an old container with the same name, even though podman +itself thinks there is no such container. 
If so, purge the storage layer to try +to clean the corruption and try recreating the container. +--- + heartbeat/podman | 29 +++++++++++++++++++++++++++-- + 1 file changed, 27 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index 81b00ee6f..d4d608ca3 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -345,6 +345,32 @@ create_transient_drop_in_dependency() + } + + ++run_new_container() ++{ ++ local opts=$1 ++ local image=$2 ++ local cmd=$3 ++ local rc ++ ++ ocf_log info "running container $CONTAINER for the first time" ++ ocf_run podman run $opts $image $cmd ++ rc=$? ++ if [ $rc -eq 125 ]; then ++ # If an internal podman error occurred, it might be because ++ # the internal storage layer still references an old container ++ # with the same name, even though podman itself thinks there ++ # is no such container. If so, purge the storage layer to try ++ # to clean the corruption and try again. ++ ocf_log warn "Internal podman error while creating new container $CONTAINER. Retrying." ++ ocf_run podman rm --storage $CONTAINER ++ ocf_run podman run $opts $image $cmd ++ rc=$? ++ fi ++ ++ return $rc ++} ++ ++ + podman_start() + { + local cid +@@ -378,8 +404,7 @@ podman_start() + # make sure any previous container matching our container name is cleaned up first. + # we already know at this point it wouldn't be running + remove_container +- ocf_log info "running container $CONTAINER for the first time" +- ocf_run podman run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd ++ run_new_container "$run_opts" $OCF_RESKEY_image "$OCF_RESKEY_run_cmd" + fi + rc=$? + diff --git a/SOURCES/bz1901357-crypt-1-support-symlink-devices.patch b/SOURCES/bz1901357-crypt-1-support-symlink-devices.patch new file mode 100644 index 0000000..6b4f385 --- /dev/null +++ b/SOURCES/bz1901357-crypt-1-support-symlink-devices.patch @@ -0,0 +1,23 @@ +From 4ded33d34505af19ddf19bfa125b5e6c243ebd94 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 26 Nov 2020 12:56:03 +0100 +Subject: [PATCH] crypt: allow encrypted_dev to be symlink to support using + devices in /dev/disk/... or UUID + +--- + heartbeat/crypt | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/crypt b/heartbeat/crypt +index 7d0a5607c..3ca28b92d 100755 +--- a/heartbeat/crypt ++++ b/heartbeat/crypt +@@ -177,7 +177,7 @@ crypt_validate_all() { + esac + esac + fi +- if [ ! -b "$encrypted_dev" ]; then ++ if [ ! -b "$encrypted_dev" ] && [ ! -L "$encrypted_dev" ]; then + ocf_exit_reason "Encrypted device $encrypted_dev not accessible" + return $OCF_ERR_ARGS + fi diff --git a/SOURCES/bz1901357-crypt-2-dont-sanity-check-during-probe.patch b/SOURCES/bz1901357-crypt-2-dont-sanity-check-during-probe.patch new file mode 100644 index 0000000..4e259d4 --- /dev/null +++ b/SOURCES/bz1901357-crypt-2-dont-sanity-check-during-probe.patch @@ -0,0 +1,44 @@ +From 6a45c28cd074e14a7bc2e2531b15595b9985965c Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 1 Dec 2020 10:11:52 +0100 +Subject: [PATCH] crypt: avoid failing for LVM exclusive by not running full + sanity check during probes + +--- + heartbeat/crypt | 13 +++++++++++-- + 1 file changed, 11 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/crypt b/heartbeat/crypt +index 3ca28b92d..05bded7c5 100755 +--- a/heartbeat/crypt ++++ b/heartbeat/crypt +@@ -177,6 +177,13 @@ crypt_validate_all() { + esac + esac + fi ++ ++ # return early for probes where device might not be available yet ++ # e.g. 
LVM exclusive volumes ++ if ocf_is_probe; then ++ return $OCF_SUCCESS ++ fi ++ + if [ ! -b "$encrypted_dev" ] && [ ! -L "$encrypted_dev" ]; then + ocf_exit_reason "Encrypted device $encrypted_dev not accessible" + return $OCF_ERR_ARGS +@@ -294,11 +301,13 @@ crypt_stop() { + crypt_monitor() { + cryptsetup status $crypt_dev $disable_locks >/dev/null 2>&1 + if [ $? -eq 0 ]; then +- [ -L $crypt_dev_path ] && return $OCF_SUCCESS ++ if [ -b "$encrypted_dev" ] || [ -L $crypt_dev_path ]; then ++ return $OCF_SUCCESS ++ fi + return $OCF_ERR_GENERIC + fi + +- [ "$__OCF_ACTION" = "monitor" ] && ! ocf_is_probe && ocf_exit_reason "Crypt resource not running" ++ [ "$__OCF_ACTION" = "monitor" ] && ! ocf_is_probe && ocf_exit_reason "Crypt resource not running" + return $OCF_NOT_RUNNING + } + diff --git a/SOURCES/bz1902045-iface-vlan-vlan-not-unique.patch b/SOURCES/bz1902045-iface-vlan-vlan-not-unique.patch new file mode 100644 index 0000000..b8448cf --- /dev/null +++ b/SOURCES/bz1902045-iface-vlan-vlan-not-unique.patch @@ -0,0 +1,40 @@ +From 3dd051ed56418dc241417ea02e59db3982b7b92c Mon Sep 17 00:00:00 2001 +From: Oliver Freyermuth +Date: Thu, 26 Nov 2020 10:25:01 +0100 +Subject: [PATCH] heartbeat/iface-vlan: vlan_{interface,id} do not have to be + unique. + +Machines commonly have several vlan_id attached to one interface, +and may also have a vlan_id attached to several interfaces. + +vlan_name will still be unique, usual names are: +- bond_in.83@bond_in +- bond_in.84@bond_in + +fixes #1581 +--- + heartbeat/iface-vlan | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/iface-vlan b/heartbeat/iface-vlan +index cbe7e86da..d0481373c 100755 +--- a/heartbeat/iface-vlan ++++ b/heartbeat/iface-vlan +@@ -89,7 +89,7 @@ vlan_meta_data() { + + + +- ++ + + Define the interface where VLAN should be attached. + +@@ -99,7 +99,7 @@ vlan_meta_data() { + + + +- ++ + + Define the VLAN ID. It has to be a value between 0 and 4094. + diff --git a/SOURCES/bz1902208-LVM-activate-stop-before-storage-service.patch b/SOURCES/bz1902208-LVM-activate-stop-before-storage-service.patch new file mode 100644 index 0000000..1486b29 --- /dev/null +++ b/SOURCES/bz1902208-LVM-activate-stop-before-storage-service.patch @@ -0,0 +1,60 @@ +From 79fb4b2d3d862f4e83b1df72107b6322b420ea34 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Sat, 28 Nov 2020 18:10:03 -0800 +Subject: [PATCH] LVM-activate: Stop before blk-availability.service + +If storage services (e.g., iscsi-shutdown.service) stop before an +LVM-activate resource stops, the managed VG may become unavailable. Then +the LVM-activate resource may fail to deactivate the volume group and +thus fail its stop operation. + +This commit adds a systemd drop-in "After=blk-availability.service" +directive for resource-agents-deps.target during the LVM-activate start +op. blk-availability includes "After=" directives for other storage +services and thus serves as a convenient wrapper. + +blk-availability is not enabled by default, and a "Wants=" drop-in +that's created after Pacemaker starts would not be able to start +blk-availability automatically. So here we also start blk-availability +during LVM_start(). 
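+
+As a sketch of the result (the exact path and file layout come from the
+systemd_drop_in helper in ocf-shellfuncs and should be verified on your
+system), the drop-in created during start looks like:
+
+    # /run/systemd/system/resource-agents-deps.target.d/99-LVM-activate.conf
+    [Unit]
+    After=blk-availability.service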
+ +Resolves RHBZ#1902208 + +Signed-off-by: Reid Wahl +--- + heartbeat/LVM-activate | 22 ++++++++++++++++++++++ + 1 file changed, 22 insertions(+) + +diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate +index 94f9e5813..b8abd7579 100755 +--- a/heartbeat/LVM-activate ++++ b/heartbeat/LVM-activate +@@ -830,6 +830,28 @@ lvm_start() { + local rc + local vol + ++ if systemd_is_running ; then ++ # Create drop-in to deactivate VG before stopping ++ # storage services during shutdown/reboot. ++ after=$(systemctl show resource-agents-deps.target.d \ ++ --property=After | cut -d'=' -f2) ++ ++ case "$after" in ++ *" blk-availability.service "*) ++ ;; ++ *) ++ systemd_drop_in "99-LVM-activate" "After" \ ++ "blk-availability.service" ++ ;; ++ esac ++ ++ # If blk-availability isn't started, the "After=" ++ # directive has no effect. ++ if ! systemctl is-active blk-availability.service ; then ++ systemctl start blk-availability.service ++ fi ++ fi ++ + if lvm_status ; then + ocf_log info "${vol}: is already active." + return $OCF_SUCCESS diff --git a/SOURCES/bz1903677-ocf-shellfuncs-fix-traceback-redirection-bash5.patch b/SOURCES/bz1903677-ocf-shellfuncs-fix-traceback-redirection-bash5.patch new file mode 100644 index 0000000..8472065 --- /dev/null +++ b/SOURCES/bz1903677-ocf-shellfuncs-fix-traceback-redirection-bash5.patch @@ -0,0 +1,45 @@ +From 908431d416076e3ceb70cc95871957d15265a949 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 2 Dec 2020 16:48:32 +0100 +Subject: [PATCH] ocf-shellfuncs: make ocf_is_bash4() detect Bash v4 or greater + (which it was supposed to according to the comments) + +--- + heartbeat/ocf-shellfuncs.in | 13 +++++++++---- + 1 file changed, 9 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index b8d47e3d5..ac75dfc87 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -907,13 +907,18 @@ ocf_unique_rundir() + # NB: FD 9 may be used for tracing with bash >= v4 in case + # OCF_TRACE_FILE is set to a path. + # +-ocf_is_bash4() { ++ocf_bash_has_xtracefd() { + echo "$SHELL" | grep bash > /dev/null && +- [ ${BASH_VERSINFO[0]} = "4" ] ++ [ ${BASH_VERSINFO[0]} -ge 4 ] ++} ++# for backwards compatibility ++ocf_is_bash4() { ++ ocf_bash_has_xtracefd ++ return $? 
+ } + ocf_trace_redirect_to_file() { + local dest=$1 +- if ocf_is_bash4; then ++ if ocf_bash_has_xtracefd; then + exec 9>$dest + BASH_XTRACEFD=9 + else +@@ -922,7 +927,7 @@ ocf_trace_redirect_to_file() { + } + ocf_trace_redirect_to_fd() { + local fd=$1 +- if ocf_is_bash4; then ++ if ocf_bash_has_xtracefd; then + BASH_XTRACEFD=$fd + else + exec 2>&$fd diff --git a/SOURCES/bz1913932-1-gcp-vpc-move-add-project-parameter.patch b/SOURCES/bz1913932-1-gcp-vpc-move-add-project-parameter.patch new file mode 100644 index 0000000..16cfb10 --- /dev/null +++ b/SOURCES/bz1913932-1-gcp-vpc-move-add-project-parameter.patch @@ -0,0 +1,86 @@ +From 560683500b3f9d5d8e183a569daea27422ae5268 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Thu, 7 Jan 2021 12:25:04 -0800 +Subject: [PATCH] gcp-vpc-move-route, gcp-vpc-move-vip: Parameterize project ID + +Resolves: RHBZ#1913932 +Resolves: RHBZ#1913936 + +Signed-off-by: Reid Wahl +--- + heartbeat/gcp-vpc-move-route.in | 13 ++++++++++++- + heartbeat/gcp-vpc-move-vip.in | 16 ++++++++++++++-- + 2 files changed, 26 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index d8e8ea8dd..179eba15a 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -106,6 +106,16 @@ Name of the VPC network + + + ++ ++ ++Project ID of the instance. It can be useful to set this attribute if ++the instance is in a shared service project. Otherwise, the agent should ++be able to determine the project ID automatically. ++ ++Project ID ++ ++ ++ + + + Name of the network interface +@@ -215,7 +225,8 @@ def validate(ctx): + try: + ctx.instance = get_metadata('instance/name') + ctx.zone = get_metadata('instance/zone').split('/')[-1] +- ctx.project = get_metadata('project/project-id') ++ ctx.project = os.environ.get( ++ 'OCF_RESKEY_project', get_metadata('project/project-id')) + except Exception as e: + logger.error( + 'Instance information not found. Is this a GCE instance ?: %s', str(e)) +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index 01d91a59d..e792f71d5 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -75,6 +75,16 @@ METADATA = \ + Host list + + ++ ++ ++ Project ID of the instance. It can be useful to set this ++ attribute if the instance is in a shared service project. ++ Otherwise, the agent should be able to determine the project ID ++ automatically. ++ ++ Project ID ++ ++ + + If enabled (set to true), IP failover logs will be posted to stackdriver logging + Stackdriver-logging support +@@ -267,7 +277,8 @@ def get_instances_list(project, exclude): + def gcp_alias_start(alias): + my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] +- project = get_metadata('project/project-id') ++ project = os.environ.get( ++ 'OCF_RESKEY_project', get_metadata('project/project-id')) + + if alias in my_aliases: + # TODO: Do we need to check alias_range_name? 
+@@ -315,7 +326,8 @@ def gcp_alias_start(alias): + def gcp_alias_stop(alias): + my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] +- project = get_metadata('project/project-id') ++ project = os.environ.get( ++ 'OCF_RESKEY_project', get_metadata('project/project-id')) + + if alias in my_aliases: + logger.info('Removing %s from %s' % (alias, THIS_VM)) diff --git a/SOURCES/bz1913932-2-gcp-vpc-move-route-fixes.patch b/SOURCES/bz1913932-2-gcp-vpc-move-route-fixes.patch new file mode 100644 index 0000000..a94f0ee --- /dev/null +++ b/SOURCES/bz1913932-2-gcp-vpc-move-route-fixes.patch @@ -0,0 +1,106 @@ +From 523c4cee64b3b8ee9f603a940d83a6628531078d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 19 Jan 2021 10:56:47 +0100 +Subject: [PATCH 1/2] gcp-vpc-move-route: fix stop-action when route stopped, + and fix check_conflicting_routes() + +--- + heartbeat/gcp-vpc-move-route.in | 23 +++++++++++++++++------ + 1 file changed, 17 insertions(+), 6 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 179eba15a..9fe985832 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -252,8 +252,19 @@ def validate(ctx): + def check_conflicting_routes(ctx): + fl = '(destRange = "%s*") AND (network = "%s") AND (name != "%s")' % ( + ctx.ip, ctx.vpc_network_url, ctx.route_name) +- request = ctx.conn.routes().list(project=ctx.project, filter=fl) +- response = request.execute() ++ try: ++ request = ctx.conn.routes().list(project=ctx.project, filter=fl) ++ response = request.execute() ++ except googleapiclient.errors.HttpError as e: ++ if e.resp.status == 404: ++ logger.error('VPC network not found') ++ if 'stop' in sys.argv[1]: ++ sys.exit(OCF_SUCCESS) ++ else: ++ sys.exit(OCF_ERR_CONFIGURED) ++ else: ++ raise ++ + route_list = response.get('items', None) + if route_list: + logger.error( +@@ -353,16 +364,16 @@ def route_monitor(ctx): + logger.info('GCP route monitor: checking route table') + + # Ensure that there is no route that we are not aware of that is also handling our IP +- check_conflicting_routes ++ check_conflicting_routes(ctx) + + try: + request = ctx.conn.routes().get(project=ctx.project, route=ctx.route_name) + response = request.execute() + except googleapiclient.errors.HttpError as e: +- if 'Insufficient Permission' in e.content: +- return OCF_ERR_PERM +- elif e.resp.status == 404: ++ if e.resp.status == 404: + return OCF_NOT_RUNNING ++ elif 'Insufficient Permission' in e.content: ++ return OCF_ERR_PERM + else: + raise + + +From 50dbfc3230e87b8d29163c235e6866d15fd6fc1b Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 19 Jan 2021 11:50:22 +0100 +Subject: [PATCH 2/2] gcp-vpc-move-vip: correctly return error when no + instances are returned + +--- + heartbeat/gcp-vpc-move-vip.in | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index e792f71d5..bbbd87b7a 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -263,8 +263,14 @@ def get_instances_list(project, exclude): + hostlist = [] + request = CONN.instances().aggregatedList(project=project) + while request is not None: +- response = request.execute() +- zones = response.get('items', {}) ++ try: ++ response = request.execute() ++ zones = response.get('items', {}) ++ except googleapiclient.errors.HttpError as e: ++ if e.resp.status == 404: ++ 
logger.debug('get_instances_list(): no instances found') ++ return '' ++ + for zone in zones.values(): + for inst in zone.get('instances', []): + if inst['name'] != exclude: +@@ -303,9 +309,13 @@ def gcp_alias_start(alias): + break + + # Add alias IP range to localhost +- add_alias( +- project, my_zone, THIS_VM, alias, +- os.environ.get('OCF_RESKEY_alias_range_name')) ++ try: ++ add_alias( ++ project, my_zone, THIS_VM, alias, ++ os.environ.get('OCF_RESKEY_alias_range_name')) ++ except googleapiclient.errors.HttpError as e: ++ if e.resp.status == 404: ++ sys.exit(OCF_ERR_CONFIGURED) + + # Verify that the IP range has been added + my_aliases = get_localhost_aliases() diff --git a/SOURCES/bz1913932-3-gcp-vpc-move-route-make-vpc_network-optional.patch b/SOURCES/bz1913932-3-gcp-vpc-move-route-make-vpc_network-optional.patch new file mode 100644 index 0000000..5def90a --- /dev/null +++ b/SOURCES/bz1913932-3-gcp-vpc-move-route-make-vpc_network-optional.patch @@ -0,0 +1,22 @@ +From 4812c67894063f8125a3915d32da168931f088c6 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 25 Feb 2021 16:49:55 +0100 +Subject: [PATCH] gcp-vpc-move-route: make "vpc_network" optional + +--- + heartbeat/gcp-vpc-move-route.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 9fe985832..fd2d2ec59 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -98,7 +98,7 @@ subnet ranges + + + +- ++ + + Name of the VPC network + diff --git a/SOURCES/bz1920698-podman-return-not-running-probe.patch b/SOURCES/bz1920698-podman-return-not-running-probe.patch new file mode 100644 index 0000000..b8420f5 --- /dev/null +++ b/SOURCES/bz1920698-podman-return-not-running-probe.patch @@ -0,0 +1,42 @@ +From 6877b20a83cb691884996bf77385259388fdebb2 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 3 Mar 2021 17:06:12 +0100 +Subject: [PATCH] podman: return OCF_NOT_RUNNING when monitor cmd fails (not + running) + +--- + heartbeat/podman | 21 +++++++++++++-------- + 1 file changed, 13 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/podman b/heartbeat/podman +index 82ea14624..5b707f3f5 100755 +--- a/heartbeat/podman ++++ b/heartbeat/podman +@@ -204,14 +204,19 @@ monitor_cmd_exec() + # 125: no container with name or ID ${CONTAINER} found + # 126: container state improper (not running) + # 127: any other error +- if [ $rc -eq 125 ] || [ $rc -eq 126 ]; then +- rc=$OCF_NOT_RUNNING +- elif [ $rc -ne 0 ]; then +- ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out" +- rc=$OCF_ERR_GENERIC +- else +- ocf_log debug "monitor cmd passed: exit code = $rc" +- fi ++ # 255: podman 2+: container not running ++ case "$rc" in ++ 125|126|255) ++ rc=$OCF_NOT_RUNNING ++ ;; ++ 0) ++ ocf_log debug "monitor cmd passed: exit code = $rc" ++ ;; ++ *) ++ ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out" ++ rc=$OCF_ERR_GENERIC ++ ;; ++ esac + + return $rc + } diff --git a/SOURCES/bz1924363-nfsserver-error-check-unmount.patch b/SOURCES/bz1924363-nfsserver-error-check-unmount.patch new file mode 100644 index 0000000..e77e92d --- /dev/null +++ b/SOURCES/bz1924363-nfsserver-error-check-unmount.patch @@ -0,0 +1,57 @@ +From dc4fc6fb51481e62c763212129e7dbae4cb663fd Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Tue, 2 Feb 2021 17:55:40 -0800 +Subject: [PATCH] nfsserver: Error-check unbind_tree + +Fail to stop if unmounting rpcpipefs_dir or /var/lib/nfs fails. 
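+
+A quick way to confirm what the stop op now guards against (a hedged
+example; mount output formats vary by distro):
+
+    mount | grep ' on /var/lib/nfs '
+    mount | grep " on $OCF_RESKEY_rpcpipefs_dir "
+
+If either command still matches after the umount attempts, the agent
+returns $OCF_ERR_GENERIC instead of claiming a clean stop.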
+ +Resolves: RHBZ#1924363 + +Signed-off-by: Reid Wahl +--- + heartbeat/nfsserver | 23 ++++++++++++++++++++--- + 1 file changed, 20 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 80d20676b..96b19abe3 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -465,9 +465,20 @@ unbind_tree () + sleep 1 + i=$((i + 1)) + done ++ ++ if mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "; then ++ ocf_log err "Failed to unmount $OCF_RESKEY_rpcpipefs_dir" ++ return $OCF_ERR_GENERIC ++ fi ++ + if is_bound /var/lib/nfs; then +- umount /var/lib/nfs ++ if ! umount /var/lib/nfs; then ++ ocf_log err "Failed to unmount /var/lib/nfs" ++ return $OCF_ERR_GENERIC ++ fi + fi ++ ++ return $OCF_SUCCESS + } + + binary_status() +@@ -836,8 +847,14 @@ nfsserver_stop () + esac + + unbind_tree +- ocf_log info "NFS server stopped" +- return 0 ++ rc=$? ++ if [ "$rc" -ne $OCF_SUCCESS ]; then ++ ocf_exit_reason "Failed to unmount a bind mount" ++ else ++ ocf_log info "NFS server stopped" ++ fi ++ ++ return $rc + } + + nfsserver_validate () diff --git a/SOURCES/bz1932863-VirtualDomain-fix-pid-status.patch b/SOURCES/bz1932863-VirtualDomain-fix-pid-status.patch new file mode 100644 index 0000000..95f2f96 --- /dev/null +++ b/SOURCES/bz1932863-VirtualDomain-fix-pid-status.patch @@ -0,0 +1,31 @@ +From 500de79739cd39808fb48fa556c9b9b9fe2e8acd Mon Sep 17 00:00:00 2001 +From: Matthias Hensler +Date: Thu, 18 Feb 2021 12:49:49 +0100 +Subject: [PATCH] fix pid_status() for VirtualDomain on EL8 + +see https://github.com/ClusterLabs/resource-agents/issues/1613 +--- + heartbeat/VirtualDomain | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain +index eb41e3e22..f9cd21fc7 100755 +--- a/heartbeat/VirtualDomain ++++ b/heartbeat/VirtualDomain +@@ -421,14 +421,14 @@ pid_status() + case "$emulator" in + qemu-kvm|qemu-dm|qemu-system-*) + rc=$OCF_NOT_RUNNING +- ps awx | grep -E "[q]emu-(kvm|dm|system).*-name $DOMAIN_NAME " > /dev/null 2>&1 ++ ps awx | grep -E "[q]emu-(kvm|dm|system).*-name ($DOMAIN_NAME|[^ ]*guest=$DOMAIN_NAME(,[^ ]*)?) " > /dev/null 2>&1 + if [ $? -eq 0 ]; then + rc=$OCF_SUCCESS + fi + ;; + libvirt_lxc) + rc=$OCF_NOT_RUNNING +- ps awx | grep -E "[l]ibvirt_lxc.*-name $DOMAIN_NAME " > /dev/null 2>&1 ++ ps awx | grep -E "[l]ibvirt_lxc.*-name ($DOMAIN_NAME|[^ ]*guest=$DOMAIN_NAME(,[^ ]*)?) " > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + rc=$OCF_SUCCESS + fi diff --git a/SOURCES/bz1934651-db2-add-PRIMARY-REMOTE_CATCHUP_PENDING-CONNECTED.patch b/SOURCES/bz1934651-db2-add-PRIMARY-REMOTE_CATCHUP_PENDING-CONNECTED.patch new file mode 100644 index 0000000..59fb0ef --- /dev/null +++ b/SOURCES/bz1934651-db2-add-PRIMARY-REMOTE_CATCHUP_PENDING-CONNECTED.patch @@ -0,0 +1,23 @@ +From dd5394180267c652d0928db8c5508d9977893fe5 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 18 Mar 2021 16:23:10 +0100 +Subject: [PATCH] db2: add PRIMARY/REMOTE_CATCHUP_PENDING/CONNECTED status to + promote-check + +--- + heartbeat/db2 | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/db2 b/heartbeat/db2 +index a57fd2bb6..459136cbd 100755 +--- a/heartbeat/db2 ++++ b/heartbeat/db2 +@@ -767,7 +767,7 @@ db2_promote() { + return $OCF_SUCCESS + ;; + +- PRIMARY/PEER/*|PRIMARY/REMOTE_CATCHUP/*|Primary/Peer) ++ PRIMARY/PEER/*|PRIMARY/REMOTE_CATCHUP/*|PRIMARY/REMOTE_CATCHUP_PENDING/CONNECTED|Primary/Peer) + # nothing to do, only update pacemaker's view + echo MASTER > $STATE_FILE + return $OCF_SUCCESS diff --git a/SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch b/SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch new file mode 100644 index 0000000..b9ed544 --- /dev/null +++ b/SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch @@ -0,0 +1,52 @@ +From f91804ff4772e3ab41f46e28d370f57898700333 Mon Sep 17 00:00:00 2001 +From: Georg Brandl +Date: Thu, 10 Dec 2020 08:19:21 +0100 +Subject: [PATCH] fixes #1625: infinite loop in SML lexer + +Reason was a lookahead-only pattern which was included in the state +where the lookahead was transitioning to. +--- + pygments/lexers/ml.py | 12 ++++++------ + 2 files changed, 14 insertions(+), 6 deletions(-) + +diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py +index 8ca8ce3eb..f2ac367c5 100644 +--- a/pygments/lexers/ml.py ++++ b/pygments/lexers/ml.py +@@ -142,7 +142,7 @@ def id_callback(self, match): + (r'#\s+(%s)' % symbolicid_re, Name.Label), + # Some reserved words trigger a special, local lexer state change + (r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'), +- (r'(?=\b(exception)\b(?!\'))', Text, ('ename')), ++ (r'\b(exception)\b(?!\')', Keyword.Reserved, 'ename'), + (r'\b(functor|include|open|signature|structure)\b(?!\')', + Keyword.Reserved, 'sname'), + (r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'), +@@ -315,15 +315,14 @@ def id_callback(self, match): + 'ename': [ + include('whitespace'), + +- (r'(exception|and)\b(\s+)(%s)' % alphanumid_re, ++ (r'(and\b)(\s+)(%s)' % alphanumid_re, + bygroups(Keyword.Reserved, Text, Name.Class)), +- (r'(exception|and)\b(\s*)(%s)' % symbolicid_re, ++ (r'(and\b)(\s*)(%s)' % symbolicid_re, + bygroups(Keyword.Reserved, Text, Name.Class)), + (r'\b(of)\b(?!\')', Keyword.Reserved), ++ (r'(%s)|(%s)' % (alphanumid_re, symbolicid_re), Name.Class), + +- include('breakout'), +- include('core'), +- (r'\S+', Error), ++ default('#pop'), + ], + + 'datcon': [ +@@ -445,6 +444,7 @@ class OcamlLexer(RegexLexer): + ], + } + ++ + class OpaLexer(RegexLexer): + """ + Lexer for the Opa language (http://opalang.org). 
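+
+The SML loop fixed above is easy to reproduce in isolation (a minimal
+sketch, not pygments code; names are illustrative):
+
+    import re
+
+    # A lookahead is zero-width: it matches without consuming input.
+    pat = re.compile(r'(?=\bexception\b)')
+    m = pat.match('exception Foo of string', 0)
+    print(m.start(), m.end())  # -> 0 0
+
+    # A tokenizer that advances to m.end() and then re-enters a state
+    # containing the same pattern never moves past position 0, which
+    # is why the rule now consumes the keyword itself.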
diff --git a/SOURCES/bz1937142-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch b/SOURCES/bz1937142-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch new file mode 100644 index 0000000..699dc50 --- /dev/null +++ b/SOURCES/bz1937142-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch @@ -0,0 +1,118 @@ +From 760680df771b6e2a9fbcd2f6d9862df4ec1a86de Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Tue, 9 Mar 2021 18:25:52 -0800 +Subject: [PATCH 1/2] azure-lb: Be quiet during stop operation + +Currently, it logs "kill () No such process" to stderr during stops. + +A stop operation is expected to run `kill -s 0 $pid` for a nonexistent +PID, so log that at debug level. + +A start or monitor operation's `kill -s 0 $pid` should always succeed, +so any output is unexpected and an error. + +Also remove "local" bashism. + +Signed-off-by: Reid Wahl +--- + heartbeat/azure-lb | 22 ++++++++++++++-------- + 1 file changed, 14 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb +index 65a12235b..863132744 100755 +--- a/heartbeat/azure-lb ++++ b/heartbeat/azure-lb +@@ -93,12 +93,18 @@ getpid() { + + lb_monitor() { + if test -f "$pidfile"; then +- if pid=`getpid $pidfile` && [ "$pid" ] && kill -s 0 $pid; then +- return $OCF_SUCCESS +- else +- # pidfile w/o process means the process died +- return $OCF_ERR_GENERIC ++ [ "$__OCF_ACTION" = "stop" ] && level="debug" || level="err" ++ ++ if pid=$(getpid "$pidfile") && [ -n "$pid" ]; then ++ output=$(kill -s 0 "$pid" 2>&1) ++ mon_rc=$? ++ ++ [ -n "$output" ] && ocf_log "$level" "$output" ++ [ "$mon_rc" -eq 0 ] && return $OCF_SUCCESS + fi ++ ++ # pidfile w/o process means the process died ++ return $OCF_ERR_GENERIC + else + return $OCF_NOT_RUNNING + fi +@@ -131,7 +137,7 @@ lb_start() { + } + + lb_stop() { +- local rc=$OCF_SUCCESS ++ stop_rc=$OCF_SUCCESS + + if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then + # Allow 2/3 of the action timeout for the orderly shutdown +@@ -160,7 +166,7 @@ lb_stop() { + while :; do + if ! lb_monitor; then + ocf_log warn "SIGKILL did the job." +- rc=$OCF_SUCCESS ++ stop_rc=$OCF_SUCCESS + break + fi + ocf_log info "The job still hasn't stopped yet. Waiting..." +@@ -168,7 +174,7 @@ lb_stop() { + done + fi + rm -f $pidfile +- return $rc ++ return $stop_rc + } + + lb_validate() { + +From 10f39e90d6b04c28752a4f9adc94dfc03d9d61b8 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Tue, 9 Mar 2021 18:32:45 -0800 +Subject: [PATCH 2/2] azure-lb: Redirect stdout and stderr to /dev/null + +This fixes a regression introduced in commit d22700fc. + +When the nc listener process created by an azure-lb resource attempts to +write to stdout, it dies with an EPIPE error. + +This can happen when random/garbage input is sent to the nc listener, as +may happen during a port scan. For example, if the listener is on port +62000, and a client sends some text (e.g., `echo test | nc node1 +62000`), then the listener attempts to echo "test" to its stdout. This +fails with an EPIPE. + +Prior to commit d22700fc, all output was redirected to the pid file. +This caused its own problems, but it prevented this particular behavior. + +The fix is to redirect the listener's stdout and stderr to /dev/null. 
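+
+Concretely (a sketch; $cmd is the nc listener command assembled in
+lb_start):
+
+    $cmd &                  # before: listener shares the agent's stdio
+    $cmd >/dev/null 2>&1 &  # after: garbage input can no longer EPIPE it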
+ +Resolves: RHBZ#1937142 +Resolves: RHBZ#1937151 + +Signed-off-by: Reid Wahl +--- + heartbeat/azure-lb | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb +index 863132744..ade1b4577 100755 +--- a/heartbeat/azure-lb ++++ b/heartbeat/azure-lb +@@ -119,7 +119,7 @@ lb_start() { + if ! lb_monitor; then + ocf_log debug "Starting $process: $cmd" + # Execute the command as created above +- $cmd & ++ $cmd >/dev/null 2>&1 & + echo $! > $pidfile + if lb_monitor; then + ocf_log debug "$process: $cmd started successfully, calling monitor" diff --git a/SOURCES/bz1939281-aws-vpc-move-ip-add-ENI-lookup.patch b/SOURCES/bz1939281-aws-vpc-move-ip-add-ENI-lookup.patch new file mode 100644 index 0000000..94d4d95 --- /dev/null +++ b/SOURCES/bz1939281-aws-vpc-move-ip-add-ENI-lookup.patch @@ -0,0 +1,141 @@ +From b727fe4e2a0f4c88fca0ed9f90f57e570253c961 Mon Sep 17 00:00:00 2001 +From: Costas Tyfoxylos +Date: Wed, 26 Aug 2020 15:18:00 +0300 +Subject: [PATCH 1/2] aws-vpc-move-ip: Implemented optional eni lookup instead + of the default instance id. + +In a shared network pattern where the cluster resides in shared subnets the instance ids of the nodes are not retrievable but the eni ids are and this optional feature gives transparent support in that situation. +--- + heartbeat/aws-vpc-move-ip | 41 +++++++++++++++++++++++++++++++-------- + 1 file changed, 33 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 1b540caec..bc82428e5 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -44,6 +44,7 @@ OCF_RESKEY_routing_table_default="" + OCF_RESKEY_routing_table_role_default="" + OCF_RESKEY_interface_default="eth0" + OCF_RESKEY_monapi_default="false" ++OCF_RESKEY_lookup_type_default="InstanceId" + + : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} + : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} +@@ -54,6 +55,7 @@ OCF_RESKEY_monapi_default="false" + : ${OCF_RESKEY_routing_table_role=${OCF_RESKEY_routing_table_role_default}} + : ${OCF_RESKEY_interface=${OCF_RESKEY_interface_default}} + : ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}} ++: ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}} + + [ -n "$OCF_RESKEY_region" ] && region_opt="--region $OCF_RESKEY_region" + ####################################################################### +@@ -154,6 +156,17 @@ Enable enhanced monitoring using AWS API calls to check route table entry + Enhanced Monitoring + + ++ ++ ++ ++Name of resource type to lookup in route table. ++"InstanceId" : EC2 instance ID. (default) ++"NetworkInterfaceId" : ENI ID. (useful in shared VPC setups). 
++ ++lookup type for route table resource ++ ++ ++ + + + +@@ -187,7 +200,7 @@ execute_cmd_as_role(){ + + ec2ip_set_address_param_compat(){ + # Include backward compatibility for the deprecated address parameter +- if [ -z "$OCF_RESKEY_ip" ] && [ -n "$OCF_RESKEY_address" ]; then ++ if [ -z "$OCF_RESKEY_ip" ] && [ -n "$OCF_RESKEY_address" ]; then + OCF_RESKEY_ip="$OCF_RESKEY_address" + fi + } +@@ -213,16 +226,24 @@ ec2ip_validate() { + } + + ec2ip_monitor() { +- MON_RES="" ++ MON_RES="" ++ if [ "${OCF_RESKEY_lookup_type}" = "NetworkInterfaceId" ]; then ++ EC2_ID="$(ec2ip_get_instance_eni)" ++ RESOURCE_TYPE="interface" ++ else ++ EC2_ID="$EC2_INSTANCE_ID" ++ RESOURCE_TYPE="instance" ++ fi ++ + if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then + for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do + ocf_log info "monitor: check routing table (API call) - $rtb" + if [[ -z "${OCF_RESKEY_routing_table_role}" ]]; then +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type" + ocf_log debug "executing command: $cmd" + ROUTE_TO_INSTANCE="$($cmd)" + else +- cmd="$OCF_RESKEY_awscli $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" ++ cmd="$OCF_RESKEY_awscli $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type" + ROUTE_TO_INSTANCE="$(execute_cmd_as_role "$cmd" $OCF_RESKEY_routing_table_role)" + fi + ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" +@@ -230,8 +251,8 @@ ec2ip_monitor() { + ROUTE_TO_INSTANCE="" + fi + +- if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then +- ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE on $rtb" ++ if [ "$EC2_ID" != "$ROUTE_TO_INSTANCE" ]; then ++ ocf_log warn "not routed to this $RESOURCE_TYPE ($EC2_ID) but to $RESOURCE_TYPE $ROUTE_TO_INSTANCE on $rtb" + MON_RES="$MON_RES $rtb" + fi + sleep 1 +@@ -275,7 +296,7 @@ ec2ip_drop() { + return $OCF_SUCCESS + } + +-ec2ip_get_and_configure() { ++ec2ip_get_instance_eni() { + MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address" + if [ -f $MAC_FILE ]; then + cmd="cat ${MAC_FILE}" +@@ -300,7 +321,11 @@ ec2ip_get_and_configure() { + return $OCF_ERR_GENERIC + fi + ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}" ++ echo $EC2_NETWORK_INTERFACE_ID ++} + ++ec2ip_get_and_configure() { ++ EC2_NETWORK_INTERFACE_ID="$(ec2ip_get_instance_eni)" + for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do + if [ -z "${OCF_RESKEY_routing_table_role}" ]; then + cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" + +From f4c8daae098dd33bdd5136ca4846eb505110e006 Mon Sep 17 00:00:00 2001 +From: Sander Botman +Date: Fri, 28 Aug 2020 22:01:03 +0200 +Subject: [PATCH 2/2] aws-vpc-move-ip: 
Fix the region option + +--- + heartbeat/aws-vpc-move-ip | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index bc82428e5..a5b28ad92 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -243,7 +243,7 @@ ec2ip_monitor() { + ocf_log debug "executing command: $cmd" + ROUTE_TO_INSTANCE="$($cmd)" + else +- cmd="$OCF_RESKEY_awscli $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type" ++ cmd="$OCF_RESKEY_awscli $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type" + ROUTE_TO_INSTANCE="$(execute_cmd_as_role "$cmd" $OCF_RESKEY_routing_table_role)" + fi + ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" diff --git a/SOURCES/bz1939992-awsvip-dont-partially-match-IPs.patch b/SOURCES/bz1939992-awsvip-dont-partially-match-IPs.patch new file mode 100644 index 0000000..1a0e86e --- /dev/null +++ b/SOURCES/bz1939992-awsvip-dont-partially-match-IPs.patch @@ -0,0 +1,23 @@ +From 3491a6ad30830a8545defa5a417a7db46b093904 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 17 Mar 2021 12:39:10 +0100 +Subject: [PATCH] awsvip: dont partially match similar IPs during + monitor-action + +--- + heartbeat/awsvip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/awsvip b/heartbeat/awsvip +index 7d0bf35b6..044d049c6 100755 +--- a/heartbeat/awsvip ++++ b/heartbeat/awsvip +@@ -172,7 +172,7 @@ awsvip_monitor() { + --instance-id "${INSTANCE_ID}" \ + --query 'Reservations[].Instances[].NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \ + --output text | \ +- grep -q "${SECONDARY_PRIVATE_IP}" ++ grep -qE "(^|\s)${SECONDARY_PRIVATE_IP}(\s|$)" + RET=$? 
+ + if [ $RET -ne 0 ]; then diff --git a/SOURCES/bz1940094-aws-agents-dont-spam-logs.patch b/SOURCES/bz1940094-aws-agents-dont-spam-logs.patch new file mode 100644 index 0000000..97ff44e --- /dev/null +++ b/SOURCES/bz1940094-aws-agents-dont-spam-logs.patch @@ -0,0 +1,64 @@ +From 59b0840d262900d0eaa8b19df3ede55eea5250d2 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 17 Mar 2021 12:10:59 +0100 +Subject: [PATCH] AWS agents: dont spam log files when getting token + +--- + heartbeat/aws-vpc-move-ip | 2 +- + heartbeat/aws-vpc-route53.in | 2 +- + heartbeat/awseip | 2 +- + heartbeat/awsvip | 2 +- + 4 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index cbb629b00..3ca3d6bd6 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -215,7 +215,7 @@ ec2ip_validate() { + return $OCF_ERR_CONFIGURED + fi + +- TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++ TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") + + if [ -z "${EC2_INSTANCE_ID}" ]; then +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index 4fb17019b..21948eaca 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -347,7 +347,7 @@ r53_monitor() { + _get_ip() { + case $OCF_RESKEY_ip in + local|public) +- TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++ TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + IPADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4 -H "X-aws-ec2-metadata-token: $TOKEN");; + *.*.*.*) + IPADDRESS="${OCF_RESKEY_ip}";; +diff --git a/heartbeat/awseip b/heartbeat/awseip +index de1967774..12ffffaa3 100755 +--- a/heartbeat/awseip ++++ b/heartbeat/awseip +@@ -244,7 +244,7 @@ AWSCLI="${OCF_RESKEY_awscli}" + ELASTIC_IP="${OCF_RESKEY_elastic_ip}" + ALLOCATION_ID="${OCF_RESKEY_allocation_id}" + PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}" +-TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") + + case $__OCF_ACTION in +diff --git a/heartbeat/awsvip b/heartbeat/awsvip +index 8050107e8..7d0bf35b6 100755 +--- a/heartbeat/awsvip ++++ b/heartbeat/awsvip +@@ -206,7 +206,7 @@ esac + + AWSCLI="${OCF_RESKEY_awscli}" + SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}" +-TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") + MAC_ADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/mac -H "X-aws-ec2-metadata-token: $TOKEN") + NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN") diff --git 
a/SOURCES/bz1940363-1-galera-redis-use-output-as.patch b/SOURCES/bz1940363-1-galera-redis-use-output-as.patch new file mode 100644 index 0000000..2d6473f --- /dev/null +++ b/SOURCES/bz1940363-1-galera-redis-use-output-as.patch @@ -0,0 +1,100 @@ +From f510d8e78ce65736ca5a72bd8125d31dcb4ff621 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 16 Jun 2020 13:32:18 +0200 +Subject: [PATCH 1/2] galera/redis: use --output-as for crm_mon w/newer + Pacemaker, and prepare for Promoted role + +--- + heartbeat/galera | 9 ++++++++- + heartbeat/redis.in | 9 ++++++++- + 2 files changed, 16 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/galera b/heartbeat/galera +index 4a313e24b..ba3de4b81 100755 +--- a/heartbeat/galera ++++ b/heartbeat/galera +@@ -441,7 +441,14 @@ master_exists() + return 1 + fi + # determine if a master instance is already up and is healthy +- crm_mon --as-xml | grep "resource.*id=\"${INSTANCE_ATTR_NAME}\".*role=\"Master\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" > /dev/null 2>&1 ++ ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.1.0" ++ res=$? ++ if [ -z "$OCF_RESKEY_crm_feature_set" ] || [ $res -eq 2 ]; then ++ XMLOPT="--output-as=xml" ++ else ++ XMLOPT="--as-xml" ++ fi ++ crm_mon -1 $XMLOPT | grep -q -i -E "resource.*id=\"${INSTANCE_ATTR_NAME}\".*role=\"(Promoted|Master)\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" + return $? + } + +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index da7230a49..7e534db4a 100755 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -272,7 +272,14 @@ master_is_active() + { + if [ -z "$MASTER_ACTIVE_CACHED" ]; then + # determine if a master instance is already up and is healthy +- crm_mon --as-xml | grep "resource.*id=\"${OCF_RESOURCE_INSTANCE}\".*role=\"Master\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" > /dev/null 2>&1 ++ ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.1.0" ++ res=$? ++ if [ -z "$OCF_RESKEY_crm_feature_set" ] || [ $res -eq 2 ]; then ++ XMLOPT="--output-as=xml" ++ else ++ XMLOPT="--as-xml" ++ fi ++ crm_mon -1 $XMLOPT | grep -q -i -E "resource.*id=\"${OCF_RESOURCE_INSTANCE}\".* role=\"(Promoted|Master)\".* active=\"true\".* orphaned=\"false\".* failed=\"false\"" + MASTER_ACTIVE=$? + MASTER_ACTIVE_CACHED="true" + fi + +From 6f36172da222275124fb44736b4801ea884c3dd0 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 31 Jul 2020 14:31:47 +0200 +Subject: [PATCH 2/2] galera/redis: support RHEL 8.1 pacemaker + +based on dfdb4e645638948cd4dafaba9d65ebddb2152b2c that solves this issue +in pgsql +--- + heartbeat/galera | 7 +++++++ + heartbeat/redis.in | 7 +++++++ + 2 files changed, 14 insertions(+) + +diff --git a/heartbeat/galera b/heartbeat/galera +index ba3de4b81..69d75a854 100755 +--- a/heartbeat/galera ++++ b/heartbeat/galera +@@ -445,6 +445,13 @@ master_exists() + res=$? + if [ -z "$OCF_RESKEY_crm_feature_set" ] || [ $res -eq 2 ]; then + XMLOPT="--output-as=xml" ++ ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.2.0" ++ if [ $? -eq 1 ]; then ++ crm_mon -1 $XMLOPT >/dev/null 2>&1 ++ if [ $? -ne 0 ]; then ++ XMLOPT="--as-xml" ++ fi ++ fi + else + XMLOPT="--as-xml" + fi +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index 7e534db4a..8afdf08a9 100755 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -276,6 +276,13 @@ master_is_active() + res=$? + if [ -z "$OCF_RESKEY_crm_feature_set" ] || [ $res -eq 2 ]; then + XMLOPT="--output-as=xml" ++ ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.2.0" ++ if [ $? 
-eq 1 ]; then ++ crm_mon -1 $XMLOPT >/dev/null 2>&1 ++ if [ $? -ne 0 ]; then ++ XMLOPT="--as-xml" ++ fi ++ fi + else + XMLOPT="--as-xml" + fi diff --git a/SOURCES/bz1940363-2-bundle-disable-validate-with.patch b/SOURCES/bz1940363-2-bundle-disable-validate-with.patch new file mode 100644 index 0000000..c480f89 --- /dev/null +++ b/SOURCES/bz1940363-2-bundle-disable-validate-with.patch @@ -0,0 +1,176 @@ +From 716db89c1ab02ce4fed8ba0916ff1f6d01b4b636 Mon Sep 17 00:00:00 2001 +From: Damien Ciabrini +Date: Thu, 18 Mar 2021 14:11:11 +0100 +Subject: [PATCH] bundle: run crm_mon without performing validation + +We have a use case in OpenStack where the resource agents run +inside bundles (containerized pacemaker remotes), and we cannot +always guarantee an exact match of pacemaker version on the host +and in containers. This can make crm_mon fail to run because +it may not have the latest version of the schema to validate the +CIB that it's getting. + +Add a function crm_mon_no_validation to allow clustered services +like galera, redis and rabbitmq to work when there is a minor +version mismatch between host and containers. This doesn't +impact non-containerized use cases, there is a single version +of pacemaker binaries in this case. + +Related-Bug: rhbz#1940363 +--- + configure.ac | 2 ++ + heartbeat/{galera => galera.in} | 10 +++++----- + heartbeat/ocf-shellfuncs.in | 16 ++++++++++++++++ + .../{rabbitmq-cluster => rabbitmq-cluster.in} | 4 ++-- + heartbeat/redis.in | 4 ++-- + 5 files changed, 27 insertions(+), 9 deletions(-) + rename heartbeat/{galera => galera.in} (98%) + rename heartbeat/{rabbitmq-cluster => rabbitmq-cluster.in} (98%) + +diff --git a/configure.ac b/configure.ac +index ed9dc09bf..11c1b786b 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -979,6 +979,7 @@ AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate]) + AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate]) + AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88]) + AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio]) ++AC_CONFIG_FILES([heartbeat/galera], [chmod +x heartbeat/galera]) + AC_CONFIG_FILES([heartbeat/gcp-pd-move], [chmod +x heartbeat/gcp-pd-move]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip]) + AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip]) +@@ -993,6 +994,7 @@ AC_CONFIG_FILES([heartbeat/machine-info], [chmod +x heartbeat/machine-info]) + AC_CONFIG_FILES([heartbeat/mariadb], [chmod +x heartbeat/mariadb]) + AC_CONFIG_FILES([heartbeat/mpathpersist], [chmod +x heartbeat/mpathpersist]) + AC_CONFIG_FILES([heartbeat/nfsnotify], [chmod +x heartbeat/nfsnotify]) ++AC_CONFIG_FILES([heartbeat/rabbitmq-cluster], [chmod +x heartbeat/rabbitmq-cluster]) + AC_CONFIG_FILES([heartbeat/redis], [chmod +x heartbeat/redis]) + AC_CONFIG_FILES([heartbeat/rsyslog], [chmod +x heartbeat/rsyslog]) + AC_CONFIG_FILES([heartbeat/sg_persist], [chmod +x heartbeat/sg_persist]) +diff --git a/heartbeat/galera b/heartbeat/galera.in +similarity index 98% +rename from heartbeat/galera +rename to heartbeat/galera.in +index c2f636f0d..7f5f2f1eb 100755 +--- a/heartbeat/galera ++++ b/heartbeat/galera.in +@@ -1,4 +1,4 @@ +-#!/bin/sh ++#!@BASH_SHELL@ + # + # Copyright (c) 2014 David Vossel + # All Rights Reserved. 
+@@ -447,7 +447,7 @@ is_two_node_mode_active() + # crm_node or corosync-quorumtool cannot access various corosync + # flags when running inside a bundle, so only count the cluster + # members +- ocf_is_true "$OCF_RESKEY_two_node_mode" && ${HA_SBIN_DIR}/crm_mon -1X | xmllint --xpath "count(//nodes/node[@type='member'])" - | grep -q -w 2 ++ ocf_is_true "$OCF_RESKEY_two_node_mode" && crm_mon_no_validation -1X | xmllint --xpath "count(//nodes/node[@type='member'])" - | grep -q -w 2 + } + + is_last_node_in_quorate_partition() +@@ -458,7 +458,7 @@ is_last_node_in_quorate_partition() + # is clean), we shouldn't consider ourself quorate. + local partition_members=$(${HA_SBIN_DIR}/crm_node -p | wc -w) + local quorate=$(${HA_SBIN_DIR}/crm_node -q) +- local clean_members=$(${HA_SBIN_DIR}/crm_mon -1X | xmllint --xpath 'count(//nodes/node[@type="member" and @unclean="false"])' -) ++ local clean_members=$(crm_mon_no_validation -1X | xmllint --xpath 'count(//nodes/node[@type="member" and @unclean="false"])' -) + + [ "$partition_members" = 1 ] && [ "$quorate" = 1 ] && [ "$clean_members" = 2 ] + } +@@ -480,7 +480,7 @@ master_exists() + XMLOPT="--output-as=xml" + ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.2.0" + if [ $? -eq 1 ]; then +- crm_mon -1 $XMLOPT >/dev/null 2>&1 ++ crm_mon_no_validation -1 $XMLOPT >/dev/null 2>&1 + if [ $? -ne 0 ]; then + XMLOPT="--as-xml" + fi +@@ -461,7 +461,7 @@ + else + XMLOPT="--as-xml" + fi +- crm_mon -1 $XMLOPT | grep -q -i -E "resource.*id=\"${INSTANCE_ATTR_NAME}\".*role=\"(Promoted|Master)\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" ++ crm_mon_no_validation -1 $XMLOPT | grep -q -i -E "resource.*id=\"${INSTANCE_ATTR_NAME}\".*role=\"(Promoted|Master)\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" + return $? + } + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index ac75dfc87..760790cbd 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -41,6 +41,8 @@ + unset LC_ALL; export LC_ALL + unset LANGUAGE; export LANGUAGE + ++: ${HA_SBIN_DIR:=@sbindir@} ++ + __SCRIPT_NAME=`basename $0` + + if [ -z "$OCF_ROOT" ]; then +@@ -670,6 +672,20 @@ EOF + systemctl daemon-reload + } + ++# usage: crm_mon_no_validation args... ++# run crm_mon without any cib schema validation ++# This is useful when an agent runs in a bundle to avoid potential ++# schema validation errors when host and bundle are not perfectly aligned ++# To be used, your shell must support on process substitution (e.g. bash) ++# returns: ++# ++crm_mon_no_validation() ++{ ++ # The subshell prevents parsing error with incompatible shells ++ "$SHELL" -c "CIB_file=<(${HA_SBIN_DIR}/cibadmin -Q | sed 's/validate-with=\"[^\"]*\"/validate-with=\"none\"/') \ ++ ${HA_SBIN_DIR}/crm_mon \$*" -- $* ++} ++ + # + # pseudo_resource status tracking function... + # +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster.in +similarity index 98% +rename from heartbeat/rabbitmq-cluster +rename to heartbeat/rabbitmq-cluster.in +index f7d48120c..abd0662f2 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster.in +@@ -1,4 +1,4 @@ +-#!/bin/sh ++#!@BASH_SHELL@ + # + # Copyright (c) 2014 David Vossel + # All Rights Reserved. +@@ -195,7 +195,7 @@ rmq_join_list() + # ... 
+ local remote_join_list=$(cibadmin -Q --xpath "//node_state//nvpair[@name='$RMQ_CRM_ATTR_COOKIE']" | grep "$RMQ_CRM_ATTR_COOKIE" | sed -n -e "s/^.*value=.\(.*\)\".*$/\1/p") + # The following expression prepares a filter like '-e overcloud-rabbit-0 -e overcloud-rabbit-1 -e ...' +- local filter=$(crm_mon -r --as-xml | xmllint --format --xpath "//nodes//node[@online='true' and @standby='false']/@name" - | xargs -n1 echo | awk -F= '{print "-e "$2}') ++ local filter=$(crm_mon_no_validation -r --as-xml | xmllint --format --xpath "//nodes//node[@online='true' and @standby='false']/@name" - | xargs -n1 echo | awk -F= '{print "-e "$2}') + # export the intersection which gives us only the nodes that + # a) wrote their namein the cib attrd + # b) run on nodes where pacemaker_remote is enabled +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index 8afdf08a9..f53d46964 100755 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -278,7 +278,7 @@ master_is_active() + XMLOPT="--output-as=xml" + ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.2.0" + if [ $? -eq 1 ]; then +- crm_mon -1 $XMLOPT >/dev/null 2>&1 ++ crm_mon_no_validation -1 $XMLOPT >/dev/null 2>&1 + if [ $? -ne 0 ]; then + XMLOPT="--as-xml" + fi +@@ -286,7 +286,7 @@ master_is_active() + else + XMLOPT="--as-xml" + fi +- crm_mon -1 $XMLOPT | grep -q -i -E "resource.*id=\"${OCF_RESOURCE_INSTANCE}\".* role=\"(Promoted|Master)\".* active=\"true\".* orphaned=\"false\".* failed=\"false\"" ++ crm_mon_no_validation -1 $XMLOPT | grep -q -i -E "resource.*id=\"${OCF_RESOURCE_INSTANCE}\".* role=\"(Promoted|Master)\".* active=\"true\".* orphaned=\"false\".* failed=\"false\"" + MASTER_ACTIVE=$? + MASTER_ACTIVE_CACHED="true" + fi diff --git a/SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch b/SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch new file mode 100644 index 0000000..d28028c --- /dev/null +++ b/SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch @@ -0,0 +1,138 @@ +From 2e7e8c4a7b318f4032493773732754e418279a14 Mon Sep 17 00:00:00 2001 +From: Georg Brandl +Date: Mon, 11 Jan 2021 09:46:34 +0100 +Subject: [PATCH] Fix several exponential/cubic complexity regexes found by Ben + Caller/Doyensec + +--- + pygments/lexers/archetype.py | 2 +- + pygments/lexers/factor.py | 4 ++-- + pygments/lexers/jvm.py | 1 - + pygments/lexers/matlab.py | 6 +++--- + pygments/lexers/objective.py | 4 ++-- + pygments/lexers/templates.py | 2 +- + pygments/lexers/varnish.py | 2 +- + 8 files changed, 14 insertions(+), 12 deletions(-) + +diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py +index 65046613d..26f5ea8c9 100644 +--- a/pygments/lexers/archetype.py ++++ b/pygments/lexers/archetype.py +@@ -58,7 +58,7 @@ class AtomsLexer(RegexLexer): + (r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|' + r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date), + (r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), +- (r'[+-]?(\d+)*\.\d+%?', Number.Float), ++ (r'[+-]?\d*\.\d+%?', Number.Float), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[+-]?\d+%?', Number.Integer), + ], +diff --git a/pygments/lexers/factor.py b/pygments/lexers/factor.py +index be7b30dff..9200547f9 100644 +--- a/pygments/lexers/factor.py ++++ b/pygments/lexers/factor.py +@@ -265,7 +265,7 @@ class FactorLexer(RegexLexer): + (r'(?:)\s', Keyword.Namespace), + + # strings +- (r'"""\s+(?:.|\n)*?\s+"""', String), ++ (r'"""\s(?:.|\n)*?\s"""', String), + (r'"(?:\\\\|\\"|[^"])*"', String), + (r'\S+"\s+(?:\\\\|\\"|[^"])*"', String), + 
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char), +@@ -322,7 +322,7 @@ class FactorLexer(RegexLexer): + 'slots': [ + (r'\s+', Text), + (r';\s', Keyword, '#pop'), +- (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)', ++ (r'(\{\s+)(\S+)(\s[^}]+\s\}\s)', + bygroups(Text, Name.Variable, Text)), + (r'\S+', Name.Variable), + ], +diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py +index 62dfd45e5..9a9397c2d 100644 +--- a/pygments/lexers/jvm.py ++++ b/pygments/lexers/jvm.py +@@ -981,7 +981,6 @@ class CeylonLexer(RegexLexer): + (r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), + (r'"(\\\\|\\[^\\]|[^"\\])*"', String), + (r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char), +- (r'".*``.*``.*"', String.Interpol), + (r'(\.)([a-z_]\w*)', + bygroups(Operator, Name.Attribute)), + (r'[a-zA-Z_]\w*:', Name.Label), +diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py +index 4823c6a7e..578848623 100644 +--- a/pygments/lexers/matlab.py ++++ b/pygments/lexers/matlab.py +@@ -137,7 +137,7 @@ class MatlabLexer(RegexLexer): + (r'.', Comment.Multiline), + ], + 'deffunc': [ +- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', ++ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', + bygroups(Whitespace, Text, Whitespace, Punctuation, + Whitespace, Name.Function, Punctuation, Text, + Punctuation, Whitespace), '#pop'), +@@ -638,7 +638,7 @@ class OctaveLexer(RegexLexer): + (r"[^']*'", String, '#pop'), + ], + 'deffunc': [ +- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', ++ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', + bygroups(Whitespace, Text, Whitespace, Punctuation, + Whitespace, Name.Function, Punctuation, Text, + Punctuation, Whitespace), '#pop'), +@@ -710,7 +710,7 @@ class ScilabLexer(RegexLexer): + (r'.', String, '#pop'), + ], + 'deffunc': [ +- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', ++ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', + bygroups(Whitespace, Text, Whitespace, Punctuation, + Whitespace, Name.Function, Punctuation, Text, + Punctuation, Whitespace), '#pop'), +diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py +index 34e4062f6..38ac9bb05 100644 +--- a/pygments/lexers/objective.py ++++ b/pygments/lexers/objective.py +@@ -261,11 +261,11 @@ class LogosLexer(ObjectiveCppLexer): + 'logos_classname'), + (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', + bygroups(Keyword, Text, Name.Class)), +- (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)', ++ (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)', + bygroups(Keyword, Text, Name.Variable, Text, String, Text)), + (r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation), + 'function'), +- (r'(%new)(\s*)(\()(\s*.*?\s*)(\))', ++ (r'(%new)(\s*)(\()(.*?)(\))', + bygroups(Keyword, Text, Keyword, String, Keyword)), + (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)), + inherit, +diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py +index 33c06c4c4..5c3346b4c 100644 +--- a/pygments/lexers/templates.py ++++ b/pygments/lexers/templates.py +@@ -1405,7 +1405,7 @@ class EvoqueLexer(RegexLexer): + # see doc for handling first name arg: /directives/evoque/ + # + minor inconsistency: the "name" in e.g. $overlay{name=site_base} + # should be using(PythonLexer), not passed out as String +- (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?' ++ (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?' 
+ r'(.*?)((?(4)%)\})', + bygroups(Punctuation, Name.Builtin, Punctuation, None, + String, using(PythonLexer), Punctuation)), +diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py +index 23653f7a1..9d358bd7c 100644 +--- a/pygments/lexers/varnish.py ++++ b/pygments/lexers/varnish.py +@@ -61,7 +61,7 @@ def analyse_text(text): + bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)), + (r'(\.probe)(\s*=\s*)(\{)', + bygroups(Name.Attribute, Operator, Punctuation), 'probe'), +- (r'(\.\w+\b)(\s*=\s*)([^;]*)(\s*;)', ++ (r'(\.\w+\b)(\s*=\s*)([^;\s]*)(\s*;)', + bygroups(Name.Attribute, Operator, using(this), Punctuation)), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), diff --git a/SOURCES/bz1957765-gcp-vpc-move-vip-retry.patch b/SOURCES/bz1957765-gcp-vpc-move-vip-retry.patch new file mode 100644 index 0000000..2350f1a --- /dev/null +++ b/SOURCES/bz1957765-gcp-vpc-move-vip-retry.patch @@ -0,0 +1,102 @@ +From 3ae6d8f0a34d099945d9bf005ed45dbfe9452202 Mon Sep 17 00:00:00 2001 +From: kj1724 <78624900+kj1724@users.noreply.github.com> +Date: Wed, 28 Apr 2021 10:22:38 -0400 +Subject: [PATCH] gcp-vpc-move-vip.in: Adds retries + +If the cluster fails a monitoring event, it will try to restart the resource. If the resource agent makes an API/metadata call that fails at that time, the resource will be considered "failed", but in certain case also "unconfigured", which prevents further operations. + +These changes can help the agent recover on certain intermittent failures. +--- + heartbeat/gcp-vpc-move-vip.in | 62 ++++++++++++++++++++--------------- + 1 file changed, 35 insertions(+), 27 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index bbbd87b7a9..c411555110 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -50,6 +50,8 @@ REMOVE = 1 + CONN = None + THIS_VM = None + ALIAS = None ++MAX_RETRIES = 3 ++RETRY_BACKOFF_SECS = 1 + METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' + METADATA_HEADERS = {'Metadata-Flavor': 'Google'} + METADATA = \ +@@ -111,18 +113,37 @@ def get_metadata(metadata_key, params=None, timeout=None): + + Returns: + HTTP response from the GET request. +- +- Raises: +- urlerror.HTTPError: raises when the GET request fails. + """ +- timeout = timeout or 60 +- metadata_url = os.path.join(METADATA_SERVER, metadata_key) +- params = urlparse.urlencode(params or {}) +- url = '%s?%s' % (metadata_url, params) +- request = urlrequest.Request(url, headers=METADATA_HEADERS) +- request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open( +- request, timeout=timeout * 1.1).read().decode("utf-8") ++ for i in range(MAX_RETRIES): ++ try: ++ timeout = timeout or 60 ++ metadata_url = os.path.join(METADATA_SERVER, metadata_key) ++ params = urlparse.urlencode(params or {}) ++ url = '%s?%s' % (metadata_url, params) ++ request = urlrequest.Request(url, headers=METADATA_HEADERS) ++ request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) ++ return request_opener.open( ++ request, timeout=timeout * 1.1).read().decode("utf-8") ++ except Exception as e: ++ logger.error('Couldn\'t get instance name, is this running inside GCE?: ' ++ + str(e)) ++ time.sleep(RETRY_BACKOFF_SECS * (i + 1)) ++ ++ # If the retries are exhausted we exit with a generic error. 
++ sys.exit(OCF_ERR_GENERIC) ++ ++ ++def create_api_connection(): ++ for i in range(MAX_RETRIES): ++ try: ++ return googleapiclient.discovery.build('compute', 'v1', ++ cache_discovery=False) ++ except Exception as e: ++ logger.error('Couldn\'t connect with google api: ' + str(e)) ++ time.sleep(RETRY_BACKOFF_SECS * (i + 1)) ++ ++ # If the retries are exhausted we exit with a generic error. ++ sys.exit(OCF_ERR_GENERIC) + + + def get_instance(project, zone, instance): +@@ -358,24 +379,11 @@ def gcp_alias_status(alias): + + def validate(): + global ALIAS +- global CONN + global THIS_VM ++ global CONN + +- # Populate global vars +- try: +- CONN = googleapiclient.discovery.build('compute', 'v1', +- cache_discovery=False) +- except Exception as e: +- logger.error('Couldn\'t connect with google api: ' + str(e)) +- sys.exit(OCF_ERR_CONFIGURED) +- +- try: +- THIS_VM = get_metadata('instance/name') +- except Exception as e: +- logger.error('Couldn\'t get instance name, is this running inside GCE?: ' +- + str(e)) +- sys.exit(OCF_ERR_CONFIGURED) +- ++ CONN = create_api_connection() ++ THIS_VM = get_metadata('instance/name') + ALIAS = os.environ.get('OCF_RESKEY_alias_ip') + if not ALIAS: + logger.error('Missing alias_ip parameter') diff --git a/SOURCES/dont-use-ocf_attribute_target-for-metadata.patch b/SOURCES/dont-use-ocf_attribute_target-for-metadata.patch new file mode 100644 index 0000000..d868593 --- /dev/null +++ b/SOURCES/dont-use-ocf_attribute_target-for-metadata.patch @@ -0,0 +1,76 @@ +From 2dbfbd8ee3c1547f941507ab4109aa04eec0ef5a Mon Sep 17 00:00:00 2001 +From: Michele Baldessari +Date: Mon, 16 Jul 2018 20:24:04 +0200 +Subject: [PATCH] Do not call ocf_attribute_target in the meta-data function + +Starting with pacemaker-1.1.19 a "crm_node -n" call triggers +a CRM_OP_NODE_INFO cluster operation. If this is called +from a bundle with 1.1.19 code (or later) running on a 1.1.18 +cluster, during a meta-data call we will get the following error in the +cluster logs: +Jul 14 11:35:27 [20041] controller-0 crmd: error: handle_request: Unexpected request (node-info) sent to non-DC node +Jul 14 11:35:27 [20041] controller-0 crmd: error: handle_request: Unexpected + +By not calling ocf_attribute_target (which triggers a crm_node -n +call) when polling for meta-data we do not run into this issue. + +This can easily get triggered when creating a resource invoking +crm_node -n inside a 1.1.19 container with pcs, as that invokes +the 'meta-data' action explicitely. + +Co-Authored-By: Damien Ciabrini +Suggested-By: Ken Gaillot +--- + heartbeat/galera | 6 ++++-- + heartbeat/rabbitmq-cluster | 4 +++- + heartbeat/redis.in | 4 +++- + 3 files changed, 10 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/galera b/heartbeat/galera +index 270bdaf1b..4f341ceef 100755 +--- a/heartbeat/galera ++++ b/heartbeat/galera +@@ -66,9 +66,11 @@ + + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs +-. ${OCF_FUNCTIONS_DIR}/mysql-common.sh + +-NODENAME=$(ocf_attribute_target) ++if [ "$__OCF_ACTION" != "meta-data" ]; then ++ . 
${OCF_FUNCTIONS_DIR}/mysql-common.sh ++ NODENAME=$(ocf_attribute_target) ++fi + + # It is common for some galera instances to store + # check user that can be used to query status +diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster +index 9ff49e075..54a16c941 100755 +--- a/heartbeat/rabbitmq-cluster ++++ b/heartbeat/rabbitmq-cluster +@@ -37,7 +37,9 @@ RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia" + RMQ_PID_DIR="/var/run/rabbitmq" + RMQ_PID_FILE="/var/run/rabbitmq/rmq.pid" + RMQ_LOG_DIR="/var/log/rabbitmq" +-NODENAME=$(ocf_attribute_target) ++if [ "$__OCF_ACTION" != "meta-data" ]; then ++ NODENAME=$(ocf_attribute_target) ++fi + + # this attr represents the current active local rmq node name. + # when rmq stops or the node is fenced, this attr disappears +diff --git a/heartbeat/redis.in b/heartbeat/redis.in +index d5eb8f664..ddc62d8a7 100644 +--- a/heartbeat/redis.in ++++ b/heartbeat/redis.in +@@ -664,7 +664,9 @@ redis_validate() { + fi + } + +-NODENAME=$(ocf_attribute_target) ++if [ "$__OCF_ACTION" != "meta-data" ]; then ++ NODENAME=$(ocf_attribute_target) ++fi + if [ -r "$REDIS_CONFIG" ]; then + clientpasswd="$(sed -n -e 's/^\s*requirepass\s*\(.*\)\s*$/\1/p' < $REDIS_CONFIG | tail -n 1)" + fi diff --git a/SOURCES/findif-only-match-lines-with-netmasks.patch b/SOURCES/findif-only-match-lines-with-netmasks.patch new file mode 100644 index 0000000..6afdd55 --- /dev/null +++ b/SOURCES/findif-only-match-lines-with-netmasks.patch @@ -0,0 +1,25 @@ +From 2437d3879270f8febc5353e09898dd7d0aee08af Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 1 Aug 2018 09:54:39 +0200 +Subject: [PATCH] findif: only match lines containing netmasks + +--- + heartbeat/findif.sh | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh +index fc84cf0ec..66bc6d56a 100644 +--- a/heartbeat/findif.sh ++++ b/heartbeat/findif.sh +@@ -215,9 +215,9 @@ findif() + fi + if [ -n "$nic" ] ; then + # NIC supports more than two. 
+- set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') ++ set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') + else +- set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') ++ set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') + fi + if [ $# = 0 ] ; then + case $OCF_RESKEY_ip in diff --git a/SOURCES/lvmlockd-add-cmirrord-support.patch b/SOURCES/lvmlockd-add-cmirrord-support.patch new file mode 100644 index 0000000..cfaf001 --- /dev/null +++ b/SOURCES/lvmlockd-add-cmirrord-support.patch @@ -0,0 +1,118 @@ +From d4c9de6264251e4dbc91b64aaf7f500919d08d60 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 17 Aug 2018 12:48:46 +0200 +Subject: [PATCH] lvmlockd: add cmirrord support + +--- + heartbeat/lvmlockd | 53 ++++++++++++++++++++++++++++++++++++++++++---- + 1 file changed, 49 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/lvmlockd b/heartbeat/lvmlockd +index 7fe73e364..57f7fdc76 100755 +--- a/heartbeat/lvmlockd ++++ b/heartbeat/lvmlockd +@@ -59,6 +59,14 @@ For more information, refer to manpage lvmlockd.8. + This agent manages the lvmlockd daemon + + ++ ++ ++Start with cmirrord (cluster mirror log daemon). ++ ++activate cmirrord ++ ++ ++ + + pid file + pid file +@@ -102,6 +110,7 @@ END + : ${OCF_RESKEY_pidfile:="/run/lvmlockd.pid"} + + LOCKD="lvmlockd" ++CMIRRORD="cmirrord" + # 0.5s sleep each count + TIMEOUT_COUNT=20 + +@@ -138,6 +147,21 @@ silent_status() + + if [ -n "$pid" ] ; then + daemon_is_running "$pid" ++ rc=$? ++ mirror_rc=$rc ++ ++ if ocf_is_true $OCF_RESKEY_with_cmirrord; then ++ pid=$(pgrep $CMIRRORD | head -n1) ++ daemon_is_running "$pid" ++ mirror_rc=$? ++ fi ++ ++ # If these ever don't match, return error to force recovery ++ if [ $mirror_rc -ne $rc ]; then ++ return $OCF_ERR_GENERIC ++ fi ++ ++ return $rc + else + # No pid file + false +@@ -199,6 +223,16 @@ lvmlockd_start() { + return $OCF_SUCCESS + fi + ++ if ocf_is_true $OCF_RESKEY_with_cmirrord; then ++ ocf_log info "starting ${CMIRRORD}..." ++ $CMIRRORD ++ rc=$? ++ if [ $rc -ne $OCF_SUCCESS ] ; then ++ ocf_exit_reason "Failed to start ${CMIRRORD}, exit code: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ fi ++ + if [ ! -z "$OCF_RESKEY_socket_path" ] ; then + extras="$extras -s ${OCF_RESKEY_socket_path}" + fi +@@ -252,10 +286,11 @@ wait_lockspaces_close() + + kill_stop() + { +- local pid=$1 ++ local proc=$1 ++ local pid=$2 + local retries=0 + +- ocf_log info "Killing ${LOCKD} (pid=$pid)" ++ ocf_log info "Killing $proc (pid=$pid)" + while + daemon_is_running $pid && [ $retries -lt "$TIMEOUT_COUNT" ] + do +@@ -292,9 +327,15 @@ lvmlockd_stop() { + wait_lockspaces_close + + pid=$(get_pid) +- kill_stop $pid ++ kill_stop $LOCKD $pid ++ ++ if ocf_is_true $OCF_RESKEY_with_cmirrord; then ++ pid=$(pgrep $CMIRRORD) ++ kill_stop $CMIRRORD $pid ++ fi ++ + if silent_status ; then +- ocf_exit_reason "Failed to stop, ${LOCKD}[$pid] still running." ++ ocf_exit_reason "Failed to stop, ${LOCKD} or ${CMIRRORD} still running." 
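The status logic introduced above is the interesting part of the cmirrord change: when with_cmirrord is enabled, lvmlockd and cmirrord must be found in the same state, and any mismatch is reported as a hard error so the cluster manager recovers the resource. A condensed sketch of that agreement check (illustrative, not the shipped agent; the pid handling is simplified):

    silent_status()
    {
        pid=$(get_pid)
        [ -n "$pid" ] || return 1               # no pid file: not running
        daemon_is_running "$pid"; rc=$?
        mirror_rc=$rc
        if ocf_is_true "$OCF_RESKEY_with_cmirrord"; then
            pid=$(pgrep "$CMIRRORD" | head -n1)
            daemon_is_running "$pid"; mirror_rc=$?
        fi
        # if the two daemons ever disagree, force recovery
        [ $mirror_rc -eq $rc ] || return $OCF_ERR_GENERIC
        return $rc
    }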
+ return $OCF_ERR_GENERIC + fi + +@@ -317,6 +358,10 @@ lvmlockd_validate() { + check_binary pgrep + check_binary lvmlockctl + ++ if ocf_is_true $OCF_RESKEY_with_cmirrord; then ++ check_binary $CMIRRORD ++ fi ++ + return $OCF_SUCCESS + } + diff --git a/SOURCES/metadata-add-missing-s-suffix.patch b/SOURCES/metadata-add-missing-s-suffix.patch new file mode 100644 index 0000000..62ab32b --- /dev/null +++ b/SOURCES/metadata-add-missing-s-suffix.patch @@ -0,0 +1,183 @@ +From 84083d83ff6049bcc99b959c00999496b3027317 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 21 Sep 2018 11:30:26 +0200 +Subject: [PATCH 1/4] IPv6Addr/nagios/sybaseASE: add missing "s"-suffix in + metadata + +--- + heartbeat/IPv6addr.c | 12 ++++++------ + heartbeat/nagios | 2 +- + heartbeat/sybaseASE.in | 4 ++-- + 3 files changed, 9 insertions(+), 9 deletions(-) + +diff --git a/heartbeat/IPv6addr.c b/heartbeat/IPv6addr.c +index 68447de2e..d8562559b 100644 +--- a/heartbeat/IPv6addr.c ++++ b/heartbeat/IPv6addr.c +@@ -863,12 +863,12 @@ meta_data_addr6(void) + " \n" + " \n" + " \n" +- " \n" +- " \n" +- " \n" +- " \n" +- " \n" +- " \n" ++ " \n" ++ " \n" ++ " \n" ++ " \n" ++ " \n" ++ " \n" + " \n" + "\n"; + printf("%s\n",meta_data); +diff --git a/heartbeat/nagios b/heartbeat/nagios +index 4cb462f6a..3d07b141c 100755 +--- a/heartbeat/nagios ++++ b/heartbeat/nagios +@@ -114,7 +114,7 @@ nagios_meta_data() { + + + +- ++ + + + +diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in +index b4809ea23..9ddd429be 100755 +--- a/heartbeat/sybaseASE.in ++++ b/heartbeat/sybaseASE.in +@@ -234,8 +234,8 @@ meta_data() + + + +- +- ++ ++ + + + + +From d4bba27b171cb87698359dd300313ba5a6600cca Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 21 Sep 2018 11:34:41 +0200 +Subject: [PATCH 2/4] CI: improvements + +- added "check"-command to skip build-process +- added check for "s"-suffix in agents +- added additional file-types to also check ocf-* and C agents +--- + ci/build.sh | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +diff --git a/ci/build.sh b/ci/build.sh +index c331e9ab4..22f4472d3 100755 +--- a/ci/build.sh ++++ b/ci/build.sh +@@ -51,7 +51,7 @@ find_prunes() { + } + + find_cmd() { +- echo "find heartbeat -type f -and \( -perm /111 -or -name '*.sh' \) $(find_prunes)" ++ echo "find heartbeat -type f -and \( -perm /111 -or -name '*.sh' -or -name '*.c' -or -name '*.in' \) $(find_prunes)" + } + + check_all_executables() { +@@ -59,6 +59,12 @@ check_all_executables() { + while read -r script; do + file --mime "$script" | grep 'charset=binary' >/dev/null 2>&1 && continue + file --mime "$script" | grep 'text/x-python' >/dev/null 2>&1 && continue ++ file --mime "$script" | grep 'text/x-makefile' >/dev/null 2>&1 && continue ++ ++ if grep -qE "\ +Date: Fri, 21 Sep 2018 11:41:40 +0200 +Subject: [PATCH 3/4] ocf-shellfuncs: fixes caught when improving CI + +--- + heartbeat/ocf-shellfuncs.in | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 8e44f09eb..043ab9bf2 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -457,7 +457,7 @@ ocf_pidfile_status() { + return 2 + fi + pid=`cat $pidfile` +- kill -0 $pid 2>&1 > /dev/null ++ kill -0 $pid > /dev/null 2>&1 + if [ $? 
= 0 ]; then + return 0 + fi +@@ -761,7 +761,7 @@ maketempfile() + { + if [ $# = 1 -a "$1" = "-d" ]; then + mktemp -d +- return -0 ++ return 0 + elif [ $# != 0 ]; then + return 1 + fi + +From d1579996d6f5aec57ece2bc31b106891d0bbb964 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 21 Sep 2018 11:50:08 +0200 +Subject: [PATCH 4/4] CI: fix upstream CI not detecting MIME-format correctly + for Makefiles + +--- + ci/build.sh | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/ci/build.sh b/ci/build.sh +index 22f4472d3..b900ddc05 100755 +--- a/ci/build.sh ++++ b/ci/build.sh +@@ -59,7 +59,8 @@ check_all_executables() { + while read -r script; do + file --mime "$script" | grep 'charset=binary' >/dev/null 2>&1 && continue + file --mime "$script" | grep 'text/x-python' >/dev/null 2>&1 && continue +- file --mime "$script" | grep 'text/x-makefile' >/dev/null 2>&1 && continue ++ # upstream CI doesnt detect MIME-format correctly for Makefiles ++ [[ "$script" =~ .*/Makefile.in ]] && continue + + if grep -qE "\ +Date: Fri, 20 May 2016 15:47:33 +0200 +Subject: [PATCH 1/2] nfsserver: mount based on rpcpipefs_dir variable + +--- + heartbeat/nfsserver | 24 ++++++++++++------------ + 1 file changed, 12 insertions(+), 12 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 3d036a98a..479082169 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -177,14 +177,8 @@ esac + fp="$OCF_RESKEY_nfs_shared_infodir" + : ${OCF_RESKEY_nfs_notify_cmd="$DEFAULT_NOTIFY_CMD"} + : ${OCF_RESKEY_nfs_notify_foreground="$DEFAULT_NOTIFY_FOREGROUND"} +- +-if [ -z ${OCF_RESKEY_rpcpipefs_dir} ]; then +- rpcpipefs_make_dir=$fp/rpc_pipefs +- rpcpipefs_umount_dir=${DEFAULT_RPCPIPEFS_DIR} +-else +- rpcpipefs_make_dir=${OCF_RESKEY_rpcpipefs_dir} +- rpcpipefs_umount_dir=${OCF_RESKEY_rpcpipefs_dir} +-fi ++: ${OCF_RESKEY_rpcpipefs_dir="$DEFAULT_RPCPIPEFS_DIR"} ++OCF_RESKEY_rpcpipefs_dir=${OCF_RESKEY_rpcpipefs_dir%/} + + # Use statd folder if it exists + if [ -d "/var/lib/nfs/statd" ]; then +@@ -409,7 +403,7 @@ prepare_directory () + fi + + [ -d "$fp" ] || mkdir -p $fp +- [ -d "$rpcpipefs_make_dir" ] || mkdir -p $rpcpipefs_make_dir ++ [ -d "$OCF_RESKEY_rpcpipefs_dir" ] || mkdir -p $OCF_RESKEY_rpcpipefs_dir + [ -d "$fp/v4recovery" ] || mkdir -p $fp/v4recovery + + [ -d "$fp/$STATD_DIR" ] || mkdir -p "$fp/$STATD_DIR" +@@ -453,9 +447,13 @@ bind_tree () + + unbind_tree () + { +- if `mount | grep -q " on $rpcpipefs_umount_dir"`; then +- umount -t rpc_pipefs $rpcpipefs_umount_dir +- fi ++ local i=1 ++ while `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir"` && [ "$i" -le 10 ]; do ++ ocf_log info "Stop: umount ($i/10 attempts)" ++ umount -t rpc_pipefs $OCF_RESKEY_rpcpipefs_dir ++ sleep 1 ++ i=$((i + 1)) ++ done + if is_bound /var/lib/nfs; then + umount /var/lib/nfs + fi +@@ -617,6 +615,8 @@ nfsserver_start () + prepare_directory + bind_tree + ++ mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir ++ + # remove the sm-notify pid so sm-notify will be allowed to run again without requiring a reboot. 
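Two patterns in the nfsserver change above deserve a brief note: the stop path now retries the rpc_pipefs unmount a bounded number of times instead of trying once, and the follow-up commit just below makes the start-path mount conditional. A condensed sketch of the combined result (illustrative; the trailing space in the grep pattern is what anchors the match to the exact mountpoint):

    # start: only mount when not already mounted
    if ! mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "; then
        mount -t rpc_pipefs sunrpc "$OCF_RESKEY_rpcpipefs_dir"
    fi

    # stop: retry the unmount up to 10 times before declaring failure
    i=1
    while mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir " && [ "$i" -le 10 ]; do
        umount -t rpc_pipefs "$OCF_RESKEY_rpcpipefs_dir"
        sleep 1
        i=$((i + 1))
    done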
+ rm -f /var/run/sm-notify.pid + # + +From c92e8c84b73dde3254f53665a0ef3603418538dc Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 27 Sep 2018 16:09:09 +0200 +Subject: [PATCH 2/2] nfsserver: only mount rpc_pipefs if it's not mounted + +also added space to avoid matching similarly named mounts +--- + heartbeat/nfsserver | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver +index 479082169..5412f391b 100755 +--- a/heartbeat/nfsserver ++++ b/heartbeat/nfsserver +@@ -448,7 +448,7 @@ bind_tree () + unbind_tree () + { + local i=1 +- while `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir"` && [ "$i" -le 10 ]; do ++ while `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "` && [ "$i" -le 10 ]; do + ocf_log info "Stop: umount ($i/10 attempts)" + umount -t rpc_pipefs $OCF_RESKEY_rpcpipefs_dir + sleep 1 +@@ -615,7 +615,9 @@ nfsserver_start () + prepare_directory + bind_tree + +- mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir ++ if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then ++ mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir ++ fi + + # remove the sm-notify pid so sm-notify will be allowed to run again without requiring a reboot. + rm -f /var/run/sm-notify.pid diff --git a/SOURCES/nova-compute-wait-NovaEvacuate.patch b/SOURCES/nova-compute-wait-NovaEvacuate.patch new file mode 100644 index 0000000..12b7ad5 --- /dev/null +++ b/SOURCES/nova-compute-wait-NovaEvacuate.patch @@ -0,0 +1,747 @@ +diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am +--- a/doc/man/Makefile.am 2018-06-27 13:22:31.576628598 +0200 ++++ b/doc/man/Makefile.am 2018-06-27 13:47:15.902753673 +0200 +@@ -75,6 +75,8 @@ + ocf_heartbeat_ManageRAID.7 \ + ocf_heartbeat_ManageVE.7 \ + ocf_heartbeat_NodeUtilization.7 \ ++ ocf_heartbeat_nova-compute-wait.7 \ ++ ocf_heartbeat_NovaEvacuate.7 \ + ocf_heartbeat_Pure-FTPd.7 \ + ocf_heartbeat_Raid1.7 \ + ocf_heartbeat_Route.7 \ +diff -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am +--- a/heartbeat/Makefile.am 2018-06-27 13:22:31.574628625 +0200 ++++ b/heartbeat/Makefile.am 2018-06-27 13:46:23.621453631 +0200 +@@ -29,6 +29,8 @@ + + ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat + ++ospdir = $(OCF_RA_DIR_PREFIX)/openstack ++ + dtddir = $(datadir)/$(PACKAGE_NAME) + dtd_DATA = ra-api-1.dtd metadata.rng + +@@ -50,6 +52,9 @@ + IPv6addr_LDADD = -lplumb $(LIBNETLIBS) + send_ua_LDADD = $(LIBNETLIBS) + ++osp_SCRIPTS = nova-compute-wait \ ++ NovaEvacuate ++ + ocf_SCRIPTS = AoEtarget \ + AudibleAlarm \ + ClusterMon \ +diff -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait +--- a/heartbeat/nova-compute-wait 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/nova-compute-wait 2018-06-27 13:27:15.166830889 +0200 +@@ -0,0 +1,317 @@ ++#!/bin/sh ++# Copyright 2015 Red Hat, Inc. ++# ++# Description: Manages compute daemons ++# ++# Authors: Andrew Beekhof ++# ++# Support: openstack@lists.openstack.org ++# License: Apache Software License (ASL) 2.0 ++# ++ ++ ++####################################################################### ++# Initialization: ++ ++### ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++### ++ ++: ${__OCF_ACTION=$1} ++ ++####################################################################### ++ ++meta_data() { ++ cat < ++ ++ ++1.0 ++ ++ ++OpenStack Nova Compute Server. ++ ++OpenStack Nova Compute Server ++ ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++ ++Deprecated - do not use anymore. 
++Deprecated - do not use anymore ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++ ++ ++DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN ++ ++DNS domain ++ ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++ ++Deprecated - do not use anymore. ++Deprecated - do not use anymore ++ ++ ++ ++ ++ ++How long to wait for nova to finish evacuating instances elsewhere ++before starting nova-compute. Only used when the agent detects ++evacuations might be in progress. ++ ++You may need to increase the start timeout when increasing this value. ++ ++Delay to allow evacuations time to complete ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++ ++# don't exit on TERM, to test that lrmd makes sure that we do exit ++trap sigterm_handler TERM ++sigterm_handler() { ++ ocf_log info "They use TERM to bring us down. No such luck." ++ return ++} ++ ++nova_usage() { ++ cat </run/systemd/system/openstack-nova-compute.service.d/unfence-20.conf ++[Service] ++ExecStartPost=/sbin/fence_compute ${fence_options} -o on -n ${NOVA_HOST} ++EOF ++} ++ ++nova_validate() { ++ rc=$OCF_SUCCESS ++ ++ check_binary crudini ++ check_binary nova-compute ++ check_binary fence_compute ++ ++ if [ ! -f /etc/nova/nova.conf ]; then ++ ocf_exit_reason "/etc/nova/nova.conf not found" ++ exit $OCF_ERR_CONFIGURED ++ fi ++ ++ # Is the state directory writable? ++ state_dir=$(dirname $statefile) ++ touch "$state_dir/$$" ++ if [ $? != 0 ]; then ++ ocf_exit_reason "Invalid state directory: $state_dir" ++ return $OCF_ERR_ARGS ++ fi ++ rm -f "$state_dir/$$" ++ ++ NOVA_HOST=$(crudini --get /etc/nova/nova.conf DEFAULT host 2>/dev/null) ++ if [ $? = 1 ]; then ++ short_host=$(uname -n | awk -F. '{print $1}') ++ if [ "x${OCF_RESKEY_domain}" != x ]; then ++ NOVA_HOST=${short_host}.${OCF_RESKEY_domain} ++ else ++ NOVA_HOST=$(uname -n) ++ fi ++ fi ++ ++ if [ $rc != $OCF_SUCCESS ]; then ++ exit $rc ++ fi ++ return $rc ++} ++ ++statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active" ++ ++: ${OCF_RESKEY_evacuation_delay=120} ++case $__OCF_ACTION in ++meta-data) meta_data ++ exit $OCF_SUCCESS ++ ;; ++usage|help) nova_usage ++ exit $OCF_SUCCESS ++ ;; ++esac ++ ++case $__OCF_ACTION in ++start) nova_validate; nova_start;; ++stop) nova_stop;; ++monitor) nova_validate; nova_monitor;; ++notify) nova_notify;; ++validate-all) exit $OCF_SUCCESS;; ++*) nova_usage ++ exit $OCF_ERR_UNIMPLEMENTED ++ ;; ++esac ++rc=$? ++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" ++exit $rc ++ +diff -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate +--- a/heartbeat/NovaEvacuate 1970-01-01 01:00:00.000000000 +0100 ++++ b/heartbeat/NovaEvacuate 2018-06-27 13:27:18.835781756 +0200 +@@ -0,0 +1,388 @@ ++#!/bin/sh ++# ++# Copyright 2015 Red Hat, Inc. ++# ++# Description: Manages evacuation of nodes running nova-compute ++# ++# Authors: Andrew Beekhof ++# ++# Support: openstack@lists.openstack.org ++# License: Apache Software License (ASL) 2.0 ++# ++ ++ ++####################################################################### ++# Initialization: ++ ++### ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++### ++ ++: ${__OCF_ACTION=$1} ++ ++####################################################################### ++ ++meta_data() { ++ cat < ++ ++ ++1.0 ++ ++ ++Facility for tacking a list of compute nodes and reliably evacuating the ones that fence_evacuate has flagged. ++ ++Evacuator for OpenStack Nova Compute Server ++ ++ ++ ++ ++ ++Authorization URL for connecting to keystone in admin context ++ ++Authorization URL ++ ++ ++ ++ ++ ++Username for connecting to keystone in admin context ++ ++Username ++ ++ ++ ++ ++Password for connecting to keystone in admin context ++ ++Password ++ ++ ++ ++ ++ ++Tenant name for connecting to keystone in admin context. ++Note that with Keystone V3 tenant names are only unique within a domain. ++ ++Keystone v2 Tenant or v3 Project Name ++ ++ ++ ++ ++ ++User's domain name. Used when authenticating to Keystone. ++ ++Keystone v3 User Domain ++ ++ ++ ++ ++ ++Domain name containing project. Used when authenticating to Keystone. ++ ++Keystone v3 Project Domain ++ ++ ++ ++ ++ ++Nova API location (internal, public or admin URL) ++ ++Nova API location (internal, public or admin URL) ++ ++ ++ ++ ++ ++Region name for connecting to nova. ++ ++Region name ++ ++ ++ ++ ++ ++Explicitly allow client to perform "insecure" TLS (https) requests. ++The server's certificate will not be verified against any certificate authorities. ++This option should be used with caution. ++ ++Allow insecure TLS requests ++ ++ ++ ++ ++ ++Disable shared storage recovery for instances. Use at your own risk! ++ ++Disable shared storage recovery for instances ++ ++ ++ ++ ++ ++Enable extra logging from the evacuation process ++ ++Enable debug logging ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++ ++# don't exit on TERM, to test that lrmd makes sure that we do exit ++trap sigterm_handler TERM ++sigterm_handler() { ++ ocf_log info "They use TERM to bring us down. No such luck." ++ return ++} ++ ++evacuate_usage() { ++ cat < 0: + filename = keyValues['--filename'][0] + else: +- print "A profile is needed! please use \'--filename\' and add the profile name." ++ print("A profile is needed! please use \'--filename\' and add the profile name.") + return filename + + def getInstanceCount(self,keyValues): +@@ -25,7 +25,7 @@ + if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0: + count = keyValues['--instancecount'][0] + else: +- print "InstanceCount should be a positive number! The default value(1) will be used!" ++ print("InstanceCount should be a positive number! The default value(1) will be used!") + return int(count) + + def getSubOperations(self,cmd,operation): +@@ -65,8 +65,8 @@ + _newkeyValues["RegionId"] = newkeyValues["RegionId"] + self._handExtraOperation(cmd,extraOperation,_newkeyValues,version,secureRequest) + else: +- print "InstanceId is need!" 
+- except Exception,e: ++ print("InstanceId is need!") ++ except Exception as e: + print(e) + + def _handExtraOperation(self,cmd,extraOperation,keyValues,version , secureRequest = False): +@@ -81,7 +81,7 @@ + response.display_response("error", result, "json") + else: + response.display_response(extraOperation, result, "json") +- except Exception,e: ++ except Exception as e: + print(e) + + +@@ -127,7 +127,7 @@ + ''' + if data.has_key('InstanceId') and len(data['InstanceId']) > 0: + instanceId = data['InstanceId'] +- except Exception,e: ++ except Exception as e: + pass + finally: + return instanceId +@@ -156,5 +156,5 @@ + if __name__ == "__main__": + handler = EcsImportHandler() + handler.getKVFromJson('ttt') +- print handler.getKVFromJson('ttt') ++ print(handler.getKVFromJson('ttt')) + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-10-08 12:37:08.373091088 +0200 +@@ -77,8 +77,8 @@ + if not filename == None: + self.exportInstanceToFile(result,filename) + else: +- print 'Filename is needed' +- except Exception,e: ++ print('Filename is needed') ++ except Exception as e: + print(e) + + def exportInstanceToFile(self, result, filename): +@@ -96,9 +96,9 @@ + fp = open(fileName,'w') + try : + fp.write(json.dumps(result,indent=4)) +- print "success" ++ print("success") + except IOError: +- print "Error: can\'t find file or read data" ++ print("Error: can\'t find file or read data") + finally: + fp.close() + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-10-08 12:36:20.997966509 +0200 +@@ -26,7 +26,7 @@ + count = keyValues[import_count][0] + else: + pass +- # print "InstanceCount should be a positive number! The default value(1) will be used!" ++ # print("InstanceCount should be a positive number! The default value(1) will be used!") + return int(count), "InstanceCount is "+str(count)+" created." 
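The aliyuncli conversions in this patch are mechanical and recur across every bundled module below: print statements become print() calls and "except Exception,e" becomes "except Exception as e". They match what 2to3's print and except fixers emit, so a refresh against a newer aliyun-cli drop can be previewed the same way (paths illustrative):

    # show the would-be diff without touching anything
    2to3 -f print -f except bundled/aliyun/aliyun-cli/aliyuncli/*.py

    # apply in place (2to3 writes .bak backups unless -n is given)
    2to3 -w -f print -f except bundled/aliyun/aliyun-cli/aliyuncli/*.py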
+ + def getSubOperations(self,cmd,operation): +@@ -46,7 +46,7 @@ + if self.apiHandler.needSetDefaultRegion(cmdInstance, newkeyValues): + newkeyValues["RegionId"] = [self.extensionHandler.getUserRegion()] + newkeyValues["ClientToken"] = [self.random_str()] +- # print newkeyValues.keys() ++ # print(newkeyValues.keys()) + # return + # self._setAttr(cmdInstance, newkeyValues) # set all key values in instance + # self.apiHandler.changeEndPoint(cmdInstance, newkeyValues) +@@ -58,7 +58,7 @@ + response.display_response("error", result, "json") + else: + response.display_response(item, result, "json") +- except Exception,e: ++ except Exception as e: + print(e) + + def getKVFromJson(self,filename): +@@ -77,7 +77,7 @@ + fp = open(fileName,'r') + data=json.loads(fp.read()) + keys = data.keys() +- # print keys, type(data['Items']['DBInstanceAttribute'][0]) ++ # print(keys, type(data['Items']['DBInstanceAttribute'][0])) + # instanceAttribute = data['Items']['DBInstanceAttribute'][0] + items = data['Items']['DBInstanceAttribute'][0] + keys = items.keys() +@@ -130,7 +130,7 @@ + if __name__ == "__main__": + handler = RdsImportDBInstanceHandler() + # handler.getKVFromJson('ttt') +- # print handler.getKVFromJson('ttt') +- print handler.random_str() ++ # print(handler.getKVFromJson('ttt')) ++ print(handler.random_str()) + + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-10-08 12:11:19.743703469 +0200 +@@ -24,9 +24,9 @@ + _value = keyValues[ProfileCmd.name][0] # use the first value + self.extensionCliHandler.setUserProfile(_value) + else: +- print "Do your forget profile name? please use \'--name\' and add the profile name." ++ print("Do your forget profile name? please use \'--name\' and add the profile name.") + else: +- print "[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?" ++ print("[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?") + + def addProfileCmd(self, cmd, keyValues): + userKey = '' +@@ -52,12 +52,12 @@ + finally: + f.close() + else: +- print "[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?" 
++ print("[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?") + + + if __name__ == "__main__": + handler = ProfileHandler() + handler.handleProfileCmd("useprofile", {'--name':["profile444"]}) +- print handler.extensionCliHandler.getUserProfile() ++ print(handler.extensionCliHandler.getUserProfile()) + handler.addProfileCmd("addProfile", {}) +- handler.addProfileCmd("addProfile", {'--name':["profile2222"]}) +\ No newline at end of file ++ handler.addProfileCmd("addProfile", {'--name':["profile2222"]}) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-10-08 12:12:25.602486634 +0200 +@@ -24,14 +24,14 @@ + self.openApiDataHandler = aliyunOpenApiData.aliyunOpenApiDataHandler() + + def showUsage(self): +- print "usage: aliyuncli [options and parameters]" ++ print("usage: aliyuncli [options and parameters]") + + def showExample(self): +- print "show example" ++ print("show example") + + def showCmdError(self, cmd): + self.showUsage() +- print " the valid command as follows:\n" ++ print(" the valid command as follows:\n") + cmds = self.openApiDataHandler.getApiCmds() + self.printAsFormat(cmds) + +@@ -44,7 +44,7 @@ + error.printInFormat("Wrong version", "The sdk version is not exit.") + return None + self.showUsage() +- print "["+cmd+"]","valid operations as follows:\n" ++ print("["+cmd+"]","valid operations as follows:\n") + operations = self.openApiDataHandler.getApiOperations(cmd, version) + extensions = self.openApiDataHandler.getExtensionOperationsFromCmd(cmd) + operations.update(extensions) +@@ -56,8 +56,8 @@ + self.printAsFormat(operations) + + def showParameterError(self, cmd, operation, parameterlist): +- print 'usage: aliyuncli [options and parameters]' +- print '['+cmd+"."+operation+']: current operation can uses parameters as follow :\n' ++ print('usage: aliyuncli [options and parameters]') ++ print('['+cmd+"."+operation+']: current operation can uses parameters as follow :\n') + self.printAsFormat(parameterlist) + pass + +@@ -72,7 +72,7 @@ + tmpList.append(item) + count = count+1 + if len(tmpList) == 2: +- print '{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10') ++ print('{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10')) + tmpList = list() + if len(tmpList) == 1 and count == len(mlist): +- print tmpList[0] +\ No newline at end of file ++ print(tmpList[0]) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-10-08 12:12:42.799168903 +0200 +@@ -91,7 +91,7 @@ + keyValues["RegionId"] = [self.extensionHandler.getUserRegion()] + #check necessaryArgs as:accesskeyid accesskeysecret regionId + if not self.handler.hasNecessaryArgs(keyValues): +- print 'accesskeyid/accesskeysecret/regionId is absence' ++ print('accesskeyid/accesskeysecret/regionId is absence') + return + result = self.handler.getResponse(cmd,operation,className,cmdInstance,keyValues,secureRequest) + if result is None: +@@ -102,7 +102,7 @@ + else: + response.display_response(operation, result, outPutFormat,keyValues) + else: +- print 'aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com' ++ print('aliyuncli internal error, please 
contact: haowei.yao@alibaba-inc.com') + elif self.handler.isAvailableExtensionOperation(cmd, operation): + if self.args.__len__() >= 3 and self.args[2] == 'help': + import commandConfigure +@@ -125,7 +125,7 @@ + def showInstanceAttribute(self, cmd, operation, classname): + if self.args.__len__() >= 3 and self.args[2] == "help": + self.helper.showParameterError(cmd, operation, self.completer._help_to_show_instance_attribute(classname)) +- #print self.completer._help_to_show_instance_attribute(cmdInstance) ++ #print(self.completer._help_to_show_instance_attribute(cmdInstance)) + return True + return False + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-10-08 12:12:54.764947819 +0200 +@@ -141,7 +141,7 @@ + _key = keyValues[keystr][0] + if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0: + _secret = keyValues[secretstr][0] +- #print "accesskeyid: ", _key , "accesskeysecret: ",_secret ++ #print("accesskeyid: ", _key , "accesskeysecret: ",_secret) + return _key, _secret + + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-10-08 12:13:23.672413710 +0200 +@@ -161,12 +161,12 @@ + + if __name__ == "__main__": + upgradeHandler = aliyunCliUpgradeHandler() +- # print upgradeHandler.getLatestTimeFromServer() ++ # print(upgradeHandler.getLatestTimeFromServer()) + # flag, url = upgradeHandler.isNewVersionReady() + # if flag: +- # print url ++ # print(url) + # else: +- # print "current version is latest one" +- # print "final test:" +- print upgradeHandler.checkForUpgrade() +- print upgradeHandler.handleUserChoice("N") ++ # print("current version is latest one") ++ # print("final test:") ++ print(upgradeHandler.checkForUpgrade()) ++ print(upgradeHandler.handleUserChoice("N")) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-10-08 12:14:46.830877248 +0200 +@@ -127,35 +127,35 @@ + + # this api will show help page when user input aliyuncli help(-h or --help) + def showAliyunCliHelp(self): +- print color.bold+"ALIYUNCLI()"+color.end +- print color.bold+"\nNAME"+color.end +- print "\taliyuncli -" +- print color.bold+"\nDESCRIPTION"+color.end +- print "\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. " +- print color.bold+"\nSYNOPSIS"+color.end +- print "\taliyuncli [options and parameters]" +- print "\n\taliyuncli has supported command completion now. The detail you can check our site." +- print color.bold+"OPTIONS"+color.end +- print color.bold+"\tconfigure"+color.end +- print "\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)" +- print color.bold+"\n\t--output"+color.end+" (string)" +- print "\n\tThe formatting style for command output." 
+-        print "\n\to json"
+-        print "\n\to text"
+-        print "\n\to table"
++        print(color.bold+"ALIYUNCLI()"+color.end)
++        print(color.bold+"\nNAME"+color.end)
++        print("\taliyuncli -")
++        print(color.bold+"\nDESCRIPTION"+color.end)
++        print("\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. ")
++        print(color.bold+"\nSYNOPSIS"+color.end)
++        print("\taliyuncli [options and parameters]")
++        print("\n\taliyuncli has supported command completion now. The detail you can check our site.")
++        print(color.bold+"OPTIONS"+color.end)
++        print(color.bold+"\tconfigure"+color.end)
++        print("\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)")
++        print(color.bold+"\n\t--output"+color.end+" (string)")
++        print("\n\tThe formatting style for command output.")
++        print("\n\to json")
++        print("\n\to text")
++        print("\n\to table")
+ 
+-        print color.bold+"\n\t--secure"+color.end
+-        print "\n\tMaking secure requests(HTTPS) to service"
++        print(color.bold+"\n\t--secure"+color.end)
++        print("\n\tMaking secure requests(HTTPS) to service")
+ 
+-        print color.bold+"\nAVAILABLE SERVICES"+color.end
+-        print "\n\to ecs"
+-        print "\n\to ess"
+-        print "\n\to mts"
+-        print "\n\to rds"
+-        print "\n\to slb"
++        print(color.bold+"\nAVAILABLE SERVICES"+color.end)
++        print("\n\to ecs")
++        print("\n\to ess")
++        print("\n\to mts")
++        print("\n\to rds")
++        print("\n\to slb")
+ 
+     def showCurrentVersion(self):
+-        print self._version
++        print(self._version)
+ 
+     def findConfigureFilePath(self):
+         homePath = ""
+diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py
+--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py	2018-01-24 04:08:33.000000000 +0100
++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py	2018-10-08 12:16:00.008525187 +0200
+@@ -39,9 +39,9 @@
+ 
+ 
+ def oss_notice():
+-    print "OSS operation in aliyuncli is not supported."
+-    print "Please use 'ossutil' command line tool for Alibaba Cloud OSS operation."
+-    print "You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n"
++    print("OSS operation in aliyuncli is not supported.")
++    print("Please use 'ossutil' command line tool for Alibaba Cloud OSS operation.")
++    print("You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n")
+ 
+ 
+ try:
+@@ -391,22 +391,22 @@
+             return jsonobj
+ 
+         except ImportError as e:
+-            print module, 'is not exist!'
++ print(module, 'is not exist!') + sys.exit(1) + + except ServerException as e: + error = cliError.error() + error.printInFormat(e.get_error_code(), e.get_error_msg()) +- print "Detail of Server Exception:\n" +- print str(e) ++ print("Detail of Server Exception:\n") ++ print(str(e)) + sys.exit(1) + + except ClientException as e: +- # print e.get_error_msg() ++ # print(e.get_error_msg()) + error = cliError.error() + error.printInFormat(e.get_error_code(), e.get_error_msg()) +- print "Detail of Client Exception:\n" +- print str(e) ++ print("Detail of Client Exception:\n") ++ print(str(e)) + sys.exit(1) + + def getSetFuncs(self,classname): +@@ -549,6 +549,6 @@ + + if __name__ == '__main__': + handler = aliyunOpenApiDataHandler() +- print "###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance') +- print "###############",handler.isAvailableOperation('ecs', 'DescribeInstances') +- print "###############",handler.getExtensionOperationsFromCmd('ecs') ++ print("###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance')) ++ print("###############",handler.isAvailableOperation('ecs', 'DescribeInstances')) ++ print("###############",handler.getExtensionOperationsFromCmd('ecs')) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-10-08 12:16:14.865250686 +0200 +@@ -44,7 +44,7 @@ + filename=self.fileName + self.writeCmdVersionToFile(cmd,version,filename) + else: +- print "A argument is needed! please use \'--version\' and add the sdk version." ++ print("A argument is needed! 
please use \'--version\' and add the sdk version.") + return + def showVersions(self,cmd,operation,stream=None): + configureVersion='(not configure)' +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-10-08 12:17:34.763774477 +0200 +@@ -55,7 +55,7 @@ + # _mlist = self.rds.extensionOptions[self.rds.exportDBInstance] + self.appendList(parameterList, self.rds.extensionOptions[self.rds.exportDBInstance]) + if operation.lower() == self.rds.importDBInstance.lower(): +- # print "haha", (self.rds.extensionOptions[self.rds.importDBInstance]) ++ # print("haha", (self.rds.extensionOptions[self.rds.importDBInstance])) + # parameterList.append(self.rds.extensionOptions[self.rds.importDBInstance]) + self.appendList(parameterList, self.rds.extensionOptions[self.rds.importDBInstance]) + +@@ -89,8 +89,8 @@ + importInstance:['count','filename']} + + if __name__ == '__main__': +- # print type(rds.extensionOperations) +- # print type(rds.extensionOptions) +- # print rds.extensionOptions['ll'] ++ # print(type(rds.extensionOperations)) ++ # print(type(rds.extensionOptions)) ++ # print(rds.extensionOptions['ll']) + configure = commandConfigure() +- print configure.showExtensionOperationHelp("ecs", "ExportInstance") ++ print(configure.showExtensionOperationHelp("ecs", "ExportInstance")) +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2018-10-08 12:17:59.282322043 +0200 +@@ -577,7 +577,7 @@ + operation = operations[i].strip() + self._getKeyFromSection(profilename,operation) + else: +- print 'The correct usage:aliyuncli configure get key --profile profilename' ++ print('The correct usage:aliyuncli configure get key --profile profilename') + return + + def _getKeyFromSection(self,profilename,key): +@@ -591,7 +591,7 @@ + elif key in _WRITE_TO_CONFIG_FILE : + self._getKeyFromFile(config_filename,sectionName,key) + else: +- print key,'=','None' ++ print(key,'=','None') + def _getKeyFromFile(self,filename,section,key): + if os.path.isfile(filename): + with open(filename, 'r') as f: +@@ -600,9 +600,9 @@ + start = self._configWriter.hasSectionName(section,contents)[1] + end = self._configWriter._getSectionEnd(start,contents) + value = self._configWriter._getValueInSlice(start,end,key,contents) +- print key,'=',value ++ print(key,'=',value) + else: +- print key,'=None' ++ print(key,'=None') + + + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py 2018-10-08 12:18:25.178844179 +0200 +@@ -2,7 +2,7 @@ + + def handleEndPoint(cmd,operation,keyValues): + if not hasNecessaryArgs(keyValues): +- print 'RegionId/EndPoint is absence' ++ print('RegionId/EndPoint is absence') + return + if cmd is not None: + cmd = cmd.capitalize() +@@ -25,7 +25,7 @@ + from aliyunsdkcore.profile.region_provider import modify_point + modify_point(cmd,regionId,endPoint) + except Exception as e: +- print e ++ print(e) + pass + + +diff -uNr 
a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py 2018-10-08 12:18:45.458469966 +0200 +@@ -111,14 +111,14 @@ + if os.path.isfile(cfgfile): + ans = raw_input('File existed. Do you wish to overwrite it?(y/n)') + if ans.lower() != 'y': +- print 'Answer is No. Quit now' ++ print('Answer is No. Quit now') + return + with open(cfgfile, 'w+') as f: + config.write(f) +- print 'Your configuration is saved to %s.' % cfgfile ++ print('Your configuration is saved to %s.' % cfgfile) + + def cmd_help(args): +- print HELP ++ print(HELP) + + def add_config(parser): + parser.add_argument('--host', type=str, help='service host') +@@ -161,7 +161,7 @@ + return CMD_LIST.keys() + def handleOas(pars=None): + if pars is None: +- print HELP ++ print(HELP) + sys.exit(0) + parser = ArgumentParser(prog="aliyuncli oas",formatter_class=ArgumentDefaultsHelpFormatter) + +diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py +--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2018-01-24 04:08:33.000000000 +0100 ++++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2018-10-08 12:18:59.713206928 +0200 +@@ -61,7 +61,7 @@ + data = f.read() + return data + except (OSError, IOError) as e: +- print e ++ print(e) + def _getParamFromUrl(prefix,value,mode): + + req = urllib2.Request(value) +@@ -74,7 +74,7 @@ + errorMsg='Get the wrong content' + errorClass.printInFormat(response.getcode(), errorMsg) + except Exception as e: +- print e ++ print(e) + + PrefixMap = {'file://': _getParamFromFile, + 'fileb://': _getParamFromFile +@@ -86,4 +86,4 @@ + 'fileb://': {'mode': 'rb'}, + #'http://': {}, + #'https://': {} +- } +\ No newline at end of file ++ } +diff -uNr a/bundled/aliyun/colorama/demos/demo07.py b/bundled/aliyun/colorama/demos/demo07.py +--- a/bundled/aliyun/colorama/demos/demo07.py 2015-01-06 11:41:47.000000000 +0100 ++++ b/bundled/aliyun/colorama/demos/demo07.py 2018-10-08 12:20:25.598622106 +0200 +@@ -16,10 +16,10 @@ + 3a4 + """ + colorama.init() +- print "aaa" +- print "aaa" +- print "aaa" +- print forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4" ++ print("aaa") ++ print("aaa") ++ print("aaa") ++ print(forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4") + + + if __name__ == '__main__': +diff -uNr a/bundled/aliyun/pycryptodome/Doc/conf.py b/bundled/aliyun/pycryptodome/Doc/conf.py +--- a/bundled/aliyun/pycryptodome/Doc/conf.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/Doc/conf.py 2018-10-08 12:08:11.122188094 +0200 +@@ -15,7 +15,7 @@ + + # Modules to document with autodoc are in another directory + sys.path.insert(0, os.path.abspath('../lib')) +-print sys.path ++print(sys.path) + + # Mock existance of native modules + from Crypto.Util import _raw_api +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py b/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py 2018-10-08 12:08:11.123188075 +0200 +@@ -302,7 +302,7 @@ + randfunc = kwargs.pop("randfunc", None) + prime_filter = kwargs.pop("prime_filter", 
lambda x: True) + if kwargs: +- print "Unknown parameters:", kwargs.keys() ++ print("Unknown parameters:", kwargs.keys()) + + if exact_bits is None: + raise ValueError("Missing exact_bits parameter") +@@ -341,7 +341,7 @@ + exact_bits = kwargs.pop("exact_bits", None) + randfunc = kwargs.pop("randfunc", None) + if kwargs: +- print "Unknown parameters:", kwargs.keys() ++ print("Unknown parameters:", kwargs.keys()) + + if randfunc is None: + randfunc = Random.new().read +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py b/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py 2018-10-08 12:08:11.124188057 +0200 +@@ -912,4 +912,4 @@ + count = 30 + for x in xrange(count): + _ = point * d +- print (time.time() - start) / count * 1000, "ms" ++ print((time.time() - start) / count * 1000, "ms") +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py 2018-10-08 12:08:11.124188057 +0200 +@@ -1276,7 +1276,7 @@ + tests += make_block_tests(AES, "AESNI", test_data, {'use_aesni': True}) + tests += [ TestMultipleBlocks(True) ] + else: +- print "Skipping AESNI tests" ++ print("Skipping AESNI tests") + return tests + + if __name__ == '__main__': +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py 2018-10-08 12:08:11.125188038 +0200 +@@ -894,7 +894,7 @@ + if config.get('slow_tests'): + tests += list_test_cases(NISTTestVectorsGCM_no_clmul) + else: +- print "Skipping test of PCLMULDQD in AES GCM" ++ print("Skipping test of PCLMULDQD in AES GCM") + + return tests + +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py 2018-10-08 12:08:11.125188038 +0200 +@@ -39,7 +39,7 @@ + """Convert a text string with bytes in hex form to a byte string""" + clean = b(rws(t)) + if len(clean)%2 == 1: +- print clean ++ print(clean) + raise ValueError("Even number of characters expected") + return a2b_hex(clean) + +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py 2018-10-08 12:08:11.126188020 +0200 +@@ -25,11 +25,11 @@ + + slow_tests = not "--skip-slow-tests" in sys.argv + if not slow_tests: +- print "Skipping slow tests" ++ print("Skipping slow tests") + + wycheproof_warnings = "--wycheproof-warnings" in sys.argv + if wycheproof_warnings: +- print "Printing Wycheproof warnings" ++ print("Printing Wycheproof warnings") + + config = {'slow_tests' : slow_tests, 
'wycheproof_warnings' : wycheproof_warnings } + SelfTest.run(stream=sys.stdout, verbosity=1, config=config) +diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py b/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py +--- a/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py 2018-07-10 21:32:46.000000000 +0200 ++++ b/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py 2018-10-08 12:08:11.126188020 +0200 +@@ -369,13 +369,13 @@ + ] + + for key, words in data: +- print 'Trying key', key ++ print('Trying key', key) + key=binascii.a2b_hex(key) + w2=key_to_english(key) + if w2!=words: +- print 'key_to_english fails on key', repr(key), ', producing', str(w2) ++ print('key_to_english fails on key', repr(key), ', producing', str(w2)) + k2=english_to_key(words) + if k2!=key: +- print 'english_to_key fails on key', repr(key), ', producing', repr(k2) ++ print('english_to_key fails on key', repr(key), ', producing', repr(k2)) diff --git a/SOURCES/timeout-interval-add-s-suffix.patch b/SOURCES/timeout-interval-add-s-suffix.patch new file mode 100644 index 0000000..74f584d --- /dev/null +++ b/SOURCES/timeout-interval-add-s-suffix.patch @@ -0,0 +1,161 @@ +From 1c23bbf9700eda44d0d64f34bcb538d7b9e4f6f6 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 4 Sep 2018 09:19:59 +0200 +Subject: [PATCH] timeout/interval: add "s" suffix where it's missing + +--- + .gitignore | 1 + + heartbeat/SAPInstance | 2 +- + heartbeat/aliyun-vpc-move-ip | 10 +++++----- + heartbeat/gcp-vpc-move-vip.in | 10 +++++----- + heartbeat/mariadb.in | 22 +++++++++++----------- + heartbeat/sybaseASE.in | 32 ++++++++++++++++---------------- + 6 files changed, 39 insertions(+), 38 deletions(-) + +diff --git a/.gitignore b/.gitignore +index bbff032c3..3a9be36e5 100644 +--- a/.gitignore ++++ b/.gitignore +@@ -44,6 +44,7 @@ heartbeat/ocf-directories + heartbeat/ocf-shellfuncs + heartbeat/send_ua + heartbeat/shellfuncs ++heartbeat/*.pyc + include/agent_config.h + include/config.h + include/config.h.in +diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +index e27952adb..ed446c9c1 100755 +--- a/heartbeat/aliyun-vpc-move-ip ++++ b/heartbeat/aliyun-vpc-move-ip +@@ -155,11 +155,11 @@ Valid Aliyun CLI profile name + + + +- +- +- +- +- ++ ++ ++ ++ ++ + + + END +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index ba61193b6..31d84643a 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -77,11 +77,11 @@ METADATA = \ + + + +- +- +- +- +- ++ ++ ++ ++ ++ + + ''' + +diff --git a/heartbeat/mariadb.in b/heartbeat/mariadb.in +index 860fea7fd..c1969d70e 100644 +--- a/heartbeat/mariadb.in ++++ b/heartbeat/mariadb.in +@@ -250,17 +250,17 @@ The port on which the Master MariaDB instance is listening. + + + +- +- +- +- +- +- +- +- +- +- +- ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + END +diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in +index a4a0b7a0c..b4809ea23 100755 +--- a/heartbeat/sybaseASE.in ++++ b/heartbeat/sybaseASE.in +@@ -26,19 +26,19 @@ + # /$sybase_home/$sybase_ase/install/RUN_$server_name + # + # (2) You can customize the interval value in the meta-data section if needed: +-# +-# ++# ++# + # + # +-# +-# ++# ++# + # + # +-# +-# ++# ++# + # +-# +-# ++# ++# + # The timeout value is not supported by Redhat in RHCS5.0. 
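The motivation for the suffix sweep above (and for the companion CI check added earlier in metadata-add-missing-s-suffix.patch) is that OCF action durations without a unit are ambiguous, so the advertised timeouts and intervals gain an explicit "s". A quick audit for stragglers, assuming the usual heartbeat/ agent layout (a sketch, not the shipped CI rule):

    grep -rnE '(timeout|interval)="[0-9]+"' heartbeat/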
+ # + +@@ -226,19 +226,19 @@ meta_data() + + + +- +- ++ ++ + + +- +- ++ ++ + + +- +- ++ ++ + +- +- ++ ++ + + + EOT diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec new file mode 100644 index 0000000..baa5133 --- /dev/null +++ b/SPECS/resource-agents.spec @@ -0,0 +1,2123 @@ +# +# All modifications and additions to the file contributed by third parties +# remain the property of their copyright owners, unless otherwise agreed +# upon. The license for this file, and modifications and additions to the +# file, is the same license as for the pristine package itself (unless the +# license for the pristine package is not an Open Source License, in which +# case the license is the MIT License). An "Open Source License" is a +# license that conforms to the Open Source Definition (Version 1.9) +# published by the Open Source Initiative. +# + +# Below is the script used to generate a new source file +# from the resource-agent upstream git repo. +# +# TAG=$(git log --pretty="format:%h" -n 1) +# distdir="ClusterLabs-resource-agents-${TAG}" +# TARFILE="${distdir}.tar.gz" +# rm -rf $TARFILE $distdir +# git archive --prefix=$distdir/ HEAD | gzip > $TARFILE +# + +%global upstream_prefix ClusterLabs-resource-agents +%global upstream_version e711383f + +# bundles +%global bundled_lib_dir bundled +## google cloud +# google-cloud-sdk bundle +%global googlecloudsdk google-cloud-sdk +%global googlecloudsdk_version 241.0.0 +%global googlecloudsdk_dir %{bundled_lib_dir}/gcp/%{googlecloudsdk} +# python-httplib2 bundle +%global httplib2 httplib2 +%global httplib2_version 0.18.1 +%global httplib2_dir %{bundled_lib_dir}/gcp/%{httplib2} +# python-pyroute2 bundle +%global pyroute2 pyroute2 +%global pyroute2_version 0.4.13 +%global pyroute2_dir %{bundled_lib_dir}/gcp/%{pyroute2} +## alibaba cloud +# python-colorama bundle +%global colorama colorama +%global colorama_version 0.3.3 +%global colorama_dir %{bundled_lib_dir}/aliyun/%{colorama} +# python-pycryptodome bundle +%global pycryptodome pycryptodome +%global pycryptodome_version 3.6.4 +%global pycryptodome_dir %{bundled_lib_dir}/aliyun/%{pycryptodome} +# python-aliyun-sdk-core bundle +%global aliyunsdkcore aliyun-python-sdk-core +%global aliyunsdkcore_version 2.13.1 +%global aliyunsdkcore_dir %{bundled_lib_dir}/aliyun/%{aliyunsdkcore} +# python-aliyun-sdk-ecs bundle +%global aliyunsdkecs aliyun-python-sdk-ecs +%global aliyunsdkecs_version 4.9.3 +%global aliyunsdkecs_dir %{bundled_lib_dir}/aliyun/%{aliyunsdkecs} +# python-aliyun-sdk-vpc bundle +%global aliyunsdkvpc aliyun-python-sdk-vpc +%global aliyunsdkvpc_version 3.0.2 +%global aliyunsdkvpc_dir %{bundled_lib_dir}/aliyun/%{aliyunsdkvpc} +# aliyuncli bundle +%global aliyuncli aliyun-cli +%global aliyuncli_version 2.1.10 +%global aliyuncli_dir %{bundled_lib_dir}/aliyun/%{aliyuncli} + +# determine the ras-set to process based on configure invokation +%bcond_with rgmanager +%bcond_without linuxha + +Name: resource-agents +Summary: Open Source HA Reusable Cluster Resource Scripts +Version: 4.1.1 +Release: 94%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist} +License: GPLv2+ and LGPLv2+ +URL: https://github.com/ClusterLabs/resource-agents +%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} +Group: System Environment/Base +%else +Group: Productivity/Clustering/HA +%endif +Source0: %{upstream_prefix}-%{upstream_version}.tar.gz +Source1: %{googlecloudsdk}-%{googlecloudsdk_version}-linux-x86_64.tar.gz +Source2: %{httplib2}-%{httplib2_version}.tar.gz +Source3: 
%{pyroute2}-%{pyroute2_version}.tar.gz +Source4: %{colorama}-%{colorama_version}.tar.gz +Source5: %{pycryptodome}-%{pycryptodome_version}.tar.gz +Source6: %{aliyunsdkcore}-%{aliyunsdkcore_version}.tar.gz +Source7: %{aliyunsdkecs}-%{aliyunsdkecs_version}.tar.gz +Source8: %{aliyunsdkvpc}-%{aliyunsdkvpc_version}.tar.gz +Source9: %{aliyuncli}-%{aliyuncli_version}.tar.gz +Patch0: nova-compute-wait-NovaEvacuate.patch +Patch1: LVM-volume_group_check_only.patch +Patch2: bz1552330-vdo-vol.patch +Patch3: IPaddr2-monitor_retries.patch +Patch4: VirtualDomain-stateless-support.patch +Patch5: 1-configure-add-python-path-detection.patch +Patch6: 2-ci-skip-python-agents-in-shellcheck.patch +Patch7: 3-gcp-vpc-move-vip.patch +Patch8: 4-gcp-vpc-move-route.patch +Patch9: 5-python-library.patch +Patch10: dont-use-ocf_attribute_target-for-metadata.patch +Patch11: LVM-activate-fix-issue-with-dashes.patch +Patch12: 6-gcp-move-vip-filter-aggregatedlist.patch +Patch13: aliyun-vpc-move-ip-1.patch +Patch14: aliyun-vpc-move-ip-2-fixes.patch +Patch15: aliyun-vpc-move-ip-3-fix-manpage.patch +Patch16: build-add-missing-manpages.patch +Patch17: findif-only-match-lines-with-netmasks.patch +Patch18: 7-gcp-stackdriver-logging-note.patch +Patch19: LVM-fix-missing-dash.patch +Patch20: lvmlockd-add-cmirrord-support.patch +Patch21: LVM-activate-1-warn-vg_access_mode.patch +Patch22: bz1607607-podman.patch +Patch23: aliyun-vpc-move-ip-5-improve-metadata-manpage.patch +Patch24: aws-vpc-move-ip-1-avoid-false-positive-monitor.patch +Patch25: aws-vpc-move-ip-2-avoid-false-positive-monitor.patch +Patch26: LVM-activate-2-parameters-access-mode-fixes.patch +Patch27: timeout-interval-add-s-suffix.patch +Patch28: metadata-add-missing-s-suffix.patch +Patch29: bz1631291-systemd-tmpfiles-configurable-path.patch +Patch30: nfsserver-mount-rpc_pipefs.patch +Patch31: bz1635785-redis-pidof-basename.patch +Patch32: bz1642027-nfsserver-var-lib-nfs-fix.patch +Patch33: bz1662466-vdo-vol-fix-monitor-action.patch +Patch34: bz1643307-LVM-activate-dont-fail-initial-probe.patch +Patch35: bz1658664-LVM-activate-dont-require-locking_type.patch +Patch36: bz1689184-Squid-1-fix-pidfile-issue.patch +Patch37: bz1667414-1-LVM-activate-support-LVs-from-same-VG.patch +Patch38: bz1667414-2-LVM-activate-only-count-volumes.patch +Patch39: bz1666691-tomcat-use-systemd-when-catalina.sh-unavailable.patch +Patch40: bz1693662-aws-vpc-move-ip-avoid-possible-race-condition.patch +Patch41: bz1695656-gcp-vpc-move-route-vip-fix-python3-encoding.patch +Patch42: bz1697559-aws-vpc-move-ip-1-multi-route-table-support.patch +Patch43: bz1697559-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch +Patch44: bz1669140-Route-make-family-parameter-optional.patch +Patch45: bz1683548-redis-mute-password-warning.patch +Patch46: bz1692413-1-iSCSITarget-create-iqn-when-it-doesnt-exist.patch +Patch47: bz1689184-Squid-2-dont-run-pgrep-without-PID.patch +Patch48: bz1707969-1-ocf_log-do-not-log-debug-when-HA_debug-unset.patch +Patch49: bz1707969-2-ocf_is_true-add-True-to-regexp.patch +Patch50: bz1717759-Filesystem-remove-notify-action-from-metadata.patch +Patch51: bz1719684-dhcpd-keep-SELinux-context-chroot.patch +Patch52: bz1718219-podman-1-avoid-double-inspect-call.patch +Patch53: bz1718219-podman-2-improve-monitor-action.patch +Patch54: bz1718219-podman-3-remove-docker-remnant.patch +Patch55: bz1718219-podman-4-use-exec-to-avoid-performance-issues.patch +Patch56: bz1730455-LVM-activate-fix-monitor-hang.patch +Patch57: bz1732867-CTDB-1-explicitly-use-bash-shell.patch +Patch58: 
bz1732867-CTDB-2-add-ctdb_max_open_files-parameter.patch +Patch59: bz1732867-CTDB-3-fixes.patch +Patch60: bz1732867-CTDB-4-add-v4.9-support.patch +Patch61: bz1692413-2-iSCSILogicalUnit-create-acls-fix.patch +Patch62: bz1736746-podman-drop-in-support.patch +Patch63: bz1692960-mysql-galera-runuser-su-to-avoid-dac_override.patch +Patch64: bz1745713-rabbitmq-cluster-1-monitor-mnesia-status.patch +Patch65: bz1745713-rabbitmq-cluster-2-fail-when-in-minority-partition.patch +Patch66: bz1745713-rabbitmq-cluster-3-fix-stop-regression.patch +Patch67: bz1745713-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch +Patch68: bz1745713-rabbitmq-cluster-5-ensure-node-attribures-removed.patch +Patch69: bz1745713-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch +Patch70: bz1745713-rabbitmq-cluster-7-suppress-additional-output.patch +Patch71: bz1695039-LVM-activate-return-NOT_RUNNING-rejoin-cluster.patch +Patch72: bz1738428-LVM-activate-detect-volume-without-reboot.patch +Patch73: bz1744103-Filesystem-1-monitor-symlink-support.patch +Patch74: bz1744103-Filesystem-2-add-symlink-support.patch +Patch75: bz1744103-Filesystem-3-fix-umount-disk-failure.patch +Patch76: bz1744103-Filesystem-4-fix-readlink-issue.patch +Patch77: bz1744140-Filesystem-1-avoid-corrupt-mount-list.patch +Patch78: bz1744140-Filesystem-2-prevent-killing-bind-mount.patch +Patch79: bz1744140-Filesystem-3-improved-bind-mount-check.patch +Patch80: bz1757837-IPsrcaddr-fix-regression-without-NetworkManager.patch +Patch81: bz1744224-IPsrcaddr-1-add-destination-and-table-parameters.patch +Patch82: bz1748768-docker-fix-stop-issues.patch +Patch83: bz1750261-Route-1-dont-fence-when-parameters-not-set.patch +Patch84: bz1750352-rabbitmq-cluster-restore-users-single-node-mode.patch +Patch85: bz1751700-IPaddr2-1-sanitize-IPv6-IPs.patch +Patch86: bz1751700-IPaddr2-2-return-empty-when-sanitation-fails.patch +Patch87: bz1751962-nfsserver-1-systemd-perf-improvements.patch +Patch88: bz1751962-nfsserver-2-systemd-use-no-legend.patch +Patch89: bz1755760-NovaEvacuate-evacuate_delay.patch +Patch90: bz1750261-Route-2-validate-start-validate-all.patch +Patch91: bz1741843-LVM-activate-partial-activation.patch +Patch92: bz1764888-exportfs-allow-same-fsid.patch +Patch93: bz1765128-mysql-galera-fix-incorrect-rc.patch +Patch94: bz1741042-IPaddr2-add-noprefixroute-parameter.patch +Patch95: bz1744224-IPsrcaddr-2-local-rule-destination-fixes.patch +Patch96: bz1788889-podman-improve-image-exist-check.patch +Patch97: bz1744224-IPsrcaddr-3-fix-probe-issues.patch +Patch98: bz1767916-IPaddr2-clusterip-not-supported.patch +Patch99: bz1777381-Filesystem-1-refresh-UUID.patch +Patch100: bz1777381-Filesystem-2-udev-settle.patch +Patch101: bz1744224-IPsrcaddr-4-fix-hardcoded-device.patch +Patch102: bz1792196-rabbitmq-cluster-delete-nodename-when-stop-fails.patch +Patch103: bz1808468-1-lvmlockd-fix-conditionals.patch +Patch104: bz1808468-2-remove-locking_type.patch +Patch105: bz1759115-aws-vpc-route53-1-update.patch +Patch106: bz1804658-azure-lb-1-remove-status-metadata.patch +Patch107: bz1804658-azure-lb-2-add-socat-support.patch +Patch108: bz1810466-aws-vpc-move-ip-1-add-routing_table_role.patch +Patch109: bz1810466-aws-vpc-move-ip-2-update-metadata.patch +Patch110: bz1792237-redis-1-fix-validate-all.patch +Patch111: bz1792237-redis-2-run-validate-during-start.patch +Patch112: bz1817432-use-safe-temp-file-location.patch +Patch113: bz1817598-ocf_is_clone-1-fix-clone-max-can-be-0.patch +Patch114: bz1817598-ocf_is_clone-2-update-comment.patch +Patch115: 
bz1819021-aws-vpc-move-ip-delete-remaining-route-entries.patch +Patch116: bz1759115-aws-vpc-route53-2-add-public-and-secondary-ip-support.patch +Patch117: bz1633251-gcp-pd-move-1.patch +Patch118: bz1633251-gcp-pd-move-2-use-OCF_FUNCTIONS_DIR.patch +Patch119: bz1633251-gcp-pd-move-3-add-stackdriver_logging-to-metadata.patch +Patch120: bz1819965-1-ocf.py-update.patch +Patch121: bz1819965-2-azure-events.patch +Patch122: bz1759115-aws-vpc-route53-3-awscli-property.patch +Patch123: bz1744190-pgsql-1-set-primary-standby-initial-score.patch +Patch124: bz1744190-pgsql-2-improve-start-checks.patch +Patch125: bz1820523-exportfs-1-add-symlink-support.patch +Patch126: bz1832321-rabbitmq-cluster-increase-wait-timeout.patch +Patch127: bz1818997-nfsserver-1-fix-nfsv4-only-support.patch +Patch128: bz1830716-NovaEvacuate-suppress-expected-error.patch +Patch129: bz1836945-db2-hadr-promote-standby-node.patch +Patch130: bz1633251-gcp-pd-move-4-fixes-and-improvements.patch +Patch131: bz1633251-gcp-pd-move-5-bundle.patch +Patch132: bz1839721-podman-force-rm-container-if-rm-fails.patch +Patch133: bz1820523-exportfs-2-fix-monitor-action.patch +Patch134: bz1843999-aliyun-vpc-move-ip-log-output-when-failing.patch +Patch135: bz1845574-azure-events-1-handle-exceptions-in-urlopen.patch +Patch136: bz1845581-nfsserver-dont-log-error-message-file-doesnt-exist.patch +Patch137: bz1845583-exportfs-1-describe-clientspec-format-in-metadata.patch +Patch138: bz1845583-exportfs-2-fix-typo.patch +Patch139: bz1814896-Filesystem-fast_stop-default-to-no-for-GFS2.patch +Patch140: bz1836186-pgsql-support-Pacemaker-v2.03-output.patch +Patch141: bz1819965-3-azure-events-decode-when-type-not-str.patch +Patch142: bz1818997-nfsserver-2-stop-nfsdcld-if-present.patch +Patch143: bz1818997-3-nfsserver-nfsnotify-fix-selinux-label-issue.patch +Patch144: bz1845574-azure-events-2-import-urlerror-encode-postdata.patch +Patch145: bz1846733-gcp-vpc-move-vip-1-support-multiple-alias-ips.patch +Patch146: bz1846733-gcp-vpc-move-vip-2-fix-list-sort.patch +Patch147: bz1850778-azure-lb-fix-redirect-issue.patch +Patch148: bz1640587-pgsql-ignore-masters-re-promote.patch +Patch149: bz1795535-pgsql-1-add-postgresql-12-support.patch +Patch150: bz1795535-pgsql-2-fix-uppercase-hostname-support.patch +Patch151: bz1858752-Filesystem-support-whitespace-device-dir.patch +Patch152: bz1872999-aws-vpc-move-ip-add-region-parameter.patch +Patch153: bz1881114-galera-recover-joining-non-existing-cluster.patch +Patch154: bz1815013-redis-parse-password-correctly-based-on-version.patch +Patch155: bz1763249-manpages-fix-pcs-syntax.patch +Patch156: bz1890068-gcp-pd-move-fix-partially-matched-disk_name.patch +Patch157: bz1848025-sybaseASE-run-verify-for-start-action-only.patch +Patch158: bz1861001-sybaseASE-add-logfile-parameter.patch +Patch159: bz1891835-galera-set-bootstrap-attribute-before-promote.patch +Patch160: bz1891855-galera-recover-2-node-cluster.patch +Patch161: bz1471182-crypt-1-new-ra.patch +Patch162: bz1471182-crypt-2-fix-bashism.patch +Patch163: bz1471182-crypt-3-fix-missing-and.patch +Patch164: bz1895811-aws-vpc-move-ip-dont-warn-for-expected-scenarios.patch +Patch165: bz1897570-aws-add-imdsv2-support.patch +Patch166: bz1886262-podman-recover-from-killed-conmon.patch +Patch167: bz1900015-podman-recover-from-storage-out-of-sync.patch +Patch168: bz1898690-crypt-make-key_file-crypt_type_not-unique.patch +Patch169: bz1899551-NovaEvacuate-fix-delay_evacuate-unset.patch +Patch170: bz1901357-crypt-1-support-symlink-devices.patch +Patch171: 
bz1902208-LVM-activate-stop-before-storage-service.patch +Patch172: bz1901357-crypt-2-dont-sanity-check-during-probe.patch +Patch173: bz1903677-ocf-shellfuncs-fix-traceback-redirection-bash5.patch +Patch174: bz1913932-1-gcp-vpc-move-add-project-parameter.patch +Patch175: bz1913932-2-gcp-vpc-move-route-fixes.patch +Patch176: bz1913932-3-gcp-vpc-move-route-make-vpc_network-optional.patch +Patch177: bz1937142-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch +Patch178: bz1940363-1-galera-redis-use-output-as.patch +Patch179: bz1940363-2-bundle-disable-validate-with.patch +Patch180: bz1891883-ethmonitor-vlan-fix.patch +Patch181: bz1902045-iface-vlan-vlan-not-unique.patch +Patch182: bz1924363-nfsserver-error-check-unmount.patch +Patch183: bz1932863-VirtualDomain-fix-pid-status.patch +Patch184: bz1920698-podman-return-not-running-probe.patch +Patch185: bz1939992-awsvip-dont-partially-match-IPs.patch +Patch186: bz1940094-aws-agents-dont-spam-logs.patch +Patch187: bz1939281-aws-vpc-move-ip-add-ENI-lookup.patch +Patch188: bz1934651-db2-add-PRIMARY-REMOTE_CATCHUP_PENDING-CONNECTED.patch +Patch189: bz1872754-pgsqlms-new-ra.patch +Patch190: bz1957765-gcp-vpc-move-vip-retry.patch + +# bundle patches +Patch1000: 7-gcp-bundled.patch +Patch1001: 8-google-cloud-sdk-fixes.patch +Patch1002: 9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch +Patch1003: 10-gcloud-support-info.patch +Patch1004: bz1691456-gcloud-dont-detect-python2.patch +Patch1005: aliyun-vpc-move-ip-4-bundled.patch +Patch1006: python3-syntax-fixes.patch +Patch1007: aliyuncli-python3-fixes.patch +Patch1008: bz1935422-python-pygments-fix-CVE-2021-20270.patch +Patch1009: bz1943464-python-pygments-fix-CVE-2021-27291.patch + +Obsoletes: heartbeat-resources <= %{version} +Provides: heartbeat-resources = %{version} + +# Build dependencies +BuildRequires: automake autoconf gcc +BuildRequires: perl-interpreter python3-devel +BuildRequires: libxslt glib2-devel +BuildRequires: systemd +BuildRequires: which + +%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} +#BuildRequires: cluster-glue-libs-devel +BuildRequires: docbook-style-xsl docbook-dtds +%if 0%{?rhel} == 0 +BuildRequires: libnet-devel +%endif +%endif + +## Runtime deps +# system tools shared by several agents +Requires: /bin/bash /usr/bin/grep /bin/sed /bin/gawk +Requires: /bin/ps /usr/bin/pkill /usr/bin/hostname /usr/bin/netstat +Requires: /usr/sbin/fuser /bin/mount + +# Filesystem / fs.sh / netfs.sh +Requires: /sbin/fsck +Requires: /usr/sbin/fsck.ext2 /usr/sbin/fsck.ext3 /usr/sbin/fsck.ext4 +Requires: /usr/sbin/fsck.xfs +Requires: /sbin/mount.nfs /sbin/mount.nfs4 +%if 0%{?fedora} < 33 || (0%{?rhel} && 0%{?rhel} < 9) || (0%{?centos} && 0%{?centos} < 9) || 0%{?suse_version} +%if (0%{?rhel} && 0%{?rhel} < 8) || (0%{?centos} && 0%{?centos} < 8) +Requires: /usr/sbin/mount.cifs +%else +Recommends: /usr/sbin/mount.cifs +%endif +%endif + +# IPaddr2 +Requires: /sbin/ip + +# LVM / lvm.sh +Requires: /usr/sbin/lvm + +# nfsserver / netfs.sh +Requires: /usr/sbin/rpc.nfsd /sbin/rpc.statd /usr/sbin/rpc.mountd + +# ocf.py +Requires: python3 + +# rgmanager +%if %{with rgmanager} +# ip.sh +Requires: /usr/sbin/ethtool +Requires: /sbin/rdisc /usr/sbin/arping /bin/ping /bin/ping6 + +# nfsexport.sh +Requires: /sbin/findfs +Requires: /sbin/quotaon /sbin/quotacheck +%endif + +%description +A set of scripts to interface with several services to operate in a +High Availability environment for both Pacemaker and rgmanager +service managers. 
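(Note: the file-path Requires above are resolved by rpm against package file lists at install time, so the spec never has to name the owning packages. A minimal sketch of how to verify such a dependency on an installed system; the paths come from the spec, while the package names in the comments are typical and may vary by distribution:

    rpm -q --whatprovides /usr/sbin/fsck.xfs      # usually xfsprogs
    dnf repoquery --whatprovides /sbin/mount.nfs  # usually nfs-utils
)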
+ +%ifarch x86_64 +%package aliyun +License: GPLv2+ and LGPLv2+ and ASL 2.0 and BSD and MIT +Summary: Alibaba Cloud (Aliyun) resource agents +%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} +Group: System Environment/Base +%else +Group: Productivity/Clustering/HA +%endif +Requires: %{name} = %{version}-%{release} +Requires: python3-jmespath >= 0.9.0 +Requires: python3-urllib3 +# python-colorama bundle +Provides: bundled(python-%{colorama}) = %{colorama_version} +# python-pycryptodome bundle +Provides: bundled(python-%{pycryptodome}) = %{pycryptodome_version} +# python-aliyun-sdk-core bundle +Provides: bundled(python-aliyun-sdk-core) = %{aliyunsdkcore_version} +# python-aliyun-sdk-ecs bundle +Provides: bundled(python-aliyun-sdk-ecs) = %{aliyunsdkecs_version} +# python-aliyun-sdk-vpc bundle +Provides: bundled(python-aliyun-sdk-vpc) = %{aliyunsdkvpc_version} +# aliyuncli bundle +Provides: bundled(aliyuncli) = %{aliyuncli_version} + +%description aliyun +Alibaba Cloud (Aliyun) resource agents allow Alibaba Cloud +(Aliyun) instances to be managed in a cluster environment. +%endif + +%ifarch x86_64 +%package gcp +License: GPLv2+ and LGPLv2+ and BSD and ASL 2.0 and MIT and Python +Summary: Google Cloud Platform resource agents +%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} +Group: System Environment/Base +%else +Group: Productivity/Clustering/HA +%endif +Requires: %{name} = %{version}-%{release} +Requires: python3-google-api-client +# google-cloud-sdk bundle +Requires: python3-cryptography >= 1.7.2 +Requires: python3-dateutil >= 2.6.0 +Provides: bundled(%{googlecloudsdk}) = %{googlecloudsdk_version} +Provides: bundled(python-antlr3) = 3.1.1 +Provides: bundled(python-appdirs) = 1.4.0 +Provides: bundled(python-argparse) = 1.2.1 +Provides: bundled(python-chardet) = 2.3.0 +Provides: bundled(python-dulwich) = 0.10.2 +Provides: bundled(python-ipaddress) = 1.0.16 +Provides: bundled(python-ipaddr) = 2.1.11 +Provides: bundled(python-mako) = 1.0.7 +Provides: bundled(python-oauth2client) = 3.0.0 +Provides: bundled(python-prompt_toolkit) = 1.0.13 +Provides: bundled(python-pyasn1) = 0.4.2 +Provides: bundled(python-pyasn1_modules) = 0.2.1 +Provides: bundled(python-pygments) = 2.2.0 +Provides: bundled(python-pyparsing) = 2.1.10 +Provides: bundled(python-requests) = 2.10.0 +Provides: bundled(python-six) = 1.11.0 +Provides: bundled(python-uritemplate) = 3.0.0 +Provides: bundled(python-urllib3) = 1.15.1 +Provides: bundled(python-websocket) = 0.47.0 +Provides: bundled(python-yaml) = 3.12 +# python-pyroute2 bundle +Provides: bundled(%{pyroute2}) = %{pyroute2_version} + +%description gcp +The Google Cloud Platform resource agents allow Google Cloud +Platform instances to be managed in a cluster environment. +%endif + +%package paf +License: PostgreSQL +Summary: PostgreSQL Automatic Failover (PAF) resource agent +%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} +Group: System Environment/Base +%else +Group: Productivity/Clustering/HA +%endif +Requires: %{name} = %{version}-%{release} +Requires: perl-interpreter + +%description paf +The PostgreSQL Automatic Failover (PAF) resource agent allows PostgreSQL +databases to be managed in a cluster environment. + +%prep +%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos_version} == 0 && 0%{?rhel} == 0 +%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. 
Please install the correct build packages or define the required macros manually.} +exit 1 +%endif +%setup -q -n %{upstream_prefix}-%{upstream_version} +%patch0 -p1 +%patch1 -p1 +%patch2 -p1 +%patch3 -p1 +%patch4 -p1 +%patch5 -p1 +%patch6 -p1 +%patch7 -p1 +%patch8 -p1 +%patch9 -p1 +%patch10 -p1 +%patch11 -p1 +%patch12 -p1 +%patch13 -p1 +%patch14 -p1 +%patch15 -p1 +%patch16 -p1 +%patch17 -p1 +%patch18 -p1 +%patch19 -p1 +%patch20 -p1 +%patch21 -p1 +%patch22 -p1 +%patch23 -p1 +%patch24 -p1 +%patch25 -p1 +%patch26 -p1 +%patch27 -p1 +%patch28 -p1 +%patch29 -p1 +%patch30 -p1 +%patch31 -p1 +%patch32 -p1 +%patch33 -p1 +%patch34 -p1 +%patch35 -p1 +%patch36 -p1 +%patch37 -p1 +%patch38 -p1 +%patch39 -p1 +%patch40 -p1 -F2 +%patch41 -p1 +%patch42 -p1 +%patch43 -p1 +%patch44 -p1 +%patch45 -p1 +%patch46 -p1 +%patch47 -p1 +%patch48 -p1 +%patch49 -p1 +%patch50 -p1 +%patch51 -p1 +%patch52 -p1 +%patch53 -p1 +%patch54 -p1 +%patch55 -p1 +%patch56 -p1 +%patch57 -p1 +%patch58 -p1 +%patch59 -p1 +%patch60 -p1 -F1 +%patch61 -p1 +%patch62 -p1 -F2 +%patch63 -p1 +%patch64 -p1 +%patch65 -p1 +%patch66 -p1 +%patch67 -p1 +%patch68 -p1 +%patch69 -p1 +%patch70 -p1 +%patch71 -p1 +%patch72 -p1 +%patch73 -p1 +%patch74 -p1 +%patch75 -p1 +%patch76 -p1 +%patch77 -p1 +%patch78 -p1 +%patch79 -p1 +%patch80 -p1 +%patch81 -p1 +%patch82 -p1 +%patch83 -p1 +%patch84 -p1 +%patch85 -p1 +%patch86 -p1 +%patch87 -p1 +%patch88 -p1 +%patch89 -p1 +%patch90 -p1 +%patch91 -p1 +%patch92 -p1 +%patch93 -p1 +%patch94 -p1 -F2 +%patch95 -p1 +%patch96 -p1 +%patch97 -p1 +%patch98 -p1 +%patch99 -p1 +%patch100 -p1 +%patch101 -p1 +%patch102 -p1 +%patch103 -p1 +%patch104 -p1 +%patch105 -p1 +%patch106 -p1 +%patch107 -p1 +%patch108 -p1 +%patch109 -p1 +%patch110 -p1 +%patch111 -p1 +%patch112 -p1 +%patch113 -p1 +%patch114 -p1 +%patch115 -p1 +%patch116 -p1 +%patch117 -p1 +%patch118 -p1 +%patch119 -p1 +%patch120 -p1 +%patch121 -p1 +%patch122 -p1 +%patch123 -p1 +%patch124 -p1 +%patch125 -p1 +%patch126 -p1 +%patch127 -p1 +%patch128 -p1 -F2 +%patch129 -p1 +%patch130 -p1 +%patch131 -p1 +%patch132 -p1 +%patch133 -p1 +%patch134 -p1 +%patch135 -p1 +%patch136 -p1 +%patch137 -p1 +%patch138 -p1 +%patch139 -p1 +%patch140 -p1 +%patch141 -p1 +%patch142 -p1 +%patch143 -p1 +%patch144 -p1 +%patch145 -p1 +%patch146 -p1 +%patch147 -p1 +%patch148 -p1 +%patch149 -p1 +%patch150 -p1 +%patch151 -p1 -F1 +%patch152 -p1 +%patch153 -p1 +%patch154 -p1 -F1 +%patch155 -p1 +%patch156 -p1 +%patch157 -p1 +%patch158 -p1 +%patch159 -p1 +%patch160 -p1 +%patch161 -p1 +%patch162 -p1 +%patch163 -p1 +%patch164 -p1 +%patch165 -p1 +%patch166 -p1 +%patch167 -p1 +%patch168 -p1 +%patch169 -p1 -F2 +%patch170 -p1 +%patch171 -p1 +%patch172 -p1 +%patch173 -p1 +%patch174 -p1 +%patch175 -p1 +%patch176 -p1 +%patch177 -p1 +%patch178 -p1 +%patch179 -p1 +%patch180 -p1 +%patch181 -p1 +%patch182 -p1 +%patch183 -p1 +%patch184 -p1 +%patch185 -p1 +%patch186 -p1 +%patch187 -p1 -F2 +%patch188 -p1 +%patch189 -p1 +%patch190 -p1 + +chmod 755 heartbeat/nova-compute-wait +chmod 755 heartbeat/NovaEvacuate +chmod 755 heartbeat/pgsqlms + +# bundles +mkdir -p %{bundled_lib_dir}/gcp +mkdir -p %{bundled_lib_dir}/aliyun + +# google-cloud-sdk bundle +%ifarch x86_64 +tar -xzf %SOURCE1 -C %{bundled_lib_dir}/gcp +## upgrade httplib2 to fix CVE-2020-11078 +pushd %{googlecloudsdk_dir} +rm -rf lib/third_party/httplib2 +popd + +# python-httplib2 bundle +tar -xzf %SOURCE2 -C %{bundled_lib_dir} +mv %{bundled_lib_dir}/%{httplib2}-%{httplib2_version} %{httplib2_dir} + +# gcp*: append bundled-directory to search path, gcloud-ra +%patch1000 -p1 +# 
google-cloud-sdk fixes +%patch1001 -p1 +# replace python-rsa with python-cryptography +%patch1002 -p1 +# gcloud support info +%patch1003 -p1 +# gcloud remove python 2 detection +%patch1004 -p1 +# rename gcloud +mv %{googlecloudsdk_dir}/bin/gcloud %{googlecloudsdk_dir}/bin/gcloud-ra +# keep googleapiclient +mv %{googlecloudsdk_dir}/platform/bq/third_party/googleapiclient %{googlecloudsdk_dir}/lib/third_party +# only keep gcloud +rm -rf %{googlecloudsdk_dir}/bin/{bootstrapping,bq,dev_appserver.py,docker-credential-gcloud,endpointscfg.py,git-credential-gcloud.sh,gsutil,java_dev_appserver.sh} %{googlecloudsdk_dir}/{completion.*,deb,install.*,path.*,platform,properties,RELEASE_NOTES,rpm,VERSION} +# remove Python 2 code +rm -rf %{googlecloudsdk_dir}/lib/third_party/*/python2 +# remove python-rsa +rm -rf %{googlecloudsdk_dir}/lib/third_party/rsa +# remove grpc +rm -rf %{googlecloudsdk_dir}/lib/third_party/grpc +# remove dateutil +rm -rf %{googlecloudsdk_dir}/lib/third_party/dateutil +# docs/licenses +cp %{googlecloudsdk_dir}/README %{googlecloudsdk}_README +cp %{googlecloudsdk_dir}/lib/third_party/argparse/README.txt %{googlecloudsdk}_argparse_README.txt +cp %{googlecloudsdk_dir}/LICENSE %{googlecloudsdk}_LICENSE +cp %{httplib2_dir}/LICENSE %{googlecloudsdk}_httplib2_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/contextlib2/LICENSE %{googlecloudsdk}_contextlib2_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/concurrent/LICENSE %{googlecloudsdk}_concurrent_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/yaml/LICENSE %{googlecloudsdk}_yaml_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/pyu2f/LICENSE %{googlecloudsdk}_pyu2f_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/ml_sdk/LICENSE %{googlecloudsdk}_ml_sdk_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/ml_sdk/pkg/LICENSE %{googlecloudsdk}_pkg_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/ipaddr/LICENSE %{googlecloudsdk}_ipaddr_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/urllib3/LICENSE %{googlecloudsdk}_urllib3_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/ipaddress/LICENSE %{googlecloudsdk}_ipaddress_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/requests/LICENSE %{googlecloudsdk}_requests_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/docker/LICENSE %{googlecloudsdk}_docker_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/monotonic/LICENSE %{googlecloudsdk}_monotonic_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/websocket/LICENSE %{googlecloudsdk}_websocket_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/fasteners/LICENSE %{googlecloudsdk}_fasteners_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/wcwidth/LICENSE %{googlecloudsdk}_wcwidth_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/pygments/LICENSE %{googlecloudsdk}_pygments_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/oauth2client/LICENSE %{googlecloudsdk}_oauth2client_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/uritemplate/LICENSE %{googlecloudsdk}_uritemplate_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/dulwich/LICENSE %{googlecloudsdk}_dulwich_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/mako/LICENSE %{googlecloudsdk}_mako_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/packaging/LICENSE %{googlecloudsdk}_packaging_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/socks/LICENSE %{googlecloudsdk}_socks_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/antlr3/LICENSE %{googlecloudsdk}_antlr3_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/argparse/LICENSE.txt %{googlecloudsdk}_argparse_LICENSE.txt +cp 
%{googlecloudsdk_dir}/lib/third_party/chardet/LICENSE %{googlecloudsdk}_chardet_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/ruamel/LICENSE %{googlecloudsdk}_ruamel_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/appdirs/LICENSE %{googlecloudsdk}_appdirs_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/argcomplete/LICENSE %{googlecloudsdk}_argcomplete_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/pyasn1_modules/LICENSE %{googlecloudsdk}_pyasn1_modules_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/setuptools/LICENSE %{googlecloudsdk}_setuptools_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/google/LICENSE %{googlecloudsdk}_google_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/google/protobuf/LICENSE %{googlecloudsdk}_protobuf_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/six/LICENSE %{googlecloudsdk}_six_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/dns/LICENSE %{googlecloudsdk}_dns_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/enum/LICENSE %{googlecloudsdk}_enum_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/gae_ext_runtime/LICENSE %{googlecloudsdk}_gae_ext_runtime_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/fancy_urllib/LICENSE %{googlecloudsdk}_fancy_urllib_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/pyasn1/LICENSE %{googlecloudsdk}_pyasn1_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/apitools/LICENSE %{googlecloudsdk}_apitools_LICENSE +cp %{googlecloudsdk_dir}/lib/third_party/containerregistry/LICENSE %{googlecloudsdk}_containerregistry_LICENSE + +# python-pyroute2 bundle +tar -xzf %SOURCE3 -C %{bundled_lib_dir}/gcp +mv %{bundled_lib_dir}/gcp/%{pyroute2}-%{pyroute2_version} %{pyroute2_dir} +cp %{pyroute2_dir}/README.md %{pyroute2}_README.md +cp %{pyroute2_dir}/README.license.md %{pyroute2}_README.license.md +cp %{pyroute2_dir}/LICENSE.Apache.v2 %{pyroute2}_LICENSE.Apache.v2 +cp %{pyroute2_dir}/LICENSE.GPL.v2 %{pyroute2}_LICENSE.GPL.v2 + +# python-colorama bundle +tar -xzf %SOURCE4 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{colorama}-%{colorama_version} %{colorama_dir} +cp %{colorama_dir}/LICENSE.txt %{colorama}_LICENSE.txt +cp %{colorama_dir}/README.rst %{colorama}_README.rst + +pushd %{colorama_dir} +# remove bundled egg-info +rm -rf *.egg-info +popd + +# python-pycryptodome bundle +tar -xzf %SOURCE5 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{pycryptodome}-%{pycryptodome_version} %{pycryptodome_dir} +cp %{pycryptodome_dir}/README.rst %{pycryptodome}_README.rst +cp %{pycryptodome_dir}/LICENSE.rst %{pycryptodome}_LICENSE.rst + +# python-aliyun-sdk-core bundle +tar -xzf %SOURCE6 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{aliyunsdkcore}-%{aliyunsdkcore_version} %{aliyunsdkcore_dir} +cp %{aliyunsdkcore_dir}/README.rst %{aliyunsdkcore}_README.rst + +# python-aliyun-sdk-ecs bundle +tar -xzf %SOURCE7 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{aliyunsdkecs}-%{aliyunsdkecs_version} %{aliyunsdkecs_dir} +cp %{aliyunsdkecs_dir}/README.rst %{aliyunsdkecs}_README.rst + +# python-aliyun-sdk-vpc bundle +tar -xzf %SOURCE8 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{aliyunsdkvpc}-%{aliyunsdkvpc_version} %{aliyunsdkvpc_dir} +cp %{aliyunsdkvpc_dir}/README.rst %{aliyunsdkvpc}_README.rst + +# aliyuncli bundle +tar -xzf %SOURCE9 -C %{bundled_lib_dir}/aliyun +mv %{bundled_lib_dir}/aliyun/%{aliyuncli}-%{aliyuncli_version} %{aliyuncli_dir} +cp %{aliyuncli_dir}/README.rst %{aliyuncli}_README.rst +cp %{aliyuncli_dir}/LICENSE %{aliyuncli}_LICENSE +# aliyun*: use bundled 
libraries +%patch1005 -p1 + +# aliyun Python 3 fixes +%patch1006 -p1 +%patch1007 -p1 + +# fix CVE's in python-pygments +pushd %{googlecloudsdk_dir}/lib/third_party +%patch1008 -p1 -F2 +%patch1009 -p1 -F2 +popd +%endif + +%build +if [ ! -f configure ]; then + ./autogen.sh +fi + +%if 0%{?fedora} >= 11 || 0%{?centos_version} > 5 || 0%{?rhel} > 5 +CFLAGS="$(echo '%{optflags}')" +%global conf_opt_fatal "--enable-fatal-warnings=no" +%else +CFLAGS="${CFLAGS} ${RPM_OPT_FLAGS}" +%global conf_opt_fatal "--enable-fatal-warnings=yes" +%endif + +%if %{with rgmanager} +%global rasset rgmanager +%endif +%if %{with linuxha} +%global rasset linux-ha +%endif +%if %{with rgmanager} && %{with linuxha} +%global rasset all +%endif + +export CFLAGS + +%configure BASH_SHELL="/bin/bash" \ + PYTHON="%{__python3}" \ + %{conf_opt_fatal} \ +%if %{defined _unitdir} + --with-systemdsystemunitdir=%{_unitdir} \ +%endif +%if %{defined _tmpfilesdir} + --with-systemdtmpfilesdir=%{_tmpfilesdir} \ + --with-rsctmpdir=/run/resource-agents \ +%endif + --with-pkg-name=%{name} \ + --with-ras-set=%{rasset} + +%if %{defined jobs} +JFLAGS="$(echo '-j%{jobs}')" +%else +JFLAGS="$(echo '%{_smp_mflags}')" +%endif + +make $JFLAGS + +# python-httplib2 bundle +%ifarch x86_64 +pushd %{httplib2_dir} +%{__python3} setup.py build +popd + +# python-pyroute2 bundle +pushd %{pyroute2_dir} +%{__python3} setup.py build +popd + +# python-colorama bundle +pushd %{colorama_dir} +%{__python3} setup.py build +popd + +# python-pycryptodome bundle +pushd %{pycryptodome_dir} +%{__python3} setup.py build +popd + +# python-aliyun-sdk-core bundle +pushd %{aliyunsdkcore_dir} +%{__python3} setup.py build +popd + +# python-aliyun-sdk-ecs bundle +pushd %{aliyunsdkecs_dir} +%{__python3} setup.py build +popd + +# python-aliyun-sdk-vpc bundle +pushd %{aliyunsdkvpc_dir} +%{__python3} setup.py build +popd + +# aliyuncli bundle +pushd %{aliyuncli_dir} +%{__python3} setup.py build +popd +%endif + +%install +rm -rf %{buildroot} +make install DESTDIR=%{buildroot} + +# byte compile ocf.py +%py_byte_compile %{__python3} %{buildroot}%{_usr}/lib/ocf/lib/heartbeat + +# google-cloud-sdk bundle +%ifarch x86_64 +pushd %{googlecloudsdk_dir} +mkdir -p %{buildroot}/usr/lib/%{name}/%{googlecloudsdk_dir} +cp -a bin data lib %{buildroot}/usr/lib/%{name}/%{googlecloudsdk_dir} +mkdir %{buildroot}/%{_bindir} +ln -s /usr/lib/%{name}/%{googlecloudsdk_dir}/bin/gcloud-ra %{buildroot}/%{_bindir} +popd + +# python-httplib2 bundle +pushd %{httplib2_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{googlecloudsdk_dir}/lib/third_party +popd + +# python-pyroute2 bundle +pushd %{pyroute2_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/gcp +popd + +# python-colorama bundle +pushd %{colorama_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +popd + +# python-pycryptodome bundle +pushd %{pycryptodome_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +popd + +# python-aliyun-sdk-core bundle +pushd %{aliyunsdkcore_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +popd + +# python-aliyun-sdk-ecs bundle +pushd %{aliyunsdkecs_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib 
/usr/lib/%{name}/%{bundled_lib_dir}/aliyun +popd + +# python-aliyun-sdk-vpc bundle +pushd %{aliyunsdkvpc_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +popd + +# aliyuncli bundle +pushd %{aliyuncli_dir} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun +sed -i -e "/^import sys/asys.path.insert(0, '/usr/lib/%{name}/%{bundled_lib_dir}/aliyun')\nsys.path.insert(1, '/usr/lib/%{name}/%{bundled_lib_dir}/aliyun/aliyuncli')" %{buildroot}/%{_bindir}/aliyuncli +mv %{buildroot}/%{_bindir}/aliyuncli %{buildroot}/%{_bindir}/aliyuncli-ra +# aliyun_completer / aliyun_zsh_complete.sh +rm %{buildroot}/%{_bindir}/aliyun_* +popd +%endif + +## tree fixup +# remove docs (there is only one, and docs should come from the doc sections in the files lists) +rm -rf %{buildroot}/usr/share/doc/resource-agents + +## +# Create symbolic link between IPaddr and IPaddr2 +## +rm -f %{buildroot}/usr/lib/ocf/resource.d/heartbeat/IPaddr +ln -s /usr/lib/ocf/resource.d/heartbeat/IPaddr2 %{buildroot}/usr/lib/ocf/resource.d/heartbeat/IPaddr +
%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root) +%doc AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog +%if %{with linuxha} +%doc heartbeat/README.galera +%doc doc/README.webapps +%doc %{_datadir}/%{name}/ra-api-1.dtd +%doc %{_datadir}/%{name}/metadata.rng +%endif + +%if %{with rgmanager} +%{_datadir}/cluster +%{_sbindir}/rhev-check.sh +%endif + +%if %{with linuxha} +%dir %{_usr}/lib/ocf +%dir %{_usr}/lib/ocf/resource.d +%dir %{_usr}/lib/ocf/lib + +%{_usr}/lib/ocf/lib/heartbeat + +%{_usr}/lib/ocf/resource.d/heartbeat +%{_usr}/lib/ocf/resource.d/openstack +%if %{with rgmanager} +%{_usr}/lib/ocf/resource.d/redhat +%endif + +%if %{defined _unitdir} +%{_unitdir}/resource-agents-deps.target +%endif +%if %{defined _tmpfilesdir} +%{_tmpfilesdir}/%{name}.conf +%endif + +%dir %{_datadir}/%{name} +%dir %{_datadir}/%{name}/ocft +%{_datadir}/%{name}/ocft/configs +%{_datadir}/%{name}/ocft/caselib +%{_datadir}/%{name}/ocft/README +%{_datadir}/%{name}/ocft/README.zh_CN +%{_datadir}/%{name}/ocft/helpers.sh +%exclude %{_datadir}/%{name}/ocft/runocft +%exclude %{_datadir}/%{name}/ocft/runocft.prereq + +%{_sbindir}/ocft + +%{_includedir}/heartbeat + +%if %{defined _tmpfilesdir} +%dir %attr (1755, root, root) /run/resource-agents +%else +%dir %attr (1755, root, root) %{_var}/run/resource-agents +%endif + +%{_mandir}/man7/*.7* + +### +# Supported, but in another subpackage +### +%exclude /usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip* +%exclude %{_mandir}/man7/*aliyun-vpc-move-ip* +%exclude /usr/lib/ocf/resource.d/heartbeat/gcp* +%exclude %{_mandir}/man7/*gcp* +%exclude /usr/lib/%{name}/%{bundled_lib_dir} +%exclude /usr/lib/ocf/resource.d/heartbeat/pgsqlms +%exclude %{_mandir}/man7/*pgsqlms* +%exclude %{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm + +### +# Moved to separate packages +### +%exclude /usr/lib/ocf/resource.d/heartbeat/SAP* +%exclude /usr/lib/ocf/lib/heartbeat/sap* +%exclude %{_mandir}/man7/*SAP* + +### +# Unsupported +### +%exclude /usr/lib/ocf/resource.d/heartbeat/clvm +%exclude /usr/lib/ocf/resource.d/heartbeat/LVM +%exclude /usr/lib/ocf/resource.d/heartbeat/AoEtarget +%exclude /usr/lib/ocf/resource.d/heartbeat/AudibleAlarm +%exclude /usr/lib/ocf/resource.d/heartbeat/ClusterMon +%exclude /usr/lib/ocf/resource.d/heartbeat/EvmsSCC +%exclude /usr/lib/ocf/resource.d/heartbeat/Evmsd +%exclude /usr/lib/ocf/resource.d/heartbeat/ICP +%exclude 
/usr/lib/ocf/resource.d/heartbeat/LinuxSCSI +%exclude /usr/lib/ocf/resource.d/heartbeat/ManageRAID +%exclude /usr/lib/ocf/resource.d/heartbeat/ManageVE +%exclude /usr/lib/ocf/resource.d/heartbeat/Pure-FTPd +%exclude /usr/lib/ocf/resource.d/heartbeat/Raid1 +%exclude /usr/lib/ocf/resource.d/heartbeat/ServeRAID +%exclude /usr/lib/ocf/resource.d/heartbeat/SphinxSearchDaemon +%exclude /usr/lib/ocf/resource.d/heartbeat/Stateful +%exclude /usr/lib/ocf/resource.d/heartbeat/SysInfo +%exclude /usr/lib/ocf/resource.d/heartbeat/VIPArip +%exclude /usr/lib/ocf/resource.d/heartbeat/WAS +%exclude /usr/lib/ocf/resource.d/heartbeat/WAS6 +%exclude /usr/lib/ocf/resource.d/heartbeat/WinPopup +%exclude /usr/lib/ocf/resource.d/heartbeat/Xen +%exclude /usr/lib/ocf/resource.d/heartbeat/anything +%exclude /usr/lib/ocf/resource.d/heartbeat/asterisk +%exclude /usr/lib/ocf/resource.d/heartbeat/dnsupdate +%exclude /usr/lib/ocf/resource.d/heartbeat/eDir88 +%exclude /usr/lib/ocf/resource.d/heartbeat/fio +%exclude /usr/lib/ocf/resource.d/heartbeat/ids +%exclude /usr/lib/ocf/resource.d/heartbeat/iface-bridge +%exclude /usr/lib/ocf/resource.d/heartbeat/ipsec +%exclude /usr/lib/ocf/resource.d/heartbeat/jira +%exclude /usr/lib/ocf/resource.d/heartbeat/kamailio +%exclude /usr/lib/ocf/resource.d/heartbeat/lxd-info +%exclude /usr/lib/ocf/resource.d/heartbeat/machine-info +%exclude /usr/lib/ocf/resource.d/heartbeat/mariadb +%exclude /usr/lib/ocf/resource.d/heartbeat/minio +%exclude /usr/lib/ocf/resource.d/heartbeat/mpathpersist +%exclude /usr/lib/ocf/resource.d/heartbeat/iscsi +%exclude /usr/lib/ocf/resource.d/heartbeat/jboss +%exclude /usr/lib/ocf/resource.d/heartbeat/ldirectord +%exclude /usr/lib/ocf/resource.d/heartbeat/lxc +%exclude /usr/lib/ocf/resource.d/heartbeat/openstack-cinder-volume +%exclude /usr/lib/ocf/resource.d/heartbeat/openstack-floating-ip +%exclude /usr/lib/ocf/resource.d/heartbeat/openstack-info +%exclude /usr/lib/ocf/resource.d/heartbeat/ovsmonitor +%exclude /usr/lib/ocf/resource.d/heartbeat/pgagent +%exclude /usr/lib/ocf/resource.d/heartbeat/pingd +%exclude /usr/lib/ocf/resource.d/heartbeat/pound +%exclude /usr/lib/ocf/resource.d/heartbeat/proftpd +%exclude /usr/lib/ocf/resource.d/heartbeat/rkt +%exclude /usr/lib/ocf/resource.d/heartbeat/scsi2reservation +%exclude /usr/lib/ocf/resource.d/heartbeat/sfex +%exclude /usr/lib/ocf/resource.d/heartbeat/sg_persist +%exclude /usr/lib/ocf/resource.d/heartbeat/syslog-ng +%exclude /usr/lib/ocf/resource.d/heartbeat/varnish +%exclude /usr/lib/ocf/resource.d/heartbeat/vmware +%exclude /usr/lib/ocf/resource.d/heartbeat/zabbixserver +%exclude /usr/lib/ocf/resource.d/heartbeat/mysql-proxy +%exclude /usr/lib/ocf/resource.d/heartbeat/rsyslog +%exclude /usr/lib/ocf/resource.d/heartbeat/vsftpd +%exclude /usr/lib/ocf/resource.d/heartbeat/ZFS +%exclude %{_mandir}/man7/ocf_heartbeat_clvm.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_LVM.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_AoEtarget.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_AudibleAlarm.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ClusterMon.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_EvmsSCC.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Evmsd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ICP.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_IPaddr.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_LinuxSCSI.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ManageRAID.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ManageVE.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Pure-FTPd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Raid1.7.gz 
+%exclude %{_mandir}/man7/ocf_heartbeat_ServeRAID.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_SphinxSearchDaemon.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Stateful.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_SysInfo.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_VIPArip.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_WAS.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_WAS6.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_WinPopup.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_Xen.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_anything.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_asterisk.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_dnsupdate.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_eDir88.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_fio.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ids.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_iface-bridge.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ipsec.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_iscsi.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_jboss.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_jira.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_kamailio.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_lxc.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_lxd-info.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_machine-info.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_mariadb.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_minio.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_mpathpersist.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_openstack-cinder-volume.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_openstack-floating-ip.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_openstack-info.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ovsmonitor.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_pgagent.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_pingd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_pound.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_proftpd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_rkt.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_scsi2reservation.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_sfex.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_sg_persist.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_syslog-ng.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_varnish.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_vmware.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_vsftpd.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_zabbixserver.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_mysql-proxy.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_rsyslog.7.gz +%exclude %{_mandir}/man7/ocf_heartbeat_ZFS.7.gz + +### +# Other excluded files. +### +# This tool has to be updated for the new pacemaker lrmd. 
+%exclude %{_sbindir}/ocf-tester +%exclude %{_mandir}/man8/ocf-tester.8* +# ldirectord is not supported +%exclude /etc/ha.d/resource.d/ldirectord +%exclude /etc/init.d/ldirectord +%exclude %{_unitdir}/ldirectord.service +%exclude /etc/logrotate.d/ldirectord +%exclude /usr/sbin/ldirectord +%exclude %{_mandir}/man8/ldirectord.8.gz + +# For compatibility with pre-existing agents +%dir %{_sysconfdir}/ha.d +%{_sysconfdir}/ha.d/shellfuncs + +%{_libexecdir}/heartbeat +%endif + +%if %{with rgmanager} +%post -n resource-agents +ccs_update_schema > /dev/null 2>&1 ||: +%endif + +%ifarch x86_64 +%files aliyun +%doc aliyun*_README* %{colorama}_README.rst %{pycryptodome}_README.rst +%license %{aliyuncli}_LICENSE %{colorama}_LICENSE.txt %{pycryptodome}_LICENSE.rst +%defattr(-,root,root) +/usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip* +%{_mandir}/man7/*aliyun-vpc-move-ip* +# bundle +%{_bindir}/aliyuncli-ra +%dir /usr/lib/%{name} +/usr/lib/%{name}/%{bundled_lib_dir}/aliyun +%endif + +%ifarch x86_64 +%files gcp +%doc %{googlecloudsdk}_*README* +%license %{googlecloudsdk}_*LICENSE* +%doc %{pyroute2}_README* +%license %{pyroute2}_LICENSE* +%defattr(-,root,root) +/usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-vip* +%{_mandir}/man7/*gcp-vpc-move-vip* +/usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-route* +%{_mandir}/man7/*gcp-vpc-move-route* +/usr/lib/ocf/resource.d/heartbeat/gcp-pd-move* +%{_mandir}/man7/*gcp-pd-move* +# bundle +%{_bindir}/gcloud-ra +%dir /usr/lib/%{name} +/usr/lib/%{name}/%{bundled_lib_dir}/gcp +%endif + +%files paf +%doc paf_README.md +%license paf_LICENSE +%defattr(-,root,root) +%{_usr}/lib/ocf/resource.d/heartbeat/pgsqlms +%{_mandir}/man7/*pgsqlms* +%{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm + +%changelog +* Wed May 12 2021 Oyvind Albrigtsen - 4.1.1-94 +- gcp-vpc-move-vip: add retry logic + + Resolves: rhbz#1957765 + +* Wed Apr 28 2021 Oyvind Albrigtsen - 4.1.1-93 +- db2: add PRIMARY/REMOTE_CATCHUP_PENDING/CONNECTED status to promote-check +- pgsqlms: new resource agent +- python-pygments: fix CVE-2021-27291 and CVE-2021-20270 + + Resolves: rhbz#1872754, rhbz#1934651, rhbz#1935422, rhbz#1943464 + +* Thu Apr 8 2021 Oyvind Albrigtsen - 4.1.1-91 +- ethmonitor: fix vlan regex +- iface-vlan: make vlan parameter not unique +- nfsserver: error-check unmount +- VirtualDomain: fix pid status regex +- podman: return NOT_RUNNING when monitor cmd fails +- awsvip: dont partially match similar IPs during monitor-action +- aws agents: dont spam logs +- aws-vpc-move-ip: add ENI lookup + + Resolves: rhbz#1891883, rhbz#1902045, rhbz#1924363, rhbz#1932863 + Resolves: rhbz#1920698, rhbz#1939992, rhbz#1940094, rhbz#1939281 + +* Mon Mar 22 2021 Oyvind Albrigtsen - 4.1.1-90 +- galera/rabbitmq-cluster/redis: run crm_mon without validation when + running in bundle (1940363) + +* Thu Mar 11 2021 Oyvind Albrigtsen - 4.1.1-89 +- azure-lb: redirect to avoid nc dying with EPIPE error (1937142) + +* Thu Feb 25 2021 Oyvind Albrigtsen - 4.1.1-87 +- gcp-vpc-move-route, gcp-vpc-move-vip: add project parameter and + make vpc_network parameter optional + + Resolves: rhbz#1913932 + +* Thu Dec 3 2020 Oyvind Albrigtsen - 4.1.1-81 +- ocf-shellfuncs: fix traceback redirection for Bash 5+ + + Resolves: rhbz#1903677 + +* Tue Dec 1 2020 Oyvind Albrigtsen - 4.1.1-80 +- crypt: support symlink devices, and dont run sanity checks for probes + + Resolves: rhbz#1901357 + +* Mon Nov 30 2020 Oyvind Albrigtsen - 4.1.1-79 +- LVM-activate: add drop-in during start-action to avoid getting + fenced during reboot + + Resolves: rhbz#1902208 + +* Wed Nov 
25 2020 Oyvind Albrigtsen - 4.1.1-77 +- NovaEvacuate: set delay_evacuate to 0 when it's not set + + Resolves: rhbz#1899551 + +* Tue Nov 24 2020 Oyvind Albrigtsen - 4.1.1-76 +- podman: recover from killed conmon process +- podman: recover from podman's storage being out of sync +- crypt: make key_file and crypt_type parameters not unique + + Resolves: rhbz#1886262 + Resolves: rhbz#1900015 + Resolves: rhbz#1898690 + +* Fri Nov 13 2020 Oyvind Albrigtsen - 4.1.1-75 +- AWS agents: add support for IMDSv2 + + Resolves: rhbz#1897570 + +* Wed Nov 11 2020 Oyvind Albrigtsen - 4.1.1-74 +- aws-vpc-move-ip: don't warn for expected scenarios + + Resolves: rhbz#1895811 + +* Mon Nov 2 2020 Oyvind Albrigtsen - 4.1.1-73 +- crypt: new resource agent + + Resolves: rhbz#1471182 + +* Wed Oct 28 2020 Oyvind Albrigtsen - 4.1.1-72 +- sybaseASE: Run verify_all() for start operation only +- sybaseASE: add logfile parameter +- galera: set bootstrap attribute before promote +- galera: recover after network split in a 2-node cluster + + Resolves: rhbz#1848025 + Resolves: rhbz#1861001 + Resolves: rhbz#1891835 + Resolves: rhbz#1891855 + +* Tue Oct 27 2020 Oyvind Albrigtsen - 4.1.1-71 +- redis: parse password correctly based on version +- all agents: fix pcs syntax in manpage for pcs 0.10+ +- gcp-pd-move: dont stop partially matched "disk_name" + + Resolves: rhbz#1815013 + Resolves: rhbz#1763249 + Resolves: rhbz#1890068 + +* Wed Oct 7 2020 Oyvind Albrigtsen - 4.1.1-70 +- galera: recover from joining a non existing cluster + + Resolves: rhbz#1881114 + +* Wed Sep 23 2020 Oyvind Albrigtsen - 4.1.1-69 +- pgsql: ignore masters re-promote +- pgsql: add PostgreSQL 12 support +- Make Samba/CIFS dependency weak +- Filesystem: Support whitespace in device or directory name +- aws-vpc-move-ip: add region parameter + + Resolves: rhbz#1640587 + Resolves: rhbz#1795535 + Resolves: rhbz#1828600 + Resolves: rhbz#1858752 + Resolves: rhbz#1872999 + +* Thu Aug 20 2020 Oyvind Albrigtsen - 4.1.1-68 +- azure-lb: fix redirect issue + + Resolves: rhbz#1850778 + +* Wed Aug 19 2020 Oyvind Albrigtsen - 4.1.1-67 +- gcp-vpc-move-vip: add support for multiple alias IPs + + Resolves: rhbz#1846733 + +* Thu Jul 30 2020 Oyvind Albrigtsen - 4.1.1-65 +- azure-events: handle exceptions in urlopen + + Resolves: rhbz#1845574 + +* Mon Jul 27 2020 Oyvind Albrigtsen - 4.1.1-64 +- nfsserver: fix NFSv4-only support +- azure-events: new resource agent for Azure + + Resolves: rhbz#1818997 + Resolves: rhbz#1819965 + +* Thu Jun 25 2020 Oyvind Albrigtsen - 4.1.1-60 +- Upgrade bundled python-httplib2 to fix CVE-2020-11078 + + Resolves: rhbz#1850990 + +* Wed Jun 17 2020 Oyvind Albrigtsen - 4.1.1-59 +- pgsql: support Pacemaker v2.03+ output + + Resolves: rhbz#1836186 + +* Thu Jun 11 2020 Oyvind Albrigtsen - 4.1.1-56 +- Filesystem: set "fast_stop" default to "no" for GFS2 filesystems + + Resolves: rhbz#1814896 + +* Wed Jun 10 2020 Oyvind Albrigtsen - 4.1.1-55 +- nfsserver: dont log error message when /etc/sysconfig/nfs does not exist +- exportfs: describe clientspec format in metadata + + Resolves: rhbz#1845581 + Resolves: rhbz#1845583 + +* Tue Jun 9 2020 Oyvind Albrigtsen - 4.1.1-54 +- exportfs: add symlink support +- aliyun-vpc-move-ip: log output when failing + + Resolves: rhbz#1820523 + Resolves: rhbz#1843999 + +* Tue Jun 2 2020 Oyvind Albrigtsen - 4.1.1-53 +- podman: force remove container if remove fails + + Resolves: rhbz#1839721 + +* Thu May 28 2020 Oyvind Albrigtsen - 4.1.1-52 +- gcp-pd-move: new resource agent for Google Cloud + + Resolves: rhbz#1633251 + +* Wed May 
27 2020 Oyvind Albrigtsen - 4.1.1-51 +- NovaEvacuate: suppress expected initial error message +- db2 (HADR): promote standby node when master node disappears + + Resolves: rhbz#1830716 + Resolves: rhbz#1836945 + +* Thu May 7 2020 Oyvind Albrigtsen - 4.1.1-50 +- rabbitmq-cluster: increase rabbitmqctl wait timeout during start + + Resolves: rhbz#1832321 + +* Tue Apr 28 2020 Oyvind Albrigtsen - 4.1.1-49 +- aws-vpc-route53: new resource agent for AWS +- pgsql: improve checks to prevent incorrect status, and set initial + score for primary and hot standby + + Resolves: rhbz#1759115 + Resolves: rhbz#1744190 + +* Mon Apr 6 2020 Oyvind Albrigtsen - 4.1.1-47 +- aws-vpc-move-ip: delete remaining route entries + + Resolves: rhbz#1819021 + +* Fri Mar 27 2020 Oyvind Albrigtsen - 4.1.1-46 +- use safe temp file location +- ocf-shellfuncs: ocf_is_clone(): fix to return true when clone-max + is set to 0 + + Resolves: rhbz#1817432 + Resolves: rhbz#1817598 + +* Wed Mar 18 2020 Oyvind Albrigtsen - 4.1.1-45 +- azure-lb: support using socat instead of nc +- aws-vpc-move-ip: add "routing_table_role" parameter +- redis: fix validate-all action and run it during start + + Resolves: rhbz#1804658 + Resolves: rhbz#1810466 + Resolves: rhbz#1792237 + +* Fri Mar 6 2020 Oyvind Albrigtsen - 4.1.1-44 +- lvmlockd: automatically remove locking_type from lvm.conf for LVM + v2.03+ + + Resolves: rhbz#1808468 + +* Tue Jan 28 2020 Oyvind Albrigtsen - 4.1.1-43 +- rabbitmq-cluster: delete nodename when stop fails + + Resolves: rhbz#1792196 + +* Thu Jan 23 2020 Oyvind Albrigtsen - 4.1.1-42 +- IPsrcaddr: add destination and table parameters + + Resolves: rhbz#1744224 + +* Mon Jan 13 2020 Oyvind Albrigtsen - 4.1.1-40 +- podman: improve image exist check +- IPaddr2: add CLUSTERIP not supported info to metadata/manpage +- Filesystem: refresh UUID if block device doesnt exist + + Resolves: rhbz#1788889 + Resolves: rhbz#1767916 + Resolves: rhbz#1777381 + +* Wed Nov 27 2019 Oyvind Albrigtsen - 4.1.1-38 +- IPaddr2: add noprefixroute parameter + + Resolves: rhbz#1741042 + +* Wed Nov 13 2019 Oyvind Albrigtsen - 4.1.1-36 +- exportfs: allow multiple exports with same fsid +- mysql/galera: fix incorrect rc + + Resolves: rhbz#1764888 + Resolves: rhbz#1765128 + +* Mon Oct 14 2019 Oyvind Albrigtsen - 4.1.1-35 +- Route: dont fence when parameters not set +- LVM-activate: add partial-activation support + + Resolves: rhbz#1750261 + Resolves: rhbz#1741843 + +* Wed Oct 2 2019 Oyvind Albrigtsen - 4.1.1-34 +- LVM/clvm: remove manpages for excluded agents +- LVM-activate: return NOT_RUNNING when node rejoins cluster +- LVM-activate: detect systemid volume without reboot +- Filesystem: add symlink support +- Filesystem: avoid corrupt mount-list and dont kill incorrect processes + for bind-mounts +- IPsrcaddr: make proto optional to fix regression when used without + NetworkManager +- docker: fix stop issues +- rabbitmq-cluster: also restore users in single node mode +- IPaddr2: sanitize compressed IPv6 IPs +- nfsserver: systemd performance improvements +- NovaEvacuate: add "evacuate_delay" parameter + + Resolves: rhbz#1694392 + Resolves: rhbz#1695039 + Resolves: rhbz#1738428 + Resolves: rhbz#1744103 + Resolves: rhbz#1744140 + Resolves: rhbz#1757837 + Resolves: rhbz#1748768 + Resolves: rhbz#1750352 + Resolves: rhbz#1751700 + Resolves: rhbz#1751962 + Resolves: rhbz#1755760 + +* Tue Aug 27 2019 Oyvind Albrigtsen - 4.1.1-33 +- rabbitmq-cluster: fail monitor when node is in minority partition, + fix stop regression, retry start when cluster join fails, ensure + 
node attributes are removed + + Resolves: rhbz#1745713 + +* Mon Aug 12 2019 Oyvind Albrigtsen - 4.1.1-32 +- mysql/galera: use runuser/su to avoid using DAC_OVERRIDE + + Resolves: rhbz#1692960 + +* Wed Aug 7 2019 Oyvind Albrigtsen - 4.1.1-31 +- podman: add drop-in dependency support + + Resolves: rhbz#1736746 + +* Wed Jul 31 2019 Oyvind Albrigtsen - 4.1.1-30 +- iSCSITarget/iSCSILogicalUnit: only create iqn/acls when it doesnt + exist + + Resolves: rhbz#1692413 + +* Tue Jul 30 2019 Oyvind Albrigtsen - 4.1.1-29 +- CTDB: add support for v4.9+ + + Resolves: rhbz#1732867 + +* Tue Jul 23 2019 Oyvind Albrigtsen - 4.1.1-28 +- podman: fixes to avoid bundle resources restarting when probing + takes too long +- LVM-activate: fix monitor to avoid hang caused by validate-all call + + Resolves: rhbz#1718219 + Resolves: rhbz#1730455 + +* Wed Jun 19 2019 Oyvind Albrigtsen - 4.1.1-27 +- ocf_log: do not log debug messages when HA_debug unset +- Filesystem: remove notify-action from metadata +- dhcpd keep SELinux context in chroot + + Resolves: rhbz#1707969 + Resolves: rhbz#1717759 + Resolves: rhbz#1719684 + +* Tue Jun 11 2019 Oyvind Albrigtsen - 4.1.1-26 +- sap/sap-hana: split subpackages into separate packages + + Resolves: rhbz#1705767 + +* Wed May 29 2019 Oyvind Albrigtsen - 4.1.1-24 +- Squid: fix PID file issue + + Resolves: rhbz#1689184 + +* Tue May 28 2019 Oyvind Albrigtsen - 4.1.1-23 +- Route: make family parameter optional +- redis: mute password warning + + Resolves: rhbz#1669140 + Resolves: rhbz#1683548 + +* Thu May 23 2019 Oyvind Albrigtsen - 4.1.1-22 +- aws-vpc-move-ip: add multi route-table support and fix issue + w/multiple NICs + + Resolves: rhbz#1697559 + +* Fri Apr 5 2019 Oyvind Albrigtsen - 4.1.1-21 +- gcp-vpc-move-route/gcp-vpc-move-vip: fix Python 3 encoding issue + + Resolves: rhbz#1695656 + +* Mon Apr 1 2019 Oyvind Albrigtsen - 4.1.1-20 +- aws-vpc-move-ip: use "--query" to avoid a possible race condition +- gcloud-ra: fix Python 3 issue and remove Python 2 detection + + Resolves: rhbz#1693662 + Resolves: rhbz#1691456 + +* Thu Mar 21 2019 Oyvind Albrigtsen - 4.1.1-19 +- Add CI gating tests +- LVM-activate: support LVs from same VG +- tomcat: use systemd when catalina.sh is unavailable +- Fixed python-devel/perl build dependencies + + Resolves: rhbz#1682136 + Resolves: rhbz#1667414 + Resolves: rhbz#1666691 + Resolves: rhbz#1595854 + +* Thu Mar 7 2019 Oyvind Albrigtsen - 4.1.1-18 +- aliyun-vpc-move-ip: exclude from main package +- aliyuncli-ra: upgrade bundled python-aliyun-sdk-core and fix Python 3 issues +- ocf.py: byte compile + + Resolves: rhbz#1677204 + Resolves: rhbz#1677981 + Resolves: rhbz#1678874 + +* Tue Feb 5 2019 Oyvind Albrigtsen - 4.1.1-17 +- LVM-activate: dont require locking_type + + Resolves: rhbz#1658664 + +* Fri Jan 11 2019 Oyvind Albrigtsen - 4.1.1-16 +- vdo-vol: fix monitor-action +- LVM-activate: dont fail initial probe + + Resolves: rhbz#1662466 + Resolves: rhbz#1643307 + +* Tue Oct 23 2018 Oyvind Albrigtsen - 4.1.1-15 +- nfsserver: fix start-issues when nfs_shared_infodir parameter is + changed + + Resolves: rhbz#1642027 + +* Mon Oct 8 2018 Oyvind Albrigtsen - 4.1.1-14 +- redis: use basename in pidof to avoid issues in containers + + Resolves: rhbz#1635785 + +* Wed Sep 26 2018 Oyvind Albrigtsen - 4.1.1-11 +- Remove grpc from bundle + + Resolves: rhbz#1630627 + +* Fri Sep 21 2018 Oyvind Albrigtsen - 4.1.1-10 +- systemd-tmpfiles: change path to /run/resource-agents + + Resolves: rhbz#1631291 + +* Fri Aug 24 2018 Oyvind Albrigtsen - 4.1.1-9 +- podman: new resource 
agent + + Resolves: rhbz#1607607 + +* Wed Aug 22 2018 Oyvind Albrigtsen - 4.1.1-8 +- LVM: fix missing dash in activate_options +- LVM-activate: warn about incorrect vg_access_mode +- lvmlockd: add cmirrord support + +* Wed Aug 1 2018 Oyvind Albrigtsen - 4.1.1-7 +- findif: only match lines containing netmasks + +* Mon Jul 30 2018 Florian Weimer - 4.1.1-6 +- Rebuild with fixed binutils + +* Fri Jul 27 2018 Oyvind Albrigtsen - 4.1.1-5 +- vdo-vol: new resource agent + + Resolves: rhbz#1552330 + +* Wed Jul 4 2018 Oyvind Albrigtsen - 4.1.1-4 +- VirtualDomain: add stateless support +- Exclude unsupported agents + +* Thu Jun 28 2018 Oyvind Albrigtsen - 4.1.1-3 +- Added SAPHana and OpenStack agents + +* Fri May 25 2018 Oyvind Albrigtsen - 4.1.1-2 +- Remove unsupported clvm and LVM agents + +* Tue Mar 13 2018 Oyvind Albrigtsen - 4.1.1-1 +- Rebase to resource-agents 4.1.1 upstream release. + +* Mon Feb 19 2018 Oyvind Albrigtsen - 4.1.0-2 +- Add gcc to BuildRequires + +* Fri Feb 09 2018 Igor Gnatenko - 4.1.0-1.1 +- Escape macros in %%changelog + +* Wed Jan 10 2018 Oyvind Albrigtsen - 4.1.0-1 +- Rebase to resource-agents 4.1.0 upstream release. + +* Thu Aug 03 2017 Fedora Release Engineering - 4.0.1-1.3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild + +* Thu Jul 27 2017 Fedora Release Engineering - 4.0.1-1.2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild + +* Sat Feb 11 2017 Fedora Release Engineering - 4.0.1-1.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild + +* Thu Feb 2 2017 Oyvind Albrigtsen - 4.0.1-1 +- Rebase to resource-agents 4.0.1 upstream release. + +* Wed Feb 1 2017 Oyvind Albrigtsen - 4.0.0-2 +- galera: remove "long SST monitoring" support due to corner-case issues + +* Tue Jan 31 2017 Oyvind Albrigtsen - 4.0.0-1 +- Rebase to resource-agents 4.0.0 upstream release. + +* Thu Dec 15 2016 Oyvind Albrigtsen - 3.9.7-6 +- Add netstat dependency + +* Tue Feb 9 2016 Oyvind Albrigtsen - 3.9.7-4 +- Rebase to resource-agents 3.9.7 upstream release. + +* Thu Feb 04 2016 Fedora Release Engineering - 3.9.6-2.2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild + +* Thu Jun 18 2015 Fedora Release Engineering - 3.9.6-2.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild + +* Mon Apr 20 2015 David Vossel - 3.9.6-2 +- Rebase to latest upstream code in order to pull in rabbitmq-cluster agent + +* Fri Feb 13 2015 David Vossel - 3.9.6-1 +- Rebase to resource-agents 3.9.6 upstream release. + +* Sun Aug 17 2014 Fedora Release Engineering - 3.9.5-12.2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild + +* Sun Jun 08 2014 Fedora Release Engineering - 3.9.5-12.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild + +* Wed Apr 30 2014 David Vossel - 3.9.5-12 +- Sync with latest upstream. + +* Thu Jan 2 2014 David Vossel - 3.9.5-11 +- Sync with latest upstream. + +* Sun Oct 20 2013 David Vossel - 3.9.5-10 +- Fix build system for rawhide. + +* Wed Oct 16 2013 David Vossel - 3.9.5-9 +- Remove rgmanager agents from build. + +* Sun Aug 04 2013 Fedora Release Engineering - 3.9.5-8 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild + +* Wed Jul 17 2013 Petr Pisar - 3.9.5-7 +- Perl 5.18 rebuild + +* Tue Jun 18 2013 David Vossel - 3.9.5-6 +- Restores rsctmp directory to upstream default. + +* Tue Jun 18 2013 David Vossel - 3.9.5-5 +- Merges redhat provider into heartbeat provider. Remove + rgmanager's redhat provider. 
+ + Resolves: rhbz#917681 + Resolves: rhbz#928890 + Resolves: rhbz#952716 + Resolves: rhbz#960555 + +* Tue Mar 12 2013 David Vossel - 3.9.5-3 +- Fixes build system error with conditional logic involving + IPv6addr and updates spec file to build against rhel 7 as + well as fedora 19. + +* Mon Mar 11 2013 David Vossel - 3.9.5-2 +- Resolves: rhbz#915050 + +* Mon Mar 11 2013 David Vossel - 3.9.5-1 +- New upstream release. + +* Fri Jan 25 2013 Kevin Fenzi - 3.9.2-5 +- Fix cifs mount requires + +* Mon Nov 12 2012 Chris Feist - 3.9.2-4 +- Removed version number after dist + +* Mon Oct 29 2012 Chris Feist - 3.9.2-3.8 +- Remove cluster-glue-libs-devel +- Disable IPv6addr & sfex to fix deps on libplumgpl & libplum (due to + disappearance of cluster-glue in F18) + +* Sat Jul 21 2012 Fedora Release Engineering - 3.9.2-3.5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild + +* Thu Jul 05 2012 Chris Feist - 3.9.2-3.4 +- Fix location of lvm (change from /sbin to /usr/sbin) + +* Wed Apr 04 2012 Jon Ciesla - 3.9.2-3.3 +- Rebuilt to fix rawhide dependency issues (caused by move of fsck from + /sbin to /usr/sbin). + +* Fri Mar 30 2012 Jon Ciesla - 3.9.2-3.1 +- libnet rebuild. + +* Sat Jan 14 2012 Fedora Release Engineering - 3.9.2-2.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild + +* Fri Jul 8 2011 Fabio M. Di Nitto - 3.9.2-2 +- add post call to resource-agents to integrate with cluster 3.1.4 + +* Thu Jun 30 2011 Fabio M. Di Nitto - 3.9.2-1 +- new upstream release +- fix 2 regressions from 3.9.1 + +* Mon Jun 20 2011 Fabio M. Di Nitto - 3.9.1-1 +- new upstream release +- import spec file from upstream + +* Tue Mar 1 2011 Fabio M. Di Nitto - 3.1.1-1 +- new upstream release 3.1.1 and 1.0.4 + +* Wed Feb 09 2011 Fedora Release Engineering - 3.1.0-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild + +* Thu Dec 2 2010 Fabio M. Di Nitto - 3.1.0-1 +- new upstream release +- spec file update: + Update upstream URL + Update source URL + use standard configure macro + use standard make invocation + +* Thu Oct 7 2010 Fabio M. Di Nitto - 3.0.17-1 +- new upstream release + Resolves: rhbz#632595, rhbz#633856, rhbz#632385, rhbz#628013 + Resolves: rhbz#621313, rhbz#595383, rhbz#580492, rhbz#605733 + Resolves: rhbz#636243, rhbz#591003, rhbz#637913, rhbz#634718 + Resolves: rhbz#617247, rhbz#617247, rhbz#617234, rhbz#631943 + Resolves: rhbz#639018 + +* Thu Oct 7 2010 Andrew Beekhof - 3.0.16-2 +- new upstream release of the Pacemaker agents: 71b1377f907c + +* Thu Sep 2 2010 Fabio M. Di Nitto - 3.0.16-1 +- new upstream release + Resolves: rhbz#619096, rhbz#614046, rhbz#620679, rhbz#619680 + Resolves: rhbz#621562, rhbz#621694, rhbz#608887, rhbz#622844 + Resolves: rhbz#623810, rhbz#617306, rhbz#623816, rhbz#624691 + Resolves: rhbz#622576 + +* Thu Jul 29 2010 Fabio M. Di Nitto - 3.0.14-1 +- new upstream release + Resolves: rhbz#553383, rhbz#557563, rhbz#578625, rhbz#591003 + Resolves: rhbz#593721, rhbz#593726, rhbz#595455, rhbz#595547 + Resolves: rhbz#596918, rhbz#601315, rhbz#604298, rhbz#606368 + Resolves: rhbz#606470, rhbz#606480, rhbz#606754, rhbz#606989 + Resolves: rhbz#607321, rhbz#608154, rhbz#608887, rhbz#609181 + Resolves: rhbz#609866, rhbz#609978, rhbz#612097, rhbz#612110 + Resolves: rhbz#612165, rhbz#612941, rhbz#614127, rhbz#614356 + Resolves: rhbz#614421, rhbz#614457, rhbz#614961, rhbz#615202 + Resolves: rhbz#615203, rhbz#615255, rhbz#617163, rhbz#617566 + Resolves: rhbz#618534, rhbz#618703, rhbz#618806, rhbz#618814 + +* Mon Jun 7 2010 Fabio M. 
Di Nitto - 3.0.13-1 +- new upstream release + Resolves: rhbz#592103, rhbz#593108, rhbz#578617, rhbz#594626 + Resolves: rhbz#594511, rhbz#596046, rhbz#594111, rhbz#597002 + Resolves: rhbz#599643 + +* Tue May 18 2010 Andrew Beekhof - 3.0.12-2 +- libnet is not available on RHEL +- Do not package ldirectord on RHEL + Resolves: rhbz#577264 + +* Mon May 10 2010 Fabio M. Di Nitto - 3.0.12-1 +- new upstream release + Resolves: rhbz#585217, rhbz#586100, rhbz#581533, rhbz#582753 + Resolves: rhbz#582754, rhbz#585083, rhbz#587079, rhbz#588890 + Resolves: rhbz#588925, rhbz#583789, rhbz#589131, rhbz#588010 + Resolves: rhbz#576871, rhbz#576871, rhbz#590000, rhbz#589823 + +* Mon May 10 2010 Andrew Beekhof - 3.0.12-1 +- New pacemaker agents upstream release: a7c0f35916bf + + High: pgsql: properly implement pghost parameter + + High: RA: mysql: fix syntax error + + High: SAPInstance RA: do not rely on op target rc when monitoring clones (lf#2371) + + High: set the HA_RSCTMP directory to /var/run/resource-agents (lf#2378) + + Medium: IPaddr/IPaddr2: add a description of the assumption in meta-data + + Medium: IPaddr: return the correct code if interface delete failed + + Medium: nfsserver: rpc.statd as the notify cmd does not work with -v (thanks to Carl Lewis) + + Medium: oracle: reduce output from sqlplus to the last line for queries (bnc#567815) + + Medium: pgsql: implement "config" parameter + + Medium: RA: iSCSITarget: follow changed IET access policy + +* Wed Apr 21 2010 Fabio M. Di Nitto - 3.0.11-1 +- new upstream release + Resolves: rhbz#583945, rhbz#581047, rhbz#576330, rhbz#583017 + Resolves: rhbz#583019, rhbz#583948, rhbz#584003, rhbz#582017 + Resolves: rhbz#555901, rhbz#582754, rhbz#582573, rhbz#581533 +- Switch to file based Requires. + Also address several other problems related to missing runtime + components in different agents. + With the current Requires: set, we guarantee all basic functionalities + out of the box for lvm/fs/clusterfs/netfs/networking. 
+  Resolves: rhbz#570008
+
+* Sat Apr 17 2010 Andrew Beekhof - 3.0.10-2
+- New pacemaker agents upstream release
+  + High: RA: vmware: fix set_environment() invocation (LF 2342)
+  + High: RA: vmware: update to version 0.2
+  + Medium: Filesystem: prefer /proc/mounts to /etc/mtab for non-bind mounts (lf#2388)
+  + Medium: IPaddr2: don't bring the interface down on stop (thanks to Lars Ellenberg)
+  + Medium: IPsrcaddr: modify the interface route (lf#2367)
+  + Medium: ldirectord: Allow multiple email addresses (LF 2168)
+  + Medium: ldirectord: fix setting defaults for configfile and ldirectord (lf#2328)
+  + Medium: meta-data: improve timeouts in most resource agents
+  + Medium: nfsserver: use default values (lf#2321)
+  + Medium: ocf-shellfuncs: don't log but print to stderr if connected to a terminal
+  + Medium: ocf-shellfuncs: don't output to stderr if using syslog
+  + Medium: oracle/oralsnr: improve exit codes if the environment isn't valid
+  + Medium: RA: iSCSILogicalUnit: fix monitor for STGT
+  + Medium: RA: make sure that OCF_RESKEY_CRM_meta_interval is always defined (LF 2284)
+  + Medium: RA: ManageRAID: require bash
+  + Medium: RA: VirtualDomain: bail out early if config file can't be read during probe (Novell 593988)
+  + Medium: RA: VirtualDomain: fix incorrect use of __OCF_ACTION
+  + Medium: RA: VirtualDomain: improve error messages
+  + Medium: RA: VirtualDomain: spin on define until we definitely have a domain name
+  + Medium: Route: add route table parameter (lf#2335)
+  + Medium: sfex: don't use pid file (lf#2363,bnc#585416)
+  + Medium: sfex: exit with success on stop if sfex has never been started (bnc#585416)
+
+* Fri Apr 9 2010 Fabio M. Di Nitto - 3.0.10-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#519491, rhbz#570525, rhbz#571806, rhbz#574027
+  Resolves: rhbz#574215, rhbz#574886, rhbz#576322, rhbz#576335
+  Resolves: rhbz#575103, rhbz#577856, rhbz#577874, rhbz#578249
+  Resolves: rhbz#578625, rhbz#578626, rhbz#578628, rhbz#578626
+  Resolves: rhbz#579621, rhbz#579623, rhbz#579625, rhbz#579626
+  Resolves: rhbz#579059
+
+* Wed Mar 24 2010 Andrew Beekhof - 3.0.9-2
+- Resolves: rhbz#572993 - Patched build process to correctly generate ldirectord man page
+- Resolves: rhbz#574732 - Add libnet-devel as a dependency to ensure IPaddrv6 is built
+
+* Mon Mar 1 2010 Fabio M. Di Nitto - 3.0.9-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#455300, rhbz#568446, rhbz#561862, rhbz#536902
+  Resolves: rhbz#512171, rhbz#519491
+
+* Mon Feb 22 2010 Fabio M. Di Nitto - 3.0.8-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#548133, rhbz#565907, rhbz#545602, rhbz#555901
+  Resolves: rhbz#564471, rhbz#515717, rhbz#557128, rhbz#536157
+  Resolves: rhbz#455300, rhbz#561416, rhbz#562237, rhbz#537201
+  Resolves: rhbz#536962, rhbz#553383, rhbz#556961, rhbz#555363
+  Resolves: rhbz#557128, rhbz#455300, rhbz#557167, rhbz#459630
+  Resolves: rhbz#532808, rhbz#556603, rhbz#554968, rhbz#555047
+  Resolves: rhbz#554968, rhbz#555047
+- spec file update:
+  * update spec file copyright date
+  * use bz2 tarball
+
+* Fri Jan 15 2010 Fabio M. Di Nitto - 3.0.7-2
+- Add python as BuildRequires
+
+* Mon Jan 11 2010 Fabio M. Di Nitto - 3.0.7-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#526286, rhbz#533461
+
+* Mon Jan 11 2010 Andrew Beekhof - 3.0.6-2
+- Update Pacemaker agents to upstream version: c76b4a6eb576
+  + High: RA: VirtualDomain: fix forceful stop (LF 2283)
+  + High: apache: monitor operation of depth 10 for web applications (LF 2234)
+  + Medium: IPaddr2: CLUSTERIP/iptables rule not always inserted on failed monitor (LF 2281)
+  + Medium: RA: Route: improve validate (LF 2232)
+  + Medium: mark obsolete RAs as deprecated (LF 2244)
+  + Medium: mysql: escalate stop to KILL if regular shutdown doesn't work
+
+* Mon Dec 7 2009 Fabio M. Di Nitto - 3.0.6-1
+- New rgmanager resource agents upstream release
+- spec file update:
+  * use global instead of define
+  * use new Source0 url
+  * use %%name macro more aggressively
+
+* Mon Dec 7 2009 Andrew Beekhof - 3.0.5-2
+- Update Pacemaker agents to upstream version: bc00c0b065d9
+  + High: RA: introduce OCF_FUNCTIONS_DIR, allow it to be overridden (LF2239)
+  + High: doc: add man pages for all RAs (LF2237)
+  + High: syslog-ng: new RA
+  + High: vmware: make meta-data work and several cleanups (LF 2212)
+  + Medium: .ocf-shellfuncs: add ocf_is_probe function
+  + Medium: Dev: make RAs executable (LF2239)
+  + Medium: IPv6addr: ifdef out the ip offset hack for libnet v1.1.4 (LF 2034)
+  + Medium: add mercurial repository version information to .ocf-shellfuncs
+  + Medium: build: add perl-MailTools runtime dependency to ldirectord package (LF 1469)
+  + Medium: iSCSITarget, iSCSILogicalUnit: support LIO
+  + Medium: nfsserver: use check_binary properly in validate (LF 2211)
+  + Medium: nfsserver: validate should not check if nfs_shared_infodir exists (thanks to eelco@procolix.com) (LF 2219)
+  + Medium: oracle/oralsnr: export variables properly
+  + Medium: pgsql: remove the previous backup_label if it exists
+  + Medium: postfix: fix double stop (thanks to Dinh N. Quoc)
+  + RA: LVM: Make monitor operation quiet in logs (bnc#546353)
+  + RA: Xen: Remove instance_attribute "allow_migrate" (bnc#539968)
+  + ldirectord: OCF agent: overhaul
+
+* Fri Nov 20 2009 Fabio M. Di Nitto - 3.0.5-1
+- New rgmanager resource agents upstream release
+- Allow pacemaker to use rgmanager resource agents
+
+* Wed Oct 28 2009 Andrew Beekhof - 3.0.4-2
+- Update Pacemaker agents to upstream version: e2338892f59f
+  + High: send_arp - turn on unsolicited mode for compatibility with the libnet version's exit codes
+  + High: Trap sigterm for compatibility with the libnet version of send_arp
+  + Medium: Bug - lf#2147: IPaddr2: behave if the interface is down
+  + Medium: IPv6addr: recognize network masks properly
+  + Medium: RA: VirtualDomain: avoid needlessly invoking "virsh define"
+
+* Wed Oct 21 2009 Fabio M. Di Nitto - 3.0.4-1
+- New rgmanager resource agents upstream release
+
+* Mon Oct 12 2009 Andrew Beekhof - 3.0.3-3
+- Update Pacemaker agents to upstream version: 099c0e5d80db
+  + Add the ha_parameter function back into .ocf-shellfuncs.
+  + Bug bnc#534803 - Provide a default for MAILCMD
+  + Fix use of undefined macro @HA_NOARCHDATAHBDIR@
+  + High (LF 2138): IPsrcaddr: replace 0/0 with proper ip prefix (thanks to Michael Ricordeau and Michael Schwartzkopff)
+  + Import shellfuncs from heartbeat as badly written RAs use it
+  + Medium (LF 2173): nfsserver: exit properly in nfsserver_validate
+  + Medium: RA: Filesystem: implement monitor operation
+  + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable
+  + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable (addendum)
+  + Medium: RA: iSCSILogicalUnit: use a 16-byte default SCSI ID
+  + Medium: RA: iSCSITarget: be more persistent deleting targets on stop
+  + Medium: RA: portblock: add per-IP filtering capability
+  + Medium: mysql-proxy: log_level and keepalive parameters
+  + Medium: oracle: drop spurious output from sqlplus
+  + RA: Filesystem: allow configuring smbfs mounts as clones
+
+* Wed Sep 23 2009 Fabio M. Di Nitto - 3.0.3-1
+- New rgmanager resource agents upstream release
+
+* Thu Aug 20 2009 Fabio M. Di Nitto - 3.0.1-1
+- New rgmanager resource agents upstream release
+
+* Tue Aug 18 2009 Andrew Beekhof - 3.0.0-16
+- Create an ldirectord package
+- Update Pacemaker agents to upstream version: 2198dc90bec4
+  + Build: Import ldirectord.
+  + Ensure HA_VARRUNDIR has a value to substitute
+  + High: Add findif tool (mandatory for IPaddr/IPaddr2)
+  + High: IPv6addr: new nic and cidr_netmask parameters
+  + High: postfix: new resource agent
+  + Include license information
+  + Low (LF 2159): Squid: make the regexp match the output of netstat more precisely
+  + Low: configure: Fix package name.
+  + Low: ldirectord: add dependency on $remote_fs.
+  + Low: ldirectord: add mandatory required header to init script.
+  + Medium (LF 2165): IPaddr2: remove all colons from the mac address before passing it to send_arp
+  + Medium: VirtualDomain: destroy domain shortly before timeout expiry
+  + Medium: shellfuncs: Make the mktemp wrappers work.
+  + Remove references to Echo function
+  + Remove references to heartbeat shellfuncs.
+  + Remove useless path lookups
+  + findif: actually include the right header. Simplify configure.
+  + ldirectord: Remove superfluous configure artifact.
+  + ocf-tester: Fix package reference and path to DTD.
+
+* Tue Aug 11 2009 Ville Skyttä - 3.0.0-15
+- Use bzipped upstream hg tarball.
+
+* Wed Jul 29 2009 Fabio M. Di Nitto - 3.0.0-14
+- Merge Pacemaker cluster resource agents:
+  * Add Source1.
+  * Drop noarch. We have real binaries now.
+  * Update BuildRequires.
+  * Update all relevant prep/build/install/files/description sections.
+
+* Sun Jul 26 2009 Fedora Release Engineering - 3.0.0-13
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild
+
+* Wed Jul 8 2009 Fabio M. Di Nitto - 3.0.0-12
+- spec file updates:
+  * Update copyright header
+  * final release: undefine alphatag
+
+* Thu Jul 2 2009 Fabio M. Di Nitto - 3.0.0-11.rc4
+- New upstream release.
+
+* Sat Jun 20 2009 Fabio M. Di Nitto - 3.0.0-10.rc3
+- New upstream release.
+
+* Wed Jun 10 2009 Fabio M. Di Nitto - 3.0.0-9.rc2
+- New upstream release
+  git94df30ca63e49afb1e8aeede65df8a3e5bcd0970
+
+* Tue Mar 24 2009 Fabio M. Di Nitto - 3.0.0-8.rc1
+- New upstream release.
+- Update BuildRoot usage to preferred versions/names
+
+* Mon Mar 9 2009 Fabio M. Di Nitto - 3.0.0-7.beta1
+- New upstream release.
+
+* Fri Mar 6 2009 Fabio M. Di Nitto - 3.0.0-6.alpha7
+- New upstream release.
+
+* Tue Mar 3 2009 Fabio M. Di Nitto - 3.0.0-5.alpha6
+- New upstream release.
+
+* Tue Feb 24 2009 Fabio M. Di Nitto - 3.0.0-4.alpha5
+- Drop Conflicts with rgmanager.
+
+* Mon Feb 23 2009 Fabio M. Di Nitto - 3.0.0-3.alpha5
+- New upstream release.
+
+* Thu Feb 19 2009 Fabio M. Di Nitto - 3.0.0-2.alpha4
+- Add comments on how to build this package.
+
+* Thu Feb 5 2009 Fabio M. Di Nitto - 3.0.0-1.alpha4
+- New upstream release.
+- Fix datadir/cluster directory ownership.
+
+* Tue Jan 27 2009 Fabio M. Di Nitto - 3.0.0-1.alpha3
+- Initial packaging