import resource-agents-4.1.1-98.el8

This commit is contained in:
CentOS Sources 2021-10-06 14:25:46 -04:00 committed by Stepan Oksanichenko
parent 99cab2d15d
commit 533f512f27
22 changed files with 5530 additions and 3 deletions

View File

@ -0,0 +1,714 @@
From 90b595650d7d8a6f6a69a9f7060c6406aa731c18 Mon Sep 17 00:00:00 2001
From: "Fabio M. Di Nitto" <fdinitto@redhat.com>
Date: Wed, 28 Jul 2021 10:08:10 +0200
Subject: [PATCH] Add storage-mon pacemaker health check
Signed-off-by: Fabio M. Di Nitto <fdinitto@redhat.com>
---
.gitignore | 41 ++++++
configure.ac | 1 +
doc/man/Makefile.am | 3 +-
heartbeat/Makefile.am | 17 +--
heartbeat/storage-mon.in | 263 +++++++++++++++++++++++++++++++++++++++
tools/Makefile.am | 5 +-
tools/storage_mon.c | 263 +++++++++++++++++++++++++++++++++++++++
7 files changed, 583 insertions(+), 10 deletions(-)
create mode 100644 heartbeat/storage-mon.in
create mode 100644 tools/storage_mon.c
diff --git a/.gitignore b/.gitignore
index 38d3566205..f7277bf04e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,6 +45,46 @@ heartbeat/ocf-shellfuncs
heartbeat/send_ua
heartbeat/shellfuncs
heartbeat/*.pyc
+heartbeat/AoEtarget
+heartbeat/CTDB
+heartbeat/ManageRAID
+heartbeat/ManageVE
+heartbeat/Squid
+heartbeat/SysInfo
+heartbeat/aws-vpc-route53
+heartbeat/azure-events
+heartbeat/clvm
+heartbeat/conntrackd
+heartbeat/dnsupdate
+heartbeat/dummypy
+heartbeat/eDir88
+heartbeat/fio
+heartbeat/galera
+heartbeat/gcp-pd-move
+heartbeat/gcp-vpc-move-ip
+heartbeat/gcp-vpc-move-route
+heartbeat/gcp-vpc-move-vip
+heartbeat/iSCSILogicalUnit
+heartbeat/iSCSITarget
+heartbeat/jira
+heartbeat/kamailio
+heartbeat/lxc
+heartbeat/lxd-info
+heartbeat/machine-info
+heartbeat/mariadb
+heartbeat/mpathpersist
+heartbeat/nfsnotify
+heartbeat/openstack-info
+heartbeat/rabbitmq-cluster
+heartbeat/redis
+heartbeat/rsyslog
+heartbeat/sg_persist
+heartbeat/slapd
+heartbeat/smb-share
+heartbeat/storage-mon
+heartbeat/sybaseASE
+heartbeat/syslog-ng
+heartbeat/vsftpd
include/agent_config.h
include/config.h
include/config.h.in
@@ -61,6 +101,7 @@ systemd/resource-agents.conf
tools/findif
tools/ocf-tester
tools/send_arp
+tools/storage_mon
tools/tickle_tcp
tools/ocft/README
tools/ocft/README.zh_CN
diff --git a/configure.ac b/configure.ac
index 717fb95432..c125df98f6 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1002,6 +1002,7 @@ AC_CONFIG_FILES([heartbeat/rsyslog], [chmod +x heartbeat/rsyslog])
AC_CONFIG_FILES([heartbeat/smb-share], [chmod +x heartbeat/smb-share])
AC_CONFIG_FILES([heartbeat/sg_persist], [chmod +x heartbeat/sg_persist])
AC_CONFIG_FILES([heartbeat/slapd], [chmod +x heartbeat/slapd])
+AC_CONFIG_FILES([heartbeat/storage-mon], [chmod +x heartbeat/storage-mon])
AC_CONFIG_FILES([heartbeat/sybaseASE], [chmod +x heartbeat/sybaseASE])
AC_CONFIG_FILES([heartbeat/syslog-ng], [chmod +x heartbeat/syslog-ng])
AC_CONFIG_FILES([heartbeat/vsftpd], [chmod +x heartbeat/vsftpd])
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index 947d83cb2b..97904ccb16 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -138,6 +138,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \
ocf_heartbeat_mariadb.7 \
ocf_heartbeat_mdraid.7 \
ocf_heartbeat_minio.7 \
+ ocf_heartbeat_mpathpersist.7 \
ocf_heartbeat_mysql.7 \
ocf_heartbeat_mysql-proxy.7 \
ocf_heartbeat_nagios.7 \
@@ -175,7 +176,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \
ocf_heartbeat_smb-share.7 \
ocf_heartbeat_sybaseASE.7 \
ocf_heartbeat_sg_persist.7 \
- ocf_heartbeat_mpathpersist.7 \
+ ocf_heartbeat_storage-mon.7 \
ocf_heartbeat_symlink.7 \
ocf_heartbeat_syslog-ng.7 \
ocf_heartbeat_tomcat.7 \
diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 9af44cc127..5d52d211f2 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -32,22 +32,22 @@ ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat
dtddir = $(datadir)/$(PACKAGE_NAME)
dtd_DATA = ra-api-1.dtd metadata.rng
+ocf_PROGRAMS =
+
if USE_IPV6ADDR_AGENT
-ocf_PROGRAMS = IPv6addr
-else
-ocf_PROGRAMS =
+ocf_PROGRAMS += IPv6addr
endif
+halib_PROGRAMS =
+
if IPV6ADDR_COMPATIBLE
-halib_PROGRAMS = send_ua
-else
-halib_PROGRAMS =
+halib_PROGRAMS += send_ua
endif
IPv6addr_SOURCES = IPv6addr.c IPv6addr_utils.c
-send_ua_SOURCES = send_ua.c IPv6addr_utils.c
-
IPv6addr_LDADD = -lplumb $(LIBNETLIBS)
+
+send_ua_SOURCES = send_ua.c IPv6addr_utils.c
send_ua_LDADD = $(LIBNETLIBS)
osp_SCRIPTS = nova-compute-wait \
@@ -170,6 +170,7 @@ ocf_SCRIPTS = AoEtarget \
mpathpersist \
slapd \
+ storage-mon \
sybaseASE \
symlink \
syslog-ng \
tomcat \
diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in
new file mode 100644
index 0000000000..5b289fe554
--- /dev/null
+++ b/heartbeat/storage-mon.in
@@ -0,0 +1,263 @@
+#!@BASH_SHELL@
+#
+# Copyright (C) 2021 Red Hat, Inc. All rights reserved.
+#
+# Authors: Christine Caulfield <ccaulfie@redhat.com>
+# Fabio M. Di Nitto <fdinitto@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+
+#
+# Checks storage I/O status of all given drives and writes the #health-storage
+# status into the CIB
+# Implementation is heavily based on ocf:pacemaker:HealthSMART
+#
+# It sends a single block of IO to a random location on the device and reports any errors returned.
+# If the IO hangs, that will also be returned. (bear in mind that this may also hang the C app in some
+# instances).
+#
+# It's worth making a note in the RA description that the smartmon RA is also recommended (this
+# does not replace it), and that Pacemaker health checking should be configured.
+#
+# https://clusterlabs.org/pacemaker/doc/2.1/Pacemaker_Explained/singlehtml/index.html#tracking-node-health
+
+#######################################################################
+
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+#
+STORAGEMON=$HA_BIN/storage_mon
+ATTRDUP=/usr/sbin/attrd_updater
+
+OCF_RESKEY_CRM_meta_interval_default="0"
+OCF_RESKEY_io_timeout_default="10"
+OCF_RESKEY_inject_errors_default=""
+OCF_RESKEY_state_file_default="${HA_RSCTMP%%/}/storage-mon-${OCF_RESOURCE_INSTANCE}.state"
+
+# Explicitly list all environment variables used, to make static analysis happy
+: ${OCF_RESKEY_CRM_meta_interval:=${OCF_RESKEY_CRM_meta_interval_default}}
+: ${OCF_RESKEY_drives:=""}
+: ${OCF_RESKEY_io_timeout:=${OCF_RESKEY_io_timeout_default}}
+: ${OCF_RESKEY_inject_errors:=${OCF_RESKEY_inject_errors_default}}
+: ${OCF_RESKEY_state_file:=${OCF_RESKEY_state_file_default}}
+
+#######################################################################
+
+meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="storage-mon">
+<version>1.0</version>
+
+<longdesc lang="en">
+System health agent that checks the storage I/O status of the given drives and
+updates the #health-storage attribute. Usage is highly recommended in combination
+with storage-mon monitoring agent. The agent currently supports a maximum of 25
+devices per instance.
+</longdesc>
+<shortdesc lang="en">storage I/O health status</shortdesc>
+
+<parameters>
+
+<parameter name="state_file" unique="1">
+<longdesc lang="en">
+Location to store the resource state in.
+</longdesc>
+<shortdesc lang="en">State file</shortdesc>
+<content type="string" default="${OCF_RESKEY_state_file_default}" />
+</parameter>
+
+<parameter name="drives" unique="1" required="1">
+<longdesc lang="en">
+The drive(s) to check as a SPACE separated list. Enter the full path to the device, e.g. "/dev/sda".
+</longdesc>
+<shortdesc lang="en">Drives to check</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="io_timeout" unique="0">
+<longdesc lang="en">
+Specify disk I/O timeout in seconds. Minimum 1, recommended 10 (default).
+</longdesc>
+<shortdesc lang="en">Disk I/O timeout</shortdesc>
+<content type="integer" default="${OCF_RESKEY_io_timeout_default}" />
+</parameter>
+
+<parameter name="inject_errors" unique="0">
+<longdesc lang="en">
+Used only for testing! Specify % of I/O errors to simulate drives failures.
+</longdesc>
+<shortdesc lang="en">Specify % of I/O errors to simulate drives failures</shortdesc>
+<content type="integer" default="${OCF_RESKEY_inject_errors_default}" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="10s" />
+<action name="stop" timeout="120s" />
+<action name="monitor" timeout="120s" interval="30s" start-delay="0s" />
+<action name="meta-data" timeout="5s" />
+<action name="validate-all" timeout="10s" />
+</actions>
+</resource-agent>
+END
+ return $OCF_SUCCESS
+}
+
+#######################################################################
+
+storage-mon_usage() {
+ cat <<END
+usage: $0 {start|stop|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+ return $1
+}
+
+storage-mon_init() {
+ #Test for presence of storage_mon helper
+ if [ ! -x "$STORAGEMON" ] ; then
+ ocf_log err "${STORAGEMON} not installed."
+ exit $OCF_ERR_INSTALLED
+ fi
+
+ i=0
+ for DRIVE in ${OCF_RESKEY_drives}; do
+ if [ ! -e "$DRIVE" ] ; then
+ ocf_log err "${DRIVE} not found on the system"
+ exit $OCF_ERR_INSTALLED
+ fi
+ i=$((i + 1))
+ done
+
+ if [ "$i" -gt "25" ]; then
+ ocf_log err "Too many drives ($i) configured for this agent. Max 25."
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ if [ "${OCF_RESKEY_io_timeout}" -lt "1" ]; then
+ ocf_log err "Minimum timeout is 1. Recommended 10 (default)."
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ if [ -n "${OCF_RESKEY_inject_errors}" ]; then
+ if [ "${OCF_RESKEY_inject_errors}" -lt "1" ] || [ "${OCF_RESKEY_inject_errors}" -gt "100" ]; then
+ ocf_log err "Inject errors % has to be a value between 1 and 100."
+ exit $OCF_ERR_CONFIGURED
+ fi
+ fi
+}
+
+storage-mon_validate() {
+ storage-mon_init
+
+ # Is the state directory writable?
+ state_dir=$(dirname "$OCF_RESKEY_state_file")
+ touch "$state_dir/$$"
+ if [ $? -ne 0 ]; then
+ return $OCF_ERR_CONFIGURED
+ fi
+ rm "$state_dir/$$"
+
+ return $OCF_SUCCESS
+}
+
+storage-mon_monitor() {
+ storage-mon_init
+
+ # Monitor _MUST!_ differentiate correctly between running
+ # (SUCCESS), failed (ERROR) or _cleanly_ stopped (NOT RUNNING).
+ # That is THREE states, not just yes/no.
+
+ if [ ! -f "${OCF_RESKEY_state_file}" ]; then
+ return $OCF_NOT_RUNNING
+ fi
+
+ # generate command line
+ cmdline=""
+ for DRIVE in ${OCF_RESKEY_drives}; do
+ cmdline="$cmdline --device $DRIVE --score 1"
+ done
+ cmdline="$cmdline --timeout ${OCF_RESKEY_io_timeout}"
+ if [ -n "${OCF_RESKEY_inject_errors}" ]; then
+ cmdline="$cmdline --inject-errors-percent ${OCF_RESKEY_inject_errors}"
+ fi
+ $STORAGEMON $cmdline
+ if [ $? -ne 0 ]; then
+ status="red"
+ else
+ status="green"
+ fi
+
+ "$ATTRDUP" -n "#health-${OCF_RESOURCE_INSTANCE}" -U "$status" -d "5s"
+ return $OCF_SUCCESS
+}
+
+storage-mon_start() {
+ storage-mon_monitor
+ if [ $? -eq $OCF_SUCCESS ]; then
+ return $OCF_SUCCESS
+ fi
+ touch "${OCF_RESKEY_state_file}"
+}
+
+storage-mon_stop() {
+ storage-mon_monitor
+ if [ $? -eq $OCF_SUCCESS ]; then
+ rm "${OCF_RESKEY_state_file}"
+ fi
+ return $OCF_SUCCESS
+}
+
+storage-mon_validate() {
+ storage-mon_init
+
+ # Is the state directory writable?
+ state_dir=$(dirname "${OCF_RESKEY_state_file}")
+ touch "$state_dir/$$"
+ if [ $? -ne 0 ]; then
+ return $OCF_ERR_CONFIGURED
+ fi
+ rm "$state_dir/$$"
+
+ return $OCF_SUCCESS
+}
+
+case "$__OCF_ACTION" in
+ start) storage-mon_start;;
+ stop) storage-mon_stop;;
+ monitor) storage-mon_monitor;;
+ validate-all) storage-mon_validate;;
+ meta-data) meta_data;;
+ usage|help) storage-mon_usage $OCF_SUCCESS;;
+ *) storage-mon_usage $OCF_ERR_UNIMPLEMENTED;;
+esac
+rc=$?
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
+exit $rc
+# vim: set filetype=sh:
diff --git a/tools/Makefile.am b/tools/Makefile.am
index 1186967cfb..83ff43651d 100644
--- a/tools/Makefile.am
+++ b/tools/Makefile.am
@@ -29,7 +29,8 @@ EXTRA_DIST = ocf-tester.8 sfex_init.8
sbin_PROGRAMS =
sbin_SCRIPTS = ocf-tester
-halib_PROGRAMS = findif
+halib_PROGRAMS = findif \
+ storage_mon
man8_MANS = ocf-tester.8
@@ -67,6 +68,8 @@ sfex_stat_LDADD = $(GLIBLIB) -lplumb -lplumbgpl
findif_SOURCES = findif.c
+storage_mon_SOURCES = storage_mon.c
+
if BUILD_TICKLE
halib_PROGRAMS += tickle_tcp
tickle_tcp_SOURCES = tickle_tcp.c
diff --git a/tools/storage_mon.c b/tools/storage_mon.c
new file mode 100644
index 0000000000..7b65bb4191
--- /dev/null
+++ b/tools/storage_mon.c
@@ -0,0 +1,263 @@
+#include <stdio.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <syslog.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#ifdef __FreeBSD__
+#include <sys/disk.h>
+#endif
+
+#define MAX_DEVICES 25
+#define DEFAULT_TIMEOUT 10
+
+static void usage(char *name, FILE *f)
+{
+ fprintf(f, "usage: %s [-hv] [-d <device>]... [-s <score>]... [-t <secs>]\n", name);
+ fprintf(f, " --device <dev> device to test, up to %d instances\n", MAX_DEVICES);
+ fprintf(f, " --score <n> score if device fails the test. Must match --device count\n");
+ fprintf(f, " --timeout <n> max time to wait for a device test to come back. in seconds (default %d)\n", DEFAULT_TIMEOUT);
+ fprintf(f, " --inject-errors-percent <n> Generate EIO errors <n>%% of the time (for testing only)\n");
+ fprintf(f, " --verbose emit extra output to stdout\n");
+ fprintf(f, " --help print this messages\n");
+}
+
+/* Check one device */
+static void *test_device(const char *device, int verbose, int inject_error_percent)
+{
+ uint64_t devsize;
+ int device_fd;
+ int res;
+ off_t seek_spot;
+ char buffer[512];
+
+ if (verbose) {
+ printf("Testing device %s\n", device);
+ }
+
+ device_fd = open(device, O_RDONLY);
+ if (device_fd < 0) {
+ fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
+ exit(-1);
+ }
+#ifdef __FreeBSD__
+ res = ioctl(device_fd, DIOCGMEDIASIZE, &devsize);
+#else
+ res = ioctl(device_fd, BLKGETSIZE64, &devsize);
+#endif
+ if (res != 0) {
+ fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno));
+ close(device_fd);
+ exit(-1);
+ }
+ if (verbose) {
+ fprintf(stderr, "%s: size=%zu\n", device, devsize);
+ }
+ /* Don't fret about real randomness */
+ srand(time(NULL) + getpid());
+ /* Pick a random place on the device - sector aligned */
+ seek_spot = (rand() % (devsize-1024)) & 0xFFFFFFFFFFFFFE00;
+ res = lseek(device_fd, seek_spot, SEEK_SET);
+ if (res < 0) {
+ fprintf(stderr, "Failed to seek %s: %s\n", device, strerror(errno));
+ close(device_fd);
+ exit(-1);
+ }
+
+ if (verbose) {
+ printf("%s: reading from pos %ld\n", device, seek_spot);
+ }
+
+ res = read(device_fd, buffer, sizeof(buffer));
+ if (res < 0) {
+ fprintf(stderr, "Failed to read %s: %s\n", device, strerror(errno));
+ close(device_fd);
+ exit(-1);
+ }
+ if (res < (int)sizeof(buffer)) {
+ fprintf(stderr, "Failed to read %ld bytes from %s, got %d\n", sizeof(buffer), device, res);
+ close(device_fd);
+ exit(-1);
+ }
+
+ /* Fake an error */
+ if (inject_error_percent && ((rand() % 100) < inject_error_percent)) {
+ fprintf(stderr, "People, please fasten your seatbelts, injecting errors!\n");
+ close(device_fd);
+ exit(-1);
+ }
+ res = close(device_fd);
+ if (res != 0) {
+ fprintf(stderr, "Failed to close %s: %s\n", device, strerror(errno));
+ close(device_fd);
+ exit(-1);
+ }
+
+ if (verbose) {
+ printf("%s: done\n", device);
+ }
+ exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+ char *devices[MAX_DEVICES];
+ int scores[MAX_DEVICES];
+ pid_t test_forks[MAX_DEVICES];
+ size_t device_count = 0;
+ size_t score_count = 0;
+ size_t finished_count = 0;
+ int timeout = DEFAULT_TIMEOUT;
+ struct timespec ts;
+ time_t start_time;
+ size_t i;
+ int final_score = 0;
+ int opt, option_index;
+ int verbose = 0;
+ int inject_error_percent = 0;
+ struct option long_options[] = {
+ {"timeout", required_argument, 0, 't' },
+ {"device", required_argument, 0, 'd' },
+ {"score", required_argument, 0, 's' },
+ {"inject-errors-percent", required_argument, 0, 0 },
+ {"verbose", no_argument, 0, 'v' },
+ {"help", no_argument, 0, 'h' },
+ {0, 0, 0, 0 }
+ };
+ while ( (opt = getopt_long(argc, argv, "hvt:d:s:",
+ long_options, &option_index)) != -1 ) {
+ switch (opt) {
+ case 0: /* Long-only options */
+ if (strcmp(long_options[option_index].name, "inject-errors-percent") == 0) {
+ inject_error_percent = atoi(optarg);
+ if (inject_error_percent < 1 || inject_error_percent > 100) {
+ fprintf(stderr, "inject_error_percent should be between 1 and 100\n");
+ return -1;
+ }
+ }
+ break;
+ case 'd':
+ if (device_count < MAX_DEVICES) {
+ devices[device_count++] = strdup(optarg);
+ } else {
+ fprintf(stderr, "too many devices, max is %d\n", MAX_DEVICES);
+ return -1;
+ }
+ break;
+ case 's':
+ if (device_count < MAX_DEVICES) {
+ int score = atoi(optarg);
+ if (score < 1 || score > 10) {
+ fprintf(stderr, "Score must be between 1 and 10 inclusive\n");
+ return -1;
+ }
+ scores[score_count++] = score;
+ } else {
+ fprintf(stderr, "too many scores, max is %d\n", MAX_DEVICES);
+ return -1;
+ }
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 't':
+ timeout = atoi(optarg);
+ if (timeout < 1) {
+ fprintf(stderr, "invalid timeout %d. Min 1, recommended %d (default)\n", timeout, DEFAULT_TIMEOUT);
+ return -1;
+ }
+ break;
+ case 'h':
+ usage(argv[0], stdout);
+ break;
+ default:
+ usage(argv[0], stderr);
+ break;
+ }
+
+ }
+ if (device_count == 0) {
+ fprintf(stderr, "No devices to test, use the -d or --device argument\n");
+ return -1;
+ }
+
+ if (device_count != score_count) {
+ fprintf(stderr, "There must be the same number of devices and scores\n");
+ return -1;
+ }
+
+ openlog("storage_mon", 0, LOG_DAEMON);
+
+ memset(test_forks, 0, sizeof(test_forks));
+ for (i=0; i<device_count; i++) {
+ test_forks[i] = fork();
+ if (test_forks[i] < 0) {
+ fprintf(stderr, "Error spawning fork for %s: %s\n", devices[i], strerror(errno));
+ syslog(LOG_ERR, "Error spawning fork for %s: %s\n", devices[i], strerror(errno));
+ /* Just test the devices we have */
+ break;
+ }
+ /* child */
+ if (test_forks[i] == 0) {
+ test_device(devices[i], verbose, inject_error_percent);
+ }
+ }
+
+ /* See if they have finished */
+ clock_gettime(CLOCK_REALTIME, &ts);
+ start_time = ts.tv_sec;
+
+ while ((finished_count < device_count) && ((start_time + timeout) > ts.tv_sec)) {
+ for (i=0; i<device_count; i++) {
+ int wstatus;
+ pid_t w;
+
+ if (test_forks[i] > 0) {
+ w = waitpid(test_forks[i], &wstatus, WUNTRACED | WNOHANG | WCONTINUED);
+ if (w < 0) {
+ fprintf(stderr, "waitpid on %s failed: %s\n", devices[i], strerror(errno));
+ return -1;
+ }
+
+ if (w == test_forks[i]) {
+ if (WIFEXITED(wstatus)) {
+ if (WEXITSTATUS(wstatus) == 0) {
+ finished_count++;
+ test_forks[i] = 0;
+ } else {
+ syslog(LOG_ERR, "Error reading from device %s", devices[i]);
+ final_score += scores[i];
+ }
+ }
+ }
+ }
+ }
+
+ usleep(100000);
+
+ clock_gettime(CLOCK_REALTIME, &ts);
+ }
+
+ /* See which child processes have not finished */
+ for (i=0; i<device_count; i++) {
+ if (test_forks[i] != 0) {
+ syslog(LOG_ERR, "Reading from device %s did not complete in %d seconds timeout", devices[i], timeout);
+ fprintf(stderr, "Thread for device %s did not complete in time\n", devices[i]);
+ final_score += scores[i];
+ }
+ }
+
+ if (verbose) {
+ printf("Final score is %d\n", final_score);
+ }
+ return final_score;
+}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,25 @@
From 7f7ca75100a846242ff1510fd9bcf299cd3d00eb Mon Sep 17 00:00:00 2001
From: Aleksei Burlakov <aleksei.burlakov@suse.com>
Date: Mon, 26 Oct 2020 13:25:45 +0100
Subject: [PATCH] ethmonitor: is_interface: RE matches vlan names
Vlan names end not with : but are suffixed with the @devices-name
---
heartbeat/ethmonitor | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor
index e791fbe9d..cf0321ab4 100755
--- a/heartbeat/ethmonitor
+++ b/heartbeat/ethmonitor
@@ -230,8 +230,8 @@ is_interface() {
#
# List interfaces but exclude FreeS/WAN ipsecN virtual interfaces
#
- local iface=`$IP2UTIL -o -f inet addr show | grep " $1 " \
- | cut -d ' ' -f2 | sort -u | grep -v '^ipsec[0-9][0-9]*$'`
+ local iface=`$IP2UTIL -o -f link addr show | grep -e " $1[:@]" \
+ | cut -d ' ' -f2 | tr -d ':' | cut -d '@' -f1 | sort -u | grep -v '^ipsec[0-9][0-9]*$'`
[ "$iface" != "" ]
}

View File

@ -0,0 +1,40 @@
From 3dd051ed56418dc241417ea02e59db3982b7b92c Mon Sep 17 00:00:00 2001
From: Oliver Freyermuth <o.freyermuth@googlemail.com>
Date: Thu, 26 Nov 2020 10:25:01 +0100
Subject: [PATCH] heartbeat/iface-vlan: vlan_{interface,id} do not have to be
unique.
Machines commonly have several vlan_id attached to one interface,
and may also have a vlan_id attached to several interfaces.
vlan_name will still be unique, usual names are:
- bond_in.83@bond_in
- bond_in.84@bond_in
fixes #1581
---
heartbeat/iface-vlan | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/iface-vlan b/heartbeat/iface-vlan
index cbe7e86da..d0481373c 100755
--- a/heartbeat/iface-vlan
+++ b/heartbeat/iface-vlan
@@ -89,7 +89,7 @@ vlan_meta_data() {
</shortdesc>
<parameters>
- <parameter name="vlan_interface" unique="1" required="1">
+ <parameter name="vlan_interface" unique="0" required="1">
<longdesc lang="en">
Define the interface where VLAN should be attached.
</longdesc>
@@ -99,7 +99,7 @@ vlan_meta_data() {
<content type="string"/>
</parameter>
- <parameter name="vlan_id" unique="1" required="1">
+ <parameter name="vlan_id" unique="0" required="1">
<longdesc lang="en">
Define the VLAN ID. It has to be a value between 0 and 4094.
</longdesc>

View File

@ -0,0 +1,22 @@
From 4812c67894063f8125a3915d32da168931f088c6 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 25 Feb 2021 16:49:55 +0100
Subject: [PATCH] gcp-vpc-move-route: make "vpc_network" optional
---
heartbeat/gcp-vpc-move-route.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
index 9fe985832..fd2d2ec59 100644
--- a/heartbeat/gcp-vpc-move-route.in
+++ b/heartbeat/gcp-vpc-move-route.in
@@ -98,7 +98,7 @@ subnet ranges
<content type="string" />
</parameter>
-<parameter name="vpc_network" required="1">
+<parameter name="vpc_network" required="0">
<longdesc lang="en">
Name of the VPC network
</longdesc>

View File

@ -0,0 +1,42 @@
From 6877b20a83cb691884996bf77385259388fdebb2 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 3 Mar 2021 17:06:12 +0100
Subject: [PATCH] podman: return OCF_NOT_RUNNING when monitor cmd fails (not
running)
---
heartbeat/podman | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/heartbeat/podman b/heartbeat/podman
index 82ea14624..5b707f3f5 100755
--- a/heartbeat/podman
+++ b/heartbeat/podman
@@ -204,14 +204,19 @@ monitor_cmd_exec()
# 125: no container with name or ID ${CONTAINER} found
# 126: container state improper (not running)
# 127: any other error
- if [ $rc -eq 125 ] || [ $rc -eq 126 ]; then
- rc=$OCF_NOT_RUNNING
- elif [ $rc -ne 0 ]; then
- ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out"
- rc=$OCF_ERR_GENERIC
- else
- ocf_log debug "monitor cmd passed: exit code = $rc"
- fi
+ # 255: podman 2+: container not running
+ case "$rc" in
+ 125|126|255)
+ rc=$OCF_NOT_RUNNING
+ ;;
+ 0)
+ ocf_log debug "monitor cmd passed: exit code = $rc"
+ ;;
+ *)
+ ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out"
+ rc=$OCF_ERR_GENERIC
+ ;;
+ esac
return $rc
}

View File

@ -0,0 +1,57 @@
From dc4fc6fb51481e62c763212129e7dbae4cb663fd Mon Sep 17 00:00:00 2001
From: Reid Wahl <nrwahl@protonmail.com>
Date: Tue, 2 Feb 2021 17:55:40 -0800
Subject: [PATCH] nfsserver: Error-check unbind_tree
Fail to stop if unmounting rpcpipefs_dir or /var/lib/nfs fails.
Resolves: RHBZ#1924363
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
---
heartbeat/nfsserver | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 80d20676b..96b19abe3 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -465,9 +465,20 @@ unbind_tree ()
sleep 1
i=$((i + 1))
done
+
+ if mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "; then
+ ocf_log err "Failed to unmount $OCF_RESKEY_rpcpipefs_dir"
+ return $OCF_ERR_GENERIC
+ fi
+
if is_bound /var/lib/nfs; then
- umount /var/lib/nfs
+ if ! umount /var/lib/nfs; then
+ ocf_log err "Failed to unmount /var/lib/nfs"
+ return $OCF_ERR_GENERIC
+ fi
fi
+
+ return $OCF_SUCCESS
}
binary_status()
@@ -836,8 +847,14 @@ nfsserver_stop ()
esac
unbind_tree
- ocf_log info "NFS server stopped"
- return 0
+ rc=$?
+ if [ "$rc" -ne $OCF_SUCCESS ]; then
+ ocf_exit_reason "Failed to unmount a bind mount"
+ else
+ ocf_log info "NFS server stopped"
+ fi
+
+ return $rc
}
nfsserver_validate ()

View File

@ -0,0 +1,31 @@
From 500de79739cd39808fb48fa556c9b9b9fe2e8acd Mon Sep 17 00:00:00 2001
From: Matthias Hensler <matthias@wspse.de>
Date: Thu, 18 Feb 2021 12:49:49 +0100
Subject: [PATCH] fix pid_status() for VirtualDomain on EL8
see https://github.com/ClusterLabs/resource-agents/issues/1613
---
heartbeat/VirtualDomain | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain
index eb41e3e22..f9cd21fc7 100755
--- a/heartbeat/VirtualDomain
+++ b/heartbeat/VirtualDomain
@@ -421,14 +421,14 @@ pid_status()
case "$emulator" in
qemu-kvm|qemu-dm|qemu-system-*)
rc=$OCF_NOT_RUNNING
- ps awx | grep -E "[q]emu-(kvm|dm|system).*-name $DOMAIN_NAME " > /dev/null 2>&1
+ ps awx | grep -E "[q]emu-(kvm|dm|system).*-name ($DOMAIN_NAME|[^ ]*guest=$DOMAIN_NAME(,[^ ]*)?) " > /dev/null 2>&1
if [ $? -eq 0 ]; then
rc=$OCF_SUCCESS
fi
;;
libvirt_lxc)
rc=$OCF_NOT_RUNNING
- ps awx | grep -E "[l]ibvirt_lxc.*-name $DOMAIN_NAME " > /dev/null 2>&1
+ ps awx | grep -E "[l]ibvirt_lxc.*-name ($DOMAIN_NAME|[^ ]*guest=$DOMAIN_NAME(,[^ ]*)?) " > /dev/null 2>&1
if [ $? -eq 0 ]; then
rc=$OCF_SUCCESS
fi

View File

@ -0,0 +1,23 @@
From dd5394180267c652d0928db8c5508d9977893fe5 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 18 Mar 2021 16:23:10 +0100
Subject: [PATCH] db2: add PRIMARY/REMOTE_CATCHUP_PENDING/CONNECTED status to
promote-check
---
heartbeat/db2 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/db2 b/heartbeat/db2
index a57fd2bb6..459136cbd 100755
--- a/heartbeat/db2
+++ b/heartbeat/db2
@@ -767,7 +767,7 @@ db2_promote() {
return $OCF_SUCCESS
;;
- PRIMARY/PEER/*|PRIMARY/REMOTE_CATCHUP/*|Primary/Peer)
+ PRIMARY/PEER/*|PRIMARY/REMOTE_CATCHUP/*|PRIMARY/REMOTE_CATCHUP_PENDING/CONNECTED|Primary/Peer)
# nothing to do, only update pacemaker's view
echo MASTER > $STATE_FILE
return $OCF_SUCCESS

View File

@ -0,0 +1,52 @@
From f91804ff4772e3ab41f46e28d370f57898700333 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Thu, 10 Dec 2020 08:19:21 +0100
Subject: [PATCH] fixes #1625: infinite loop in SML lexer
Reason was a lookahead-only pattern which was included in the state
where the lookahead was transitioning to.
---
pygments/lexers/ml.py | 12 ++++++------
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py
index 8ca8ce3eb..f2ac367c5 100644
--- a/pygments/lexers/ml.py
+++ b/pygments/lexers/ml.py
@@ -142,7 +142,7 @@ def id_callback(self, match):
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
- (r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
+ (r'\b(exception)\b(?!\')', Keyword.Reserved, 'ename'),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
@@ -315,15 +315,14 @@ def id_callback(self, match):
'ename': [
include('whitespace'),
- (r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
+ (r'(and\b)(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
- (r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
+ (r'(and\b)(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
+ (r'(%s)|(%s)' % (alphanumid_re, symbolicid_re), Name.Class),
- include('breakout'),
- include('core'),
- (r'\S+', Error),
+ default('#pop'),
],
'datcon': [
@@ -445,6 +444,7 @@ class OcamlLexer(RegexLexer):
],
}
+
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).

View File

@ -0,0 +1,118 @@
From 760680df771b6e2a9fbcd2f6d9862df4ec1a86de Mon Sep 17 00:00:00 2001
From: Reid Wahl <nrwahl@protonmail.com>
Date: Tue, 9 Mar 2021 18:25:52 -0800
Subject: [PATCH 1/2] azure-lb: Be quiet during stop operation
Currently, it logs "kill (<pid>) No such process" to stderr during stops.
A stop operation is expected to run `kill -s 0 $pid` for a nonexistent
PID, so log that at debug level.
A start or monitor operation's `kill -s 0 $pid` should always succeed,
so any output is unexpected and an error.
Also remove "local" bashism.
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
---
heartbeat/azure-lb | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb
index 65a12235b..863132744 100755
--- a/heartbeat/azure-lb
+++ b/heartbeat/azure-lb
@@ -93,12 +93,18 @@ getpid() {
lb_monitor() {
if test -f "$pidfile"; then
- if pid=`getpid $pidfile` && [ "$pid" ] && kill -s 0 $pid; then
- return $OCF_SUCCESS
- else
- # pidfile w/o process means the process died
- return $OCF_ERR_GENERIC
+ [ "$__OCF_ACTION" = "stop" ] && level="debug" || level="err"
+
+ if pid=$(getpid "$pidfile") && [ -n "$pid" ]; then
+ output=$(kill -s 0 "$pid" 2>&1)
+ mon_rc=$?
+
+ [ -n "$output" ] && ocf_log "$level" "$output"
+ [ "$mon_rc" -eq 0 ] && return $OCF_SUCCESS
fi
+
+ # pidfile w/o process means the process died
+ return $OCF_ERR_GENERIC
else
return $OCF_NOT_RUNNING
fi
@@ -131,7 +137,7 @@ lb_start() {
}
lb_stop() {
- local rc=$OCF_SUCCESS
+ stop_rc=$OCF_SUCCESS
if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
# Allow 2/3 of the action timeout for the orderly shutdown
@@ -160,7 +166,7 @@ lb_stop() {
while :; do
if ! lb_monitor; then
ocf_log warn "SIGKILL did the job."
- rc=$OCF_SUCCESS
+ stop_rc=$OCF_SUCCESS
break
fi
ocf_log info "The job still hasn't stopped yet. Waiting..."
@@ -168,7 +174,7 @@ lb_stop() {
done
fi
rm -f $pidfile
- return $rc
+ return $stop_rc
}
lb_validate() {
From 10f39e90d6b04c28752a4f9adc94dfc03d9d61b8 Mon Sep 17 00:00:00 2001
From: Reid Wahl <nrwahl@protonmail.com>
Date: Tue, 9 Mar 2021 18:32:45 -0800
Subject: [PATCH 2/2] azure-lb: Redirect stdout and stderr to /dev/null
This fixes a regression introduced in commit d22700fc.
When the nc listener process created by an azure-lb resource attempts to
write to stdout, it dies with an EPIPE error.
This can happen when random/garbage input is sent to the nc listener, as
may happen during a port scan. For example, if the listener is on port
62000, and a client sends some text (e.g., `echo test | nc node1
62000`), then the listener attempts to echo "test" to its stdout. This
fails with an EPIPE.
Prior to commit d22700fc, all output was redirected to the pid file.
This caused its own problems, but it prevented this particular behavior.
The fix is to redirect the listener's stdout and stderr to /dev/null.
Resolves: RHBZ#1937142
Resolves: RHBZ#1937151
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
---
heartbeat/azure-lb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb
index 863132744..ade1b4577 100755
--- a/heartbeat/azure-lb
+++ b/heartbeat/azure-lb
@@ -119,7 +119,7 @@ lb_start() {
if ! lb_monitor; then
ocf_log debug "Starting $process: $cmd"
# Execute the command as created above
- $cmd &
+ $cmd >/dev/null 2>&1 &
echo $! > $pidfile
if lb_monitor; then
ocf_log debug "$process: $cmd started successfully, calling monitor"

View File

@ -0,0 +1,141 @@
From b727fe4e2a0f4c88fca0ed9f90f57e570253c961 Mon Sep 17 00:00:00 2001
From: Costas Tyfoxylos <costas.tyf@gmail.com>
Date: Wed, 26 Aug 2020 15:18:00 +0300
Subject: [PATCH 1/2] aws-vpc-move-ip: Implemented optional eni lookup instead
of the default instance id.
In a shared network pattern, where the cluster resides in shared subnets, the instance IDs of the nodes are not retrievable, but the ENI IDs are; this optional feature gives transparent support in that situation.
---
heartbeat/aws-vpc-move-ip | 41 +++++++++++++++++++++++++++++++--------
1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index 1b540caec..bc82428e5 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -44,6 +44,7 @@ OCF_RESKEY_routing_table_default=""
OCF_RESKEY_routing_table_role_default=""
OCF_RESKEY_interface_default="eth0"
OCF_RESKEY_monapi_default="false"
+OCF_RESKEY_lookup_type_default="InstanceId"
: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
@@ -54,6 +55,7 @@ OCF_RESKEY_monapi_default="false"
: ${OCF_RESKEY_routing_table_role=${OCF_RESKEY_routing_table_role_default}}
: ${OCF_RESKEY_interface=${OCF_RESKEY_interface_default}}
: ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}}
+: ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}}
[ -n "$OCF_RESKEY_region" ] && region_opt="--region $OCF_RESKEY_region"
#######################################################################
@@ -154,6 +156,17 @@ Enable enhanced monitoring using AWS API calls to check route table entry
<shortdesc lang="en">Enhanced Monitoring</shortdesc>
<content type="boolean" default="${OCF_RESKEY_monapi_default}" />
</parameter>
+
+<parameter name="lookup_type" required="0">
+<longdesc lang="en">
+Name of resource type to lookup in route table.
+"InstanceId" : EC2 instance ID. (default)
+"NetworkInterfaceId" : ENI ID. (useful in shared VPC setups).
+</longdesc>
+<shortdesc lang="en">lookup type for route table resource</shortdesc>
+<content type="string" default="${OCF_RESKEY_lookup_type_default}" />
+</parameter>
+
</parameters>
<actions>
@@ -187,7 +200,7 @@ execute_cmd_as_role(){
ec2ip_set_address_param_compat(){
# Include backward compatibility for the deprecated address parameter
- if [ -z "$OCF_RESKEY_ip" ] && [ -n "$OCF_RESKEY_address" ]; then
+ if [ -z "$OCF_RESKEY_ip" ] && [ -n "$OCF_RESKEY_address" ]; then
OCF_RESKEY_ip="$OCF_RESKEY_address"
fi
}
@@ -213,16 +226,24 @@ ec2ip_validate() {
}
ec2ip_monitor() {
- MON_RES=""
+ MON_RES=""
+ if [ "${OCF_RESKEY_lookup_type}" = "NetworkInterfaceId" ]; then
+ EC2_ID="$(ec2ip_get_instance_eni)"
+ RESOURCE_TYPE="interface"
+ else
+ EC2_ID="$EC2_INSTANCE_ID"
+ RESOURCE_TYPE="instance"
+ fi
+
if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then
for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do
ocf_log info "monitor: check routing table (API call) - $rtb"
if [[ -z "${OCF_RESKEY_routing_table_role}" ]]; then
- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId"
+ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type"
ocf_log debug "executing command: $cmd"
ROUTE_TO_INSTANCE="$($cmd)"
else
- cmd="$OCF_RESKEY_awscli $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId"
+ cmd="$OCF_RESKEY_awscli $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type"
ROUTE_TO_INSTANCE="$(execute_cmd_as_role "$cmd" $OCF_RESKEY_routing_table_role)"
fi
ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}"
@@ -230,8 +251,8 @@ ec2ip_monitor() {
ROUTE_TO_INSTANCE="<unknown>"
fi
- if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then
- ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE on $rtb"
+ if [ "$EC2_ID" != "$ROUTE_TO_INSTANCE" ]; then
+ ocf_log warn "not routed to this $RESOURCE_TYPE ($EC2_ID) but to $RESOURCE_TYPE $ROUTE_TO_INSTANCE on $rtb"
MON_RES="$MON_RES $rtb"
fi
sleep 1
@@ -275,7 +296,7 @@ ec2ip_drop() {
return $OCF_SUCCESS
}
-ec2ip_get_and_configure() {
+ec2ip_get_instance_eni() {
MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address"
if [ -f $MAC_FILE ]; then
cmd="cat ${MAC_FILE}"
@@ -300,7 +321,11 @@ ec2ip_get_and_configure() {
return $OCF_ERR_GENERIC
fi
ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}"
+ echo $EC2_NETWORK_INTERFACE_ID
+}
+ec2ip_get_and_configure() {
+ EC2_NETWORK_INTERFACE_ID="$(ec2ip_get_instance_eni)"
for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do
if [ -z "${OCF_RESKEY_routing_table_role}" ]; then
cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID"
From f4c8daae098dd33bdd5136ca4846eb505110e006 Mon Sep 17 00:00:00 2001
From: Sander Botman <sbotman@schubergphilis.com>
Date: Fri, 28 Aug 2020 22:01:03 +0200
Subject: [PATCH 2/2] aws-vpc-move-ip: Fix the region option
---
heartbeat/aws-vpc-move-ip | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index bc82428e5..a5b28ad92 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -243,7 +243,7 @@ ec2ip_monitor() {
ocf_log debug "executing command: $cmd"
ROUTE_TO_INSTANCE="$($cmd)"
else
- cmd="$OCF_RESKEY_awscli $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type"
+ cmd="$OCF_RESKEY_awscli $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type"
ROUTE_TO_INSTANCE="$(execute_cmd_as_role "$cmd" $OCF_RESKEY_routing_table_role)"
fi
ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}"

View File

@ -0,0 +1,23 @@
From 3491a6ad30830a8545defa5a417a7db46b093904 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 17 Mar 2021 12:39:10 +0100
Subject: [PATCH] awsvip: dont partially match similar IPs during
monitor-action
---
heartbeat/awsvip | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index 7d0bf35b6..044d049c6 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -172,7 +172,7 @@ awsvip_monitor() {
--instance-id "${INSTANCE_ID}" \
--query 'Reservations[].Instances[].NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \
--output text | \
- grep -q "${SECONDARY_PRIVATE_IP}"
+ grep -qE "(^|\s)${SECONDARY_PRIVATE_IP}(\s|$)"
RET=$?
if [ $RET -ne 0 ]; then

View File

@ -0,0 +1,64 @@
From 59b0840d262900d0eaa8b19df3ede55eea5250d2 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 17 Mar 2021 12:10:59 +0100
Subject: [PATCH] AWS agents: dont spam log files when getting token
---
heartbeat/aws-vpc-move-ip | 2 +-
heartbeat/aws-vpc-route53.in | 2 +-
heartbeat/awseip | 2 +-
heartbeat/awsvip | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index cbb629b00..3ca3d6bd6 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -215,7 +215,7 @@ ec2ip_validate() {
return $OCF_ERR_CONFIGURED
fi
- TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
+ TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
if [ -z "${EC2_INSTANCE_ID}" ]; then
diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
index 4fb17019b..21948eaca 100644
--- a/heartbeat/aws-vpc-route53.in
+++ b/heartbeat/aws-vpc-route53.in
@@ -347,7 +347,7 @@ r53_monitor() {
_get_ip() {
case $OCF_RESKEY_ip in
local|public)
- TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
+ TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
IPADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4 -H "X-aws-ec2-metadata-token: $TOKEN");;
*.*.*.*)
IPADDRESS="${OCF_RESKEY_ip}";;
diff --git a/heartbeat/awseip b/heartbeat/awseip
index de1967774..12ffffaa3 100755
--- a/heartbeat/awseip
+++ b/heartbeat/awseip
@@ -244,7 +244,7 @@ AWSCLI="${OCF_RESKEY_awscli}"
ELASTIC_IP="${OCF_RESKEY_elastic_ip}"
ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}"
-TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
+TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
case $__OCF_ACTION in
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index 8050107e8..7d0bf35b6 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -206,7 +206,7 @@ esac
AWSCLI="${OCF_RESKEY_awscli}"
SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
-TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
+TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
MAC_ADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/mac -H "X-aws-ec2-metadata-token: $TOKEN")
NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN")

View File

@ -0,0 +1,100 @@
From f510d8e78ce65736ca5a72bd8125d31dcb4ff621 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 16 Jun 2020 13:32:18 +0200
Subject: [PATCH 1/2] galera/redis: use --output-as for crm_mon w/newer
Pacemaker, and prepare for Promoted role
---
heartbeat/galera | 9 ++++++++-
heartbeat/redis.in | 9 ++++++++-
2 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/heartbeat/galera b/heartbeat/galera
index 4a313e24b..ba3de4b81 100755
--- a/heartbeat/galera
+++ b/heartbeat/galera
@@ -441,7 +441,14 @@ master_exists()
return 1
fi
# determine if a master instance is already up and is healthy
- crm_mon --as-xml | grep "resource.*id=\"${INSTANCE_ATTR_NAME}\".*role=\"Master\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" > /dev/null 2>&1
+ ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.1.0"
+ res=$?
+ if [ -z "$OCF_RESKEY_crm_feature_set" ] || [ $res -eq 2 ]; then
+ XMLOPT="--output-as=xml"
+ else
+ XMLOPT="--as-xml"
+ fi
+ crm_mon -1 $XMLOPT | grep -q -i -E "resource.*id=\"${INSTANCE_ATTR_NAME}\".*role=\"(Promoted|Master)\".*active=\"true\".*orphaned=\"false\".*failed=\"false\""
return $?
}
diff --git a/heartbeat/redis.in b/heartbeat/redis.in
index da7230a49..7e534db4a 100755
--- a/heartbeat/redis.in
+++ b/heartbeat/redis.in
@@ -272,7 +272,14 @@ master_is_active()
{
if [ -z "$MASTER_ACTIVE_CACHED" ]; then
# determine if a master instance is already up and is healthy
- crm_mon --as-xml | grep "resource.*id=\"${OCF_RESOURCE_INSTANCE}\".*role=\"Master\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" > /dev/null 2>&1
+ ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.1.0"
+ res=$?
+ if [ -z "$OCF_RESKEY_crm_feature_set" ] || [ $res -eq 2 ]; then
+ XMLOPT="--output-as=xml"
+ else
+ XMLOPT="--as-xml"
+ fi
+ crm_mon -1 $XMLOPT | grep -q -i -E "resource.*id=\"${OCF_RESOURCE_INSTANCE}\".* role=\"(Promoted|Master)\".* active=\"true\".* orphaned=\"false\".* failed=\"false\""
MASTER_ACTIVE=$?
MASTER_ACTIVE_CACHED="true"
fi
From 6f36172da222275124fb44736b4801ea884c3dd0 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Fri, 31 Jul 2020 14:31:47 +0200
Subject: [PATCH 2/2] galera/redis: support RHEL 8.1 pacemaker
based on dfdb4e645638948cd4dafaba9d65ebddb2152b2c that solves this issue
in pgsql
---
heartbeat/galera | 7 +++++++
heartbeat/redis.in | 7 +++++++
2 files changed, 14 insertions(+)
diff --git a/heartbeat/galera b/heartbeat/galera
index ba3de4b81..69d75a854 100755
--- a/heartbeat/galera
+++ b/heartbeat/galera
@@ -445,6 +445,13 @@ master_exists()
res=$?
if [ -z "$OCF_RESKEY_crm_feature_set" ] || [ $res -eq 2 ]; then
XMLOPT="--output-as=xml"
+ ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.2.0"
+ if [ $? -eq 1 ]; then
+ crm_mon -1 $XMLOPT >/dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ XMLOPT="--as-xml"
+ fi
+ fi
else
XMLOPT="--as-xml"
fi
diff --git a/heartbeat/redis.in b/heartbeat/redis.in
index 7e534db4a..8afdf08a9 100755
--- a/heartbeat/redis.in
+++ b/heartbeat/redis.in
@@ -276,6 +276,13 @@ master_is_active()
res=$?
if [ -z "$OCF_RESKEY_crm_feature_set" ] || [ $res -eq 2 ]; then
XMLOPT="--output-as=xml"
+ ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.2.0"
+ if [ $? -eq 1 ]; then
+ crm_mon -1 $XMLOPT >/dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ XMLOPT="--as-xml"
+ fi
+ fi
else
XMLOPT="--as-xml"
fi

View File

@ -0,0 +1,176 @@
From 716db89c1ab02ce4fed8ba0916ff1f6d01b4b636 Mon Sep 17 00:00:00 2001
From: Damien Ciabrini <damien.ciabrini@gmail.com>
Date: Thu, 18 Mar 2021 14:11:11 +0100
Subject: [PATCH] bundle: run crm_mon without performing validation
We have a use case in OpenStack where the resource agents run
inside bundles (containerized pacemaker remotes), and we cannot
always guarantee an exact match of pacemaker version on the host
and in containers. This can make crm_mon fail to run because
it may not have the latest version of the schema to validate the
CIB that it's getting.
Add a function crm_mon_no_validation to allow clustered services
like galera, redis and rabbitmq to work when there is a minor
version mismatch between host and containers. This doesn't
impact non-containerized use cases, there is a single version
of pacemaker binaries in this case.
Related-Bug: rhbz#1940363
---
configure.ac | 2 ++
heartbeat/{galera => galera.in} | 10 +++++-----
heartbeat/ocf-shellfuncs.in | 16 ++++++++++++++++
.../{rabbitmq-cluster => rabbitmq-cluster.in} | 4 ++--
heartbeat/redis.in | 4 ++--
5 files changed, 27 insertions(+), 9 deletions(-)
rename heartbeat/{galera => galera.in} (98%)
rename heartbeat/{rabbitmq-cluster => rabbitmq-cluster.in} (98%)
diff --git a/configure.ac b/configure.ac
index ed9dc09bf..11c1b786b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -979,6 +979,7 @@ AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate])
AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate])
AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88])
AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio])
+AC_CONFIG_FILES([heartbeat/galera], [chmod +x heartbeat/galera])
AC_CONFIG_FILES([heartbeat/gcp-pd-move], [chmod +x heartbeat/gcp-pd-move])
AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip])
AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip])
@@ -993,6 +994,7 @@ AC_CONFIG_FILES([heartbeat/machine-info], [chmod +x heartbeat/machine-info])
AC_CONFIG_FILES([heartbeat/mariadb], [chmod +x heartbeat/mariadb])
AC_CONFIG_FILES([heartbeat/mpathpersist], [chmod +x heartbeat/mpathpersist])
AC_CONFIG_FILES([heartbeat/nfsnotify], [chmod +x heartbeat/nfsnotify])
+AC_CONFIG_FILES([heartbeat/rabbitmq-cluster], [chmod +x heartbeat/rabbitmq-cluster])
AC_CONFIG_FILES([heartbeat/redis], [chmod +x heartbeat/redis])
AC_CONFIG_FILES([heartbeat/rsyslog], [chmod +x heartbeat/rsyslog])
AC_CONFIG_FILES([heartbeat/sg_persist], [chmod +x heartbeat/sg_persist])
diff --git a/heartbeat/galera b/heartbeat/galera.in
similarity index 98%
rename from heartbeat/galera
rename to heartbeat/galera.in
index c2f636f0d..7f5f2f1eb 100755
--- a/heartbeat/galera
+++ b/heartbeat/galera.in
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!@BASH_SHELL@
#
# Copyright (c) 2014 David Vossel <davidvossel@gmail.com>
# All Rights Reserved.
@@ -447,7 +447,7 @@ is_two_node_mode_active()
# crm_node or corosync-quorumtool cannot access various corosync
# flags when running inside a bundle, so only count the cluster
# members
- ocf_is_true "$OCF_RESKEY_two_node_mode" && ${HA_SBIN_DIR}/crm_mon -1X | xmllint --xpath "count(//nodes/node[@type='member'])" - | grep -q -w 2
+ ocf_is_true "$OCF_RESKEY_two_node_mode" && crm_mon_no_validation -1X | xmllint --xpath "count(//nodes/node[@type='member'])" - | grep -q -w 2
}
is_last_node_in_quorate_partition()
@@ -458,7 +458,7 @@ is_last_node_in_quorate_partition()
# is clean), we shouldn't consider ourself quorate.
local partition_members=$(${HA_SBIN_DIR}/crm_node -p | wc -w)
local quorate=$(${HA_SBIN_DIR}/crm_node -q)
- local clean_members=$(${HA_SBIN_DIR}/crm_mon -1X | xmllint --xpath 'count(//nodes/node[@type="member" and @unclean="false"])' -)
+ local clean_members=$(crm_mon_no_validation -1X | xmllint --xpath 'count(//nodes/node[@type="member" and @unclean="false"])' -)
[ "$partition_members" = 1 ] && [ "$quorate" = 1 ] && [ "$clean_members" = 2 ]
}
@@ -480,7 +480,7 @@ master_exists()
XMLOPT="--output-as=xml"
ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.2.0"
if [ $? -eq 1 ]; then
- crm_mon -1 $XMLOPT >/dev/null 2>&1
+ crm_mon_no_validation -1 $XMLOPT >/dev/null 2>&1
if [ $? -ne 0 ]; then
XMLOPT="--as-xml"
fi
@@ -461,7 +461,7 @@
else
XMLOPT="--as-xml"
fi
- crm_mon -1 $XMLOPT | grep -q -i -E "resource.*id=\"${INSTANCE_ATTR_NAME}\".*role=\"(Promoted|Master)\".*active=\"true\".*orphaned=\"false\".*failed=\"false\""
+ crm_mon_no_validation -1 $XMLOPT | grep -q -i -E "resource.*id=\"${INSTANCE_ATTR_NAME}\".*role=\"(Promoted|Master)\".*active=\"true\".*orphaned=\"false\".*failed=\"false\""
return $?
}
diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index ac75dfc87..760790cbd 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -41,6 +41,8 @@
unset LC_ALL; export LC_ALL
unset LANGUAGE; export LANGUAGE
+: ${HA_SBIN_DIR:=@sbindir@}
+
__SCRIPT_NAME=`basename $0`
if [ -z "$OCF_ROOT" ]; then
@@ -670,6 +672,20 @@ EOF
systemctl daemon-reload
}
+# usage: crm_mon_no_validation args...
+# run crm_mon without any cib schema validation
+# This is useful when an agent runs in a bundle to avoid potential
+# schema validation errors when host and bundle are not perfectly aligned
+# To be used, your shell must support on process substitution (e.g. bash)
+# returns:
+# <crm_mon error codes>
+crm_mon_no_validation()
+{
+ # The subshell prevents parsing error with incompatible shells
+ "$SHELL" -c "CIB_file=<(${HA_SBIN_DIR}/cibadmin -Q | sed 's/validate-with=\"[^\"]*\"/validate-with=\"none\"/') \
+ ${HA_SBIN_DIR}/crm_mon \$*" -- $*
+}
+
#
# pseudo_resource status tracking function...
#
diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster.in
similarity index 98%
rename from heartbeat/rabbitmq-cluster
rename to heartbeat/rabbitmq-cluster.in
index f7d48120c..abd0662f2 100755
--- a/heartbeat/rabbitmq-cluster
+++ b/heartbeat/rabbitmq-cluster.in
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!@BASH_SHELL@
#
# Copyright (c) 2014 David Vossel <davidvossel@gmail.com>
# All Rights Reserved.
@@ -195,7 +195,7 @@ rmq_join_list()
# ...
local remote_join_list=$(cibadmin -Q --xpath "//node_state//nvpair[@name='$RMQ_CRM_ATTR_COOKIE']" | grep "$RMQ_CRM_ATTR_COOKIE" | sed -n -e "s/^.*value=.\(.*\)\".*$/\1/p")
# The following expression prepares a filter like '-e overcloud-rabbit-0 -e overcloud-rabbit-1 -e ...'
- local filter=$(crm_mon -r --as-xml | xmllint --format --xpath "//nodes//node[@online='true' and @standby='false']/@name" - | xargs -n1 echo | awk -F= '{print "-e "$2}')
+ local filter=$(crm_mon_no_validation -r --as-xml | xmllint --format --xpath "//nodes//node[@online='true' and @standby='false']/@name" - | xargs -n1 echo | awk -F= '{print "-e "$2}')
# export the intersection which gives us only the nodes that
# a) wrote their namein the cib attrd
# b) run on nodes where pacemaker_remote is enabled
diff --git a/heartbeat/redis.in b/heartbeat/redis.in
index 8afdf08a9..f53d46964 100755
--- a/heartbeat/redis.in
+++ b/heartbeat/redis.in
@@ -278,7 +278,7 @@ master_is_active()
XMLOPT="--output-as=xml"
ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.2.0"
if [ $? -eq 1 ]; then
- crm_mon -1 $XMLOPT >/dev/null 2>&1
+ crm_mon_no_validation -1 $XMLOPT >/dev/null 2>&1
if [ $? -ne 0 ]; then
XMLOPT="--as-xml"
fi
@@ -286,7 +286,7 @@ master_is_active()
else
XMLOPT="--as-xml"
fi
- crm_mon -1 $XMLOPT | grep -q -i -E "resource.*id=\"${OCF_RESOURCE_INSTANCE}\".* role=\"(Promoted|Master)\".* active=\"true\".* orphaned=\"false\".* failed=\"false\""
+ crm_mon_no_validation -1 $XMLOPT | grep -q -i -E "resource.*id=\"${OCF_RESOURCE_INSTANCE}\".* role=\"(Promoted|Master)\".* active=\"true\".* orphaned=\"false\".* failed=\"false\""
MASTER_ACTIVE=$?
MASTER_ACTIVE_CACHED="true"
fi

View File

@ -0,0 +1,138 @@
From 2e7e8c4a7b318f4032493773732754e418279a14 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Mon, 11 Jan 2021 09:46:34 +0100
Subject: [PATCH] Fix several exponential/cubic complexity regexes found by Ben
Caller/Doyensec
---
pygments/lexers/archetype.py | 2 +-
pygments/lexers/factor.py | 4 ++--
pygments/lexers/jvm.py | 1 -
pygments/lexers/matlab.py | 6 +++---
pygments/lexers/objective.py | 4 ++--
pygments/lexers/templates.py | 2 +-
pygments/lexers/varnish.py | 2 +-
8 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py
index 65046613d..26f5ea8c9 100644
--- a/pygments/lexers/archetype.py
+++ b/pygments/lexers/archetype.py
@@ -58,7 +58,7 @@ class AtomsLexer(RegexLexer):
(r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|'
r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date),
(r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
- (r'[+-]?(\d+)*\.\d+%?', Number.Float),
+ (r'[+-]?\d*\.\d+%?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[+-]?\d+%?', Number.Integer),
],
diff --git a/pygments/lexers/factor.py b/pygments/lexers/factor.py
index be7b30dff..9200547f9 100644
--- a/pygments/lexers/factor.py
+++ b/pygments/lexers/factor.py
@@ -265,7 +265,7 @@ class FactorLexer(RegexLexer):
(r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),
# strings
- (r'"""\s+(?:.|\n)*?\s+"""', String),
+ (r'"""\s(?:.|\n)*?\s"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),
@@ -322,7 +322,7 @@ class FactorLexer(RegexLexer):
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
- (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
+ (r'(\{\s+)(\S+)(\s[^}]+\s\}\s)',
bygroups(Text, Name.Variable, Text)),
(r'\S+', Name.Variable),
],
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 62dfd45e5..9a9397c2d 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -981,7 +981,6 @@ class CeylonLexer(RegexLexer):
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
- (r'".*``.*``.*"', String.Interpol),
(r'(\.)([a-z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py
index 4823c6a7e..578848623 100644
--- a/pygments/lexers/matlab.py
+++ b/pygments/lexers/matlab.py
@@ -137,7 +137,7 @@ class MatlabLexer(RegexLexer):
(r'.', Comment.Multiline),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
@@ -638,7 +638,7 @@ class OctaveLexer(RegexLexer):
(r"[^']*'", String, '#pop'),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
@@ -710,7 +710,7 @@ class ScilabLexer(RegexLexer):
(r'.', String, '#pop'),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py
index 34e4062f6..38ac9bb05 100644
--- a/pygments/lexers/objective.py
+++ b/pygments/lexers/objective.py
@@ -261,11 +261,11 @@ class LogosLexer(ObjectiveCppLexer):
'logos_classname'),
(r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
bygroups(Keyword, Text, Name.Class)),
- (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)',
+ (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)',
bygroups(Keyword, Text, Name.Variable, Text, String, Text)),
(r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation),
'function'),
- (r'(%new)(\s*)(\()(\s*.*?\s*)(\))',
+ (r'(%new)(\s*)(\()(.*?)(\))',
bygroups(Keyword, Text, Keyword, String, Keyword)),
(r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)),
inherit,
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
index 33c06c4c4..5c3346b4c 100644
--- a/pygments/lexers/templates.py
+++ b/pygments/lexers/templates.py
@@ -1405,7 +1405,7 @@ class EvoqueLexer(RegexLexer):
# see doc for handling first name arg: /directives/evoque/
# + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
# should be using(PythonLexer), not passed out as String
- (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
+ (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?'
r'(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, using(PythonLexer), Punctuation)),
diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py
index 23653f7a1..9d358bd7c 100644
--- a/pygments/lexers/varnish.py
+++ b/pygments/lexers/varnish.py
@@ -61,7 +61,7 @@ def analyse_text(text):
bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)),
(r'(\.probe)(\s*=\s*)(\{)',
bygroups(Name.Attribute, Operator, Punctuation), 'probe'),
- (r'(\.\w+\b)(\s*=\s*)([^;]*)(\s*;)',
+ (r'(\.\w+\b)(\s*=\s*)([^;\s]*)(\s*;)',
bygroups(Name.Attribute, Operator, using(this), Punctuation)),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),

View File

@ -0,0 +1,102 @@
From 3ae6d8f0a34d099945d9bf005ed45dbfe9452202 Mon Sep 17 00:00:00 2001
From: kj1724 <78624900+kj1724@users.noreply.github.com>
Date: Wed, 28 Apr 2021 10:22:38 -0400
Subject: [PATCH] gcp-vpc-move-vip.in: Adds retries
If the cluster fails a monitoring event, it will try to restart the resource. If the resource agent makes an API/metadata call that fails at that time, the resource will be considered "failed", but in certain cases also "unconfigured", which prevents further operations.
These changes can help the agent recover from certain intermittent failures.
---
heartbeat/gcp-vpc-move-vip.in | 62 ++++++++++++++++++++---------------
1 file changed, 35 insertions(+), 27 deletions(-)
diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
index bbbd87b7a9..c411555110 100755
--- a/heartbeat/gcp-vpc-move-vip.in
+++ b/heartbeat/gcp-vpc-move-vip.in
@@ -50,6 +50,8 @@ REMOVE = 1
CONN = None
THIS_VM = None
ALIAS = None
+MAX_RETRIES = 3
+RETRY_BACKOFF_SECS = 1
METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/'
METADATA_HEADERS = {'Metadata-Flavor': 'Google'}
METADATA = \
@@ -111,18 +113,37 @@ def get_metadata(metadata_key, params=None, timeout=None):
Returns:
HTTP response from the GET request.
-
- Raises:
- urlerror.HTTPError: raises when the GET request fails.
"""
- timeout = timeout or 60
- metadata_url = os.path.join(METADATA_SERVER, metadata_key)
- params = urlparse.urlencode(params or {})
- url = '%s?%s' % (metadata_url, params)
- request = urlrequest.Request(url, headers=METADATA_HEADERS)
- request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
- return request_opener.open(
- request, timeout=timeout * 1.1).read().decode("utf-8")
+ for i in range(MAX_RETRIES):
+ try:
+ timeout = timeout or 60
+ metadata_url = os.path.join(METADATA_SERVER, metadata_key)
+ params = urlparse.urlencode(params or {})
+ url = '%s?%s' % (metadata_url, params)
+ request = urlrequest.Request(url, headers=METADATA_HEADERS)
+ request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
+ return request_opener.open(
+ request, timeout=timeout * 1.1).read().decode("utf-8")
+ except Exception as e:
+ logger.error('Couldn\'t get instance name, is this running inside GCE?: '
+ + str(e))
+ time.sleep(RETRY_BACKOFF_SECS * (i + 1))
+
+ # If the retries are exhausted we exit with a generic error.
+ sys.exit(OCF_ERR_GENERIC)
+
+
+def create_api_connection():
+ for i in range(MAX_RETRIES):
+ try:
+ return googleapiclient.discovery.build('compute', 'v1',
+ cache_discovery=False)
+ except Exception as e:
+ logger.error('Couldn\'t connect with google api: ' + str(e))
+ time.sleep(RETRY_BACKOFF_SECS * (i + 1))
+
+ # If the retries are exhausted we exit with a generic error.
+ sys.exit(OCF_ERR_GENERIC)
def get_instance(project, zone, instance):
@@ -358,24 +379,11 @@ def gcp_alias_status(alias):
def validate():
global ALIAS
- global CONN
global THIS_VM
+ global CONN
- # Populate global vars
- try:
- CONN = googleapiclient.discovery.build('compute', 'v1',
- cache_discovery=False)
- except Exception as e:
- logger.error('Couldn\'t connect with google api: ' + str(e))
- sys.exit(OCF_ERR_CONFIGURED)
-
- try:
- THIS_VM = get_metadata('instance/name')
- except Exception as e:
- logger.error('Couldn\'t get instance name, is this running inside GCE?: '
- + str(e))
- sys.exit(OCF_ERR_CONFIGURED)
-
+ CONN = create_api_connection()
+ THIS_VM = get_metadata('instance/name')
ALIAS = os.environ.get('OCF_RESKEY_alias_ip')
if not ALIAS:
logger.error('Missing alias_ip parameter')

View File

@ -0,0 +1,82 @@
diff --color -uNr a/heartbeat/lvmlockd b/heartbeat/lvmlockd
--- a/heartbeat/lvmlockd 2021-06-11 16:08:37.725598299 +0200
+++ b/heartbeat/lvmlockd 2021-06-11 16:10:38.690910781 +0200
@@ -59,14 +59,6 @@
<shortdesc lang="en">This agent manages the lvmlockd daemon</shortdesc>
<parameters>
-<parameter name="with_cmirrord" unique="0" required="0">
-<longdesc lang="en">
-Start with cmirrord (cluster mirror log daemon).
-</longdesc>
-<shortdesc lang="en">activate cmirrord</shortdesc>
-<content type="boolean" default="false" />
-</parameter>
-
<parameter name="pidfile" unique="0">
<longdesc lang="en">pid file</longdesc>
<shortdesc lang="en">pid file</shortdesc>
@@ -110,7 +102,6 @@
: ${OCF_RESKEY_pidfile:="/run/lvmlockd.pid"}
LOCKD="lvmlockd"
-CMIRRORD="cmirrord"
# 0.5s sleep each count
TIMEOUT_COUNT=20
@@ -150,12 +141,6 @@
rc=$?
mirror_rc=$rc
- if ocf_is_true $OCF_RESKEY_with_cmirrord; then
- pid=$(pgrep $CMIRRORD | head -n1)
- daemon_is_running "$pid"
- mirror_rc=$?
- fi
-
# If these ever don't match, return error to force recovery
if [ $mirror_rc -ne $rc ]; then
return $OCF_ERR_GENERIC
@@ -235,16 +220,6 @@
return $OCF_SUCCESS
fi
- if ocf_is_true $OCF_RESKEY_with_cmirrord; then
- ocf_log info "starting ${CMIRRORD}..."
- $CMIRRORD
- rc=$?
- if [ $rc -ne $OCF_SUCCESS ] ; then
- ocf_exit_reason "Failed to start ${CMIRRORD}, exit code: $rc"
- return $OCF_ERR_GENERIC
- fi
- fi
-
if [ ! -z "$OCF_RESKEY_socket_path" ] ; then
extras="$extras -s ${OCF_RESKEY_socket_path}"
fi
@@ -341,13 +316,8 @@
pid=$(get_pid)
kill_stop $LOCKD $pid
- if ocf_is_true $OCF_RESKEY_with_cmirrord; then
- pid=$(pgrep $CMIRRORD)
- kill_stop $CMIRRORD $pid
- fi
-
if silent_status ; then
- ocf_exit_reason "Failed to stop, ${LOCKD} or ${CMIRRORD} still running."
+ ocf_exit_reason "Failed to stop, ${LOCKD} still running."
return $OCF_ERR_GENERIC
fi
@@ -370,10 +340,6 @@
check_binary pgrep
check_binary lvmlockctl
- if ocf_is_true $OCF_RESKEY_with_cmirrord; then
- check_binary $CMIRRORD
- fi
-
return $OCF_SUCCESS
}

View File

@ -0,0 +1,39 @@
From 5729c79c6ab06f3dacf1fe8dafab9403e5560e34 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 20 May 2021 10:14:49 +0200
Subject: [PATCH] LVM-activate: fix drop-in check to avoid re-creating drop-in
file when it already exists
---
heartbeat/LVM-activate | 17 +++++++----------
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
index a8e40dce4..53223367e 100755
--- a/heartbeat/LVM-activate
+++ b/heartbeat/LVM-activate
@@ -820,17 +820,14 @@ lvm_start() {
if systemd_is_running ; then
# Create drop-in to deactivate VG before stopping
# storage services during shutdown/reboot.
- after=$(systemctl show resource-agents-deps.target.d \
- --property=After | cut -d'=' -f2)
-
- case "$after" in
- *" blk-availability.service "*)
- ;;
- *)
- systemd_drop_in "99-LVM-activate" "After" \
+ systemctl show resource-agents-deps.target \
+ --property=After | cut -d'=' -f2 | \
+ grep -qE "(^|\s)blk-availability.service(\s|$)"
+
+ if [ "$?" -ne 0 ]; then
+ systemd_drop_in "99-LVM-activate" "After" \
"blk-availability.service"
- ;;
- esac
+ fi
# If blk-availability isn't started, the "After="
# directive has no effect.

View File

@ -0,0 +1,74 @@
From 7850aea1600389beb16c7aad40bba1b76ae694c4 Mon Sep 17 00:00:00 2001
From: Damien Ciabrini <dciabrin@redhat.com>
Date: Tue, 15 Jun 2021 20:03:20 +0200
Subject: [PATCH] podman: workaround race during container creation
podman and OCI runtime have a race that sometimes causes
a container to fail to be created and run [1] if the
cgroup to be used is not available yet. When that happens,
try to recreate it until it succeeds or the start
timeout is reached.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1972209
---
heartbeat/podman | 32 ++++++++++++++++++++++++++++++--
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/heartbeat/podman b/heartbeat/podman
index 5b707f3f5..034dfff76 100755
--- a/heartbeat/podman
+++ b/heartbeat/podman
@@ -358,8 +358,18 @@ run_new_container()
local rc
ocf_log info "running container $CONTAINER for the first time"
- ocf_run podman run $opts $image $cmd
+ out=$(podman run $opts $image $cmd 2>&1)
rc=$?
+
+ if [ -n "$out" ]; then
+ out="$(echo "$out" | tr -s ' \t\r\n' ' ')"
+ if [ $rc -eq 0 ]; then
+ ocf_log info "$out"
+ else
+ ocf_log err "$out"
+ fi
+ fi
+
if [ $rc -eq 125 ]; then
# If an internal podman error occurred, it might be because
# the internal storage layer still references an old container
@@ -370,6 +380,24 @@ run_new_container()
ocf_run podman rm --storage $CONTAINER
ocf_run podman run $opts $image $cmd
rc=$?
+ elif [ $rc -eq 127 ]; then
+ # rhbz#1972209: podman 3.0.x seems to be hit by a race
+ # where the cgroup is not yet set up properly when the OCI
+ # runtime configures the container. If that happens, recreate
+ # the container as long as we get the same error code or
+ # until start timeout preempts us.
+ while [ $rc -eq 127 ] && (echo "$out" | grep -q "cgroup.*scope not found") ; do
+ ocf_log warn "Internal podman error while assigning cgroup. Retrying."
+ # Arbitrary sleep to prevent consuming all CPU while looping
+ sleep 1
+ podman rm -f "$CONTAINER"
+ out=$(podman run $opts $image $cmd 2>&1)
+ rc=$?
+ done
+ # Log the created container ID if it succeeded
+ if [ $rc -eq 0 ]; then
+ ocf_log info "$out"
+ fi
fi
return $rc
@@ -422,7 +450,7 @@ podman_start()
fi
if [ $rc -ne 0 ]; then
- ocf_exit_reason "podman failed to launch container"
+ ocf_exit_reason "podman failed to launch container (rc: $rc)"
return $OCF_ERR_GENERIC
fi

View File

@ -70,7 +70,7 @@
Name: resource-agents
Summary: Open Source HA Reusable Cluster Resource Scripts
Version: 4.1.1
Release: 86%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}
Release: 98%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}
License: GPLv2+ and LGPLv2+
URL: https://github.com/ClusterLabs/resource-agents
%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
@ -264,6 +264,25 @@ Patch172: bz1901357-crypt-2-dont-sanity-check-during-probe.patch
Patch173: bz1903677-ocf-shellfuncs-fix-traceback-redirection-bash5.patch
Patch174: bz1913932-1-gcp-vpc-move-add-project-parameter.patch
Patch175: bz1913932-2-gcp-vpc-move-route-fixes.patch
Patch176: bz1913932-3-gcp-vpc-move-route-make-vpc_network-optional.patch
Patch177: bz1937142-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch
Patch178: bz1940363-1-galera-redis-use-output-as.patch
Patch179: bz1940363-2-bundle-disable-validate-with.patch
Patch180: bz1891883-ethmonitor-vlan-fix.patch
Patch181: bz1902045-iface-vlan-vlan-not-unique.patch
Patch182: bz1924363-nfsserver-error-check-unmount.patch
Patch183: bz1932863-VirtualDomain-fix-pid-status.patch
Patch184: bz1920698-podman-return-not-running-probe.patch
Patch185: bz1939992-awsvip-dont-partially-match-IPs.patch
Patch186: bz1940094-aws-agents-dont-spam-logs.patch
Patch187: bz1939281-aws-vpc-move-ip-add-ENI-lookup.patch
Patch188: bz1934651-db2-add-PRIMARY-REMOTE_CATCHUP_PENDING-CONNECTED.patch
Patch189: bz1872754-pgsqlms-new-ra.patch
Patch190: bz1957765-gcp-vpc-move-vip-retry.patch
Patch191: bz1969968-lvmlockd-remove-with_cmirrord.patch
Patch192: bz1972035-LVM-activate-fix-drop-in.patch
Patch193: bz1972743-podman-fix-container-creation-race.patch
Patch194: bz1509319-storage-mon-new-ra.patch
# bundle patches
Patch1000: 7-gcp-bundled.patch
@ -274,6 +293,8 @@ Patch1004: bz1691456-gcloud-dont-detect-python2.patch
Patch1005: aliyun-vpc-move-ip-4-bundled.patch
Patch1006: python3-syntax-fixes.patch
Patch1007: aliyuncli-python3-fixes.patch
Patch1008: bz1935422-python-pygments-fix-CVE-2021-20270.patch
Patch1009: bz1943464-python-pygments-fix-CVE-2021-27291.patch
Obsoletes: heartbeat-resources <= %{version}
Provides: heartbeat-resources = %{version}
@ -413,6 +434,21 @@ The Google Cloud Platform resource agents allows Google Cloud
Platform instances to be managed in a cluster environment.
%endif
%package paf
License: PostgreSQL
Summary: PostgreSQL Automatic Failover (PAF) resource agent
%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
Group: System Environment/Base
%else
Group: Productivity/Clustering/HA
%endif
Requires: %{name} = %{version}-%{release}
Requires: perl-interpreter
%description paf
PostgreSQL Automatic Failover (PAF) resource agents allows PostgreSQL
databases to be managed in a cluster environment.
%prep
%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos_version} == 0 && 0%{?rhel} == 0
%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. Please install the correct build packages or define the required macros manually.}
@ -595,9 +631,29 @@ exit 1
%patch173 -p1
%patch174 -p1
%patch175 -p1
%patch176 -p1
%patch177 -p1
%patch178 -p1
%patch179 -p1
%patch180 -p1
%patch181 -p1
%patch182 -p1
%patch183 -p1
%patch184 -p1
%patch185 -p1
%patch186 -p1
%patch187 -p1 -F2
%patch188 -p1
%patch189 -p1
%patch190 -p1
%patch191 -p1
%patch192 -p1
%patch193 -p1
%patch194 -p1 -F2
chmod 755 heartbeat/nova-compute-wait
chmod 755 heartbeat/NovaEvacuate
chmod 755 heartbeat/pgsqlms
# bundles
mkdir -p %{bundled_lib_dir}/gcp
@ -736,6 +792,12 @@ cp %{aliyuncli_dir}/LICENSE %{aliyuncli}_LICENSE
# aliyun Python 3 fixes
%patch1006 -p1
%patch1007 -p1
# fix CVE's in python-pygments
pushd %{googlecloudsdk_dir}/lib/third_party
%patch1008 -p1 -F2
%patch1009 -p1 -F2
popd
%endif
%build
@ -965,6 +1027,9 @@ rm -rf %{buildroot}
%exclude /usr/lib/ocf/resource.d/heartbeat/gcp*
%exclude %{_mandir}/man7/*gcp*
%exclude /usr/lib/%{name}/%{bundled_lib_dir}
%exclude /usr/lib/ocf/resource.d/heartbeat/pgsqlms
%exclude %{_mandir}/man7/*pgsqlms*
%exclude %{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
###
# Moved to separate packages
@ -1157,9 +1222,70 @@ ccs_update_schema > /dev/null 2>&1 ||:
/usr/lib/%{name}/%{bundled_lib_dir}/gcp
%endif
%files paf
%doc paf_README.md
%license paf_LICENSE
%defattr(-,root,root)
%{_usr}/lib/ocf/resource.d/heartbeat/pgsqlms
%{_mandir}/man7/*pgsqlms*
%{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
%changelog
* Tue Jan 19 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-86
- gcp-vpc-move-route, gcp-vpc-move-vip: add project parameter
* Mon Aug 30 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-98
- storage-mon: new resource agent
Resolves: rhbz#1509319
* Thu Jun 17 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-97
- podman: fix possible race during container creation
Resolves: rhbz#1972743
* Tue Jun 15 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-96
- LVM-activate: fix drop-in check to avoid re-creating drop-in
Resolves: rhbz#1972035
* Fri Jun 11 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-95
- lvmlockd: remove cmirrord support, as cmirrord is incompatible w/lvmlockd
Resolves: rhbz#1969968
* Wed May 12 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-94
- gcp-vpc-move-vip: add retry logic
Resolves: rhbz#1957765
* Wed Apr 28 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-93
- db2: add PRIMARY/REMOTE_CATCHUP_PENDING/CONNECTED status to promote-check
- pgsqlms: new resource agent
- python-pygments: fix CVE-2021-27291 and CVE-2021-20270
Resolves: rhbz#1872754, rhbz#1934651, rhbz#1935422, rhbz#1943464
* Thu Apr 8 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-91
- ethmonitor: fix vlan regex
- iface-vlan: make vlan parameter not unique
- nfsserver: error-check unmount
- VirtualDomain: fix pid status regex
- podman: return NOT_RUNNING when monitor cmd fails
- awsvip: dont partially match similar IPs during
- aws agents: dont spam log files
- aws-vpc-move-ip: add ENI lookup
Resolves: rhbz#1891883, rhbz#1902045, rhbz#1924363, rhbz#1932863
Resolves: rhbz#1920698, rhbz#1939992, rhbz#1940094, rhbz#1939281
* Mon Mar 22 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-90
- galera/rabbitmq-cluster/redis: run crm_mon without validation when
running in bundle (1940363)
* Thu Mar 11 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-89
- azure-lb: redirect to avoid nc dying with EPIPE error (1937142)
* Thu Feb 25 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-87
- gcp-vpc-move-route, gcp-vpc-move-vip: add project parameter and
make vpc_network parameter optional
Resolves: rhbz#1913932