import resource-agents-4.1.1-98.el8

CentOS Sources 2021-11-09 04:46:54 -05:00 committed by Stepan Oksanichenko
parent 2010234b37
commit c9677f5977
18 changed files with 4791 additions and 24 deletions


@@ -0,0 +1,714 @@
From 90b595650d7d8a6f6a69a9f7060c6406aa731c18 Mon Sep 17 00:00:00 2001
From: "Fabio M. Di Nitto" <fdinitto@redhat.com>
Date: Wed, 28 Jul 2021 10:08:10 +0200
Subject: [PATCH] Add storage-mon pacemaker health check
Signed-off-by: Fabio M. Di Nitto <fdinitto@redhat.com>
---
.gitignore | 41 ++++++
configure.ac | 1 +
doc/man/Makefile.am | 3 +-
heartbeat/Makefile.am | 17 +--
heartbeat/storage-mon.in | 263 +++++++++++++++++++++++++++++++++++++++
tools/Makefile.am | 5 +-
tools/storage_mon.c | 263 +++++++++++++++++++++++++++++++++++++++
7 files changed, 583 insertions(+), 10 deletions(-)
create mode 100644 heartbeat/storage-mon.in
create mode 100644 tools/storage_mon.c
diff --git a/.gitignore b/.gitignore
index 38d3566205..f7277bf04e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,6 +45,46 @@ heartbeat/ocf-shellfuncs
heartbeat/send_ua
heartbeat/shellfuncs
heartbeat/*.pyc
+heartbeat/AoEtarget
+heartbeat/CTDB
+heartbeat/ManageRAID
+heartbeat/ManageVE
+heartbeat/Squid
+heartbeat/SysInfo
+heartbeat/aws-vpc-route53
+heartbeat/azure-events
+heartbeat/clvm
+heartbeat/conntrackd
+heartbeat/dnsupdate
+heartbeat/dummypy
+heartbeat/eDir88
+heartbeat/fio
+heartbeat/galera
+heartbeat/gcp-pd-move
+heartbeat/gcp-vpc-move-ip
+heartbeat/gcp-vpc-move-route
+heartbeat/gcp-vpc-move-vip
+heartbeat/iSCSILogicalUnit
+heartbeat/iSCSITarget
+heartbeat/jira
+heartbeat/kamailio
+heartbeat/lxc
+heartbeat/lxd-info
+heartbeat/machine-info
+heartbeat/mariadb
+heartbeat/mpathpersist
+heartbeat/nfsnotify
+heartbeat/openstack-info
+heartbeat/rabbitmq-cluster
+heartbeat/redis
+heartbeat/rsyslog
+heartbeat/sg_persist
+heartbeat/slapd
+heartbeat/smb-share
+heartbeat/storage-mon
+heartbeat/sybaseASE
+heartbeat/syslog-ng
+heartbeat/vsftpd
include/agent_config.h
include/config.h
include/config.h.in
@@ -61,6 +101,7 @@ systemd/resource-agents.conf
tools/findif
tools/ocf-tester
tools/send_arp
+tools/storage_mon
tools/tickle_tcp
tools/ocft/README
tools/ocft/README.zh_CN
diff --git a/configure.ac b/configure.ac
index 717fb95432..c125df98f6 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1002,6 +1002,7 @@ AC_CONFIG_FILES([heartbeat/rsyslog], [chmod +x heartbeat/rsyslog])
AC_CONFIG_FILES([heartbeat/smb-share], [chmod +x heartbeat/smb-share])
AC_CONFIG_FILES([heartbeat/sg_persist], [chmod +x heartbeat/sg_persist])
AC_CONFIG_FILES([heartbeat/slapd], [chmod +x heartbeat/slapd])
+AC_CONFIG_FILES([heartbeat/storage-mon], [chmod +x heartbeat/storage-mon])
AC_CONFIG_FILES([heartbeat/sybaseASE], [chmod +x heartbeat/sybaseASE])
AC_CONFIG_FILES([heartbeat/syslog-ng], [chmod +x heartbeat/syslog-ng])
AC_CONFIG_FILES([heartbeat/vsftpd], [chmod +x heartbeat/vsftpd])
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index 947d83cb2b..97904ccb16 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -138,6 +138,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \
ocf_heartbeat_mariadb.7 \
ocf_heartbeat_mdraid.7 \
ocf_heartbeat_minio.7 \
+ ocf_heartbeat_mpathpersist.7 \
ocf_heartbeat_mysql.7 \
ocf_heartbeat_mysql-proxy.7 \
ocf_heartbeat_nagios.7 \
@@ -175,7 +176,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \
ocf_heartbeat_smb-share.7 \
ocf_heartbeat_sybaseASE.7 \
ocf_heartbeat_sg_persist.7 \
- ocf_heartbeat_mpathpersist.7 \
+ ocf_heartbeat_storage-mon.7 \
ocf_heartbeat_symlink.7 \
ocf_heartbeat_syslog-ng.7 \
ocf_heartbeat_tomcat.7 \
diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 9af44cc127..5d52d211f2 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -32,22 +32,22 @@ ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat
dtddir = $(datadir)/$(PACKAGE_NAME)
dtd_DATA = ra-api-1.dtd metadata.rng
+ocf_PROGRAMS =
+
if USE_IPV6ADDR_AGENT
-ocf_PROGRAMS = IPv6addr
-else
-ocf_PROGRAMS =
+ocf_PROGRAMS += IPv6addr
endif
+halib_PROGRAMS =
+
if IPV6ADDR_COMPATIBLE
-halib_PROGRAMS = send_ua
-else
-halib_PROGRAMS =
+halib_PROGRAMS += send_ua
endif
IPv6addr_SOURCES = IPv6addr.c IPv6addr_utils.c
-send_ua_SOURCES = send_ua.c IPv6addr_utils.c
-
IPv6addr_LDADD = -lplumb $(LIBNETLIBS)
+
+send_ua_SOURCES = send_ua.c IPv6addr_utils.c
send_ua_LDADD = $(LIBNETLIBS)
osp_SCRIPTS = nova-compute-wait \
@@ -170,6 +170,7 @@ ocf_SCRIPTS = AoEtarget \
mpathpersist \
slapd \
+ storage-mon \
sybaseASE \
symlink \
syslog-ng \
tomcat \
diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in
new file mode 100644
index 0000000000..5b289fe554
--- /dev/null
+++ b/heartbeat/storage-mon.in
@@ -0,0 +1,263 @@
+#!@BASH_SHELL@
+#
+# Copyright (C) 2021 Red Hat, Inc. All rights reserved.
+#
+# Authors: Christine Caulfield <ccaulfie@redhat.com>
+# Fabio M. Di Nitto <fdinitto@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+
+#
+# Checks storage I/O status of all given drives and writes the #health-storage
+# status into the CIB
+# Implementation is heavily based on ocf:pacemaker:HealthSMART
+#
+# It sends a single block of IO to a random location on the device and reports any errors returned.
+# If the IO hangs, that will also be reported (bear in mind that it may also hang the C app in some
+# instances).
+#
+# It's worth making a note in the RA description that the smartmon RA is also recommended (this
+# does not replace it), and that Pacemaker health checking should be configured.
+#
+# https://clusterlabs.org/pacemaker/doc/2.1/Pacemaker_Explained/singlehtml/index.html#tracking-node-health
+
+#######################################################################
+
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+#
+STORAGEMON=$HA_BIN/storage_mon
+ATTRDUP=/usr/sbin/attrd_updater
+
+OCF_RESKEY_CRM_meta_interval_default="0"
+OCF_RESKEY_io_timeout_default="10"
+OCF_RESKEY_inject_errors_default=""
+OCF_RESKEY_state_file_default="${HA_RSCTMP%%/}/storage-mon-${OCF_RESOURCE_INSTANCE}.state"
+
+# Explicitly list all environment variables used, to make static analysis happy
+: ${OCF_RESKEY_CRM_meta_interval:=${OCF_RESKEY_CRM_meta_interval_default}}
+: ${OCF_RESKEY_drives:=""}
+: ${OCF_RESKEY_io_timeout:=${OCF_RESKEY_io_timeout_default}}
+: ${OCF_RESKEY_inject_errors:=${OCF_RESKEY_inject_errors_default}}
+: ${OCF_RESKEY_state_file:=${OCF_RESKEY_state_file_default}}
+
+#######################################################################
+
+meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="storage-mon">
+<version>1.0</version>
+
+<longdesc lang="en">
+System health agent that checks the storage I/O status of the given drives and
+updates the #health-storage attribute. Usage is highly recommended in combination
+with the storage-mon monitoring agent. The agent currently supports a maximum of 25
+devices per instance.
+</longdesc>
+<shortdesc lang="en">storage I/O health status</shortdesc>
+
+<parameters>
+
+<parameter name="state_file" unique="1">
+<longdesc lang="en">
+Location to store the resource state in.
+</longdesc>
+<shortdesc lang="en">State file</shortdesc>
+<content type="string" default="${OCF_RESKEY_state_file_default}" />
+</parameter>
+
+<parameter name="drives" unique="1" required="1">
+<longdesc lang="en">
+The drive(s) to check as a SPACE separated list. Enter the full path to the device, e.g. "/dev/sda".
+</longdesc>
+<shortdesc lang="en">Drives to check</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="io_timeout" unique="0">
+<longdesc lang="en">
+Specify disk I/O timeout in seconds. Minimum 1, recommended 10 (default).
+</longdesc>
+<shortdesc lang="en">Disk I/O timeout</shortdesc>
+<content type="integer" default="${OCF_RESKEY_io_timeout_default}" />
+</parameter>
+
+<parameter name="inject_errors" unique="0">
+<longdesc lang="en">
+Used only for testing! Specify % of I/O errors to simulate drive failures.
+</longdesc>
+<shortdesc lang="en">Specify % of I/O errors to simulate drive failures</shortdesc>
+<content type="integer" default="${OCF_RESKEY_inject_errors_default}" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="10s" />
+<action name="stop" timeout="120s" />
+<action name="monitor" timeout="120s" interval="30s" start-delay="0s" />
+<action name="meta-data" timeout="5s" />
+<action name="validate-all" timeout="10s" />
+</actions>
+</resource-agent>
+END
+ return $OCF_SUCCESS
+}
+
+#######################################################################
+
+storage-mon_usage() {
+ cat <<END
+usage: $0 {start|stop|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+ return $1
+}
+
+storage-mon_init() {
+ #Test for presence of storage_mon helper
+ if [ ! -x "$STORAGEMON" ] ; then
+ ocf_log err "${STORAGEMON} not installed."
+ exit $OCF_ERR_INSTALLED
+ fi
+
+ i=0
+ for DRIVE in ${OCF_RESKEY_drives}; do
+ if [ ! -e "$DRIVE" ] ; then
+ ocf_log err "${DRIVE} not found on the system"
+ exit $OCF_ERR_INSTALLED
+ fi
+ i=$((i + 1))
+ done
+
+ if [ "$i" -gt "25" ]; then
+ ocf_log err "Too many drives ($i) configured for this agent. Max 25."
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ if [ "${OCF_RESKEY_io_timeout}" -lt "1" ]; then
+ ocf_log err "Minimum timeout is 1. Recommended 10 (default)."
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ if [ -n "${OCF_RESKEY_inject_errors}" ]; then
+ if [ "${OCF_RESKEY_inject_errors}" -lt "1" ] || [ "${OCF_RESKEY_inject_errors}" -gt "100" ]; then
+ ocf_log err "Inject errors % has to be a value between 1 and 100."
+ exit $OCF_ERR_CONFIGURED
+ fi
+ fi
+}
+
+storage-mon_validate() {
+ storage-mon_init
+
+ # Is the state directory writable?
+ state_dir=$(dirname "$OCF_RESKEY_state_file")
+ touch "$state_dir/$$"
+ if [ $? -ne 0 ]; then
+ return $OCF_ERR_CONFIGURED
+ fi
+ rm "$state_dir/$$"
+
+ return $OCF_SUCCESS
+}
+
+storage-mon_monitor() {
+ storage-mon_init
+
+ # Monitor _MUST!_ differentiate correctly between running
+ # (SUCCESS), failed (ERROR) or _cleanly_ stopped (NOT RUNNING).
+ # That is THREE states, not just yes/no.
+
+ if [ ! -f "${OCF_RESKEY_state_file}" ]; then
+ return $OCF_NOT_RUNNING
+ fi
+
+ # generate command line
+ cmdline=""
+ for DRIVE in ${OCF_RESKEY_drives}; do
+ cmdline="$cmdline --device $DRIVE --score 1"
+ done
+ cmdline="$cmdline --timeout ${OCF_RESKEY_io_timeout}"
+ if [ -n "${OCF_RESKEY_inject_errors}" ]; then
+ cmdline="$cmdline --inject-errors-percent ${OCF_RESKEY_inject_errors}"
+ fi
+ $STORAGEMON $cmdline
+ if [ $? -ne 0 ]; then
+ status="red"
+ else
+ status="green"
+ fi
+
+ "$ATTRDUP" -n "#health-${OCF_RESOURCE_INSTANCE}" -U "$status" -d "5s"
+ return $OCF_SUCCESS
+}
+
+storage-mon_start() {
+ storage-mon_monitor
+ if [ $? -eq $OCF_SUCCESS ]; then
+ return $OCF_SUCCESS
+ fi
+ touch "${OCF_RESKEY_state_file}"
+}
+
+storage-mon_stop() {
+ storage-mon_monitor
+ if [ $? -eq $OCF_SUCCESS ]; then
+ rm "${OCF_RESKEY_state_file}"
+ fi
+ return $OCF_SUCCESS
+}
+
+storage-mon_validate() {
+ storage-mon_init
+
+ # Is the state directory writable?
+ state_dir=$(dirname "${OCF_RESKEY_state_file}")
+ touch "$state_dir/$$"
+ if [ $? -ne 0 ]; then
+ return $OCF_ERR_CONFIGURED
+ fi
+ rm "$state_dir/$$"
+
+ return $OCF_SUCCESS
+}
+
+case "$__OCF_ACTION" in
+ start) storage-mon_start;;
+ stop) storage-mon_stop;;
+ monitor) storage-mon_monitor;;
+ validate-all) storage-mon_validate;;
+ meta-data) meta_data;;
+ usage|help) storage-mon_usage $OCF_SUCCESS;;
+ *) storage-mon_usage $OCF_ERR_UNIMPLEMENTED;;
+esac
+rc=$?
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
+exit $rc
+# vim: set filetype=sh:
diff --git a/tools/Makefile.am b/tools/Makefile.am
index 1186967cfb..83ff43651d 100644
--- a/tools/Makefile.am
+++ b/tools/Makefile.am
@@ -29,7 +29,8 @@ EXTRA_DIST = ocf-tester.8 sfex_init.8
sbin_PROGRAMS =
sbin_SCRIPTS = ocf-tester
-halib_PROGRAMS = findif
+halib_PROGRAMS = findif \
+ storage_mon
man8_MANS = ocf-tester.8
@@ -67,6 +68,8 @@ sfex_stat_LDADD = $(GLIBLIB) -lplumb -lplumbgpl
findif_SOURCES = findif.c
+storage_mon_SOURCES = storage_mon.c
+
if BUILD_TICKLE
halib_PROGRAMS += tickle_tcp
tickle_tcp_SOURCES = tickle_tcp.c
diff --git a/tools/storage_mon.c b/tools/storage_mon.c
new file mode 100644
index 0000000000..7b65bb4191
--- /dev/null
+++ b/tools/storage_mon.c
@@ -0,0 +1,263 @@
+#include <stdio.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <syslog.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#ifdef __FreeBSD__
+#include <sys/disk.h>
+#endif
+
+#define MAX_DEVICES 25
+#define DEFAULT_TIMEOUT 10
+
+static void usage(char *name, FILE *f)
+{
+ fprintf(f, "usage: %s [-hv] [-d <device>]... [-s <score>]... [-t <secs>]\n", name);
+ fprintf(f, " --device <dev> device to test, up to %d instances\n", MAX_DEVICES);
+ fprintf(f, " --score <n> score if device fails the test. Must match --device count\n");
+ fprintf(f, " --timeout <n> max time to wait for a device test to come back. in seconds (default %d)\n", DEFAULT_TIMEOUT);
+ fprintf(f, " --inject-errors-percent <n> Generate EIO errors <n>%% of the time (for testing only)\n");
+ fprintf(f, " --verbose emit extra output to stdout\n");
+ fprintf(f, " --help print this messages\n");
+}
+
+/* Check one device */
+static void *test_device(const char *device, int verbose, int inject_error_percent)
+{
+ uint64_t devsize;
+ int device_fd;
+ int res;
+ off_t seek_spot;
+ char buffer[512];
+
+ if (verbose) {
+ printf("Testing device %s\n", device);
+ }
+
+ device_fd = open(device, O_RDONLY);
+ if (device_fd < 0) {
+ fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
+ exit(-1);
+ }
+#ifdef __FreeBSD__
+ res = ioctl(device_fd, DIOCGMEDIASIZE, &devsize);
+#else
+ res = ioctl(device_fd, BLKGETSIZE64, &devsize);
+#endif
+ if (res != 0) {
+ fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno));
+ close(device_fd);
+ exit(-1);
+ }
+ if (verbose) {
+ fprintf(stderr, "%s: size=%zu\n", device, devsize);
+ }
+ /* Don't fret about real randomness */
+ srand(time(NULL) + getpid());
+ /* Pick a random place on the device - sector aligned */
+ seek_spot = (rand() % (devsize-1024)) & 0xFFFFFFFFFFFFFE00;
+ res = lseek(device_fd, seek_spot, SEEK_SET);
+ if (res < 0) {
+ fprintf(stderr, "Failed to seek %s: %s\n", device, strerror(errno));
+ close(device_fd);
+ exit(-1);
+ }
+
+ if (verbose) {
+ printf("%s: reading from pos %ld\n", device, seek_spot);
+ }
+
+ res = read(device_fd, buffer, sizeof(buffer));
+ if (res < 0) {
+ fprintf(stderr, "Failed to read %s: %s\n", device, strerror(errno));
+ close(device_fd);
+ exit(-1);
+ }
+ if (res < (int)sizeof(buffer)) {
+ fprintf(stderr, "Failed to read %ld bytes from %s, got %d\n", sizeof(buffer), device, res);
+ close(device_fd);
+ exit(-1);
+ }
+
+ /* Fake an error */
+ if (inject_error_percent && ((rand() % 100) < inject_error_percent)) {
+ fprintf(stderr, "People, please fasten your seatbelts, injecting errors!\n");
+ close(device_fd);
+ exit(-1);
+ }
+ res = close(device_fd);
+ if (res != 0) {
+ fprintf(stderr, "Failed to close %s: %s\n", device, strerror(errno));
+ close(device_fd);
+ exit(-1);
+ }
+
+ if (verbose) {
+ printf("%s: done\n", device);
+ }
+ exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+ char *devices[MAX_DEVICES];
+ int scores[MAX_DEVICES];
+ pid_t test_forks[MAX_DEVICES];
+ size_t device_count = 0;
+ size_t score_count = 0;
+ size_t finished_count = 0;
+ int timeout = DEFAULT_TIMEOUT;
+ struct timespec ts;
+ time_t start_time;
+ size_t i;
+ int final_score = 0;
+ int opt, option_index;
+ int verbose = 0;
+ int inject_error_percent = 0;
+ struct option long_options[] = {
+ {"timeout", required_argument, 0, 't' },
+ {"device", required_argument, 0, 'd' },
+ {"score", required_argument, 0, 's' },
+ {"inject-errors-percent", required_argument, 0, 0 },
+ {"verbose", no_argument, 0, 'v' },
+ {"help", no_argument, 0, 'h' },
+ {0, 0, 0, 0 }
+ };
+ while ( (opt = getopt_long(argc, argv, "hvt:d:s:",
+ long_options, &option_index)) != -1 ) {
+ switch (opt) {
+ case 0: /* Long-only options */
+ if (strcmp(long_options[option_index].name, "inject-errors-percent") == 0) {
+ inject_error_percent = atoi(optarg);
+ if (inject_error_percent < 1 || inject_error_percent > 100) {
+ fprintf(stderr, "inject_error_percent should be between 1 and 100\n");
+ return -1;
+ }
+ }
+ break;
+ case 'd':
+ if (device_count < MAX_DEVICES) {
+ devices[device_count++] = strdup(optarg);
+ } else {
+ fprintf(stderr, "too many devices, max is %d\n", MAX_DEVICES);
+ return -1;
+ }
+ break;
+ case 's':
+ if (device_count < MAX_DEVICES) {
+ int score = atoi(optarg);
+ if (score < 1 || score > 10) {
+ fprintf(stderr, "Score must be between 1 and 10 inclusive\n");
+ return -1;
+ }
+ scores[score_count++] = score;
+ } else {
+ fprintf(stderr, "too many scores, max is %d\n", MAX_DEVICES);
+ return -1;
+ }
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 't':
+ timeout = atoi(optarg);
+ if (timeout < 1) {
+ fprintf(stderr, "invalid timeout %d. Min 1, recommended %d (default)\n", timeout, DEFAULT_TIMEOUT);
+ return -1;
+ }
+ break;
+ case 'h':
+ usage(argv[0], stdout);
+ break;
+ default:
+ usage(argv[0], stderr);
+ break;
+ }
+
+ }
+ if (device_count == 0) {
+ fprintf(stderr, "No devices to test, use the -d or --device argument\n");
+ return -1;
+ }
+
+ if (device_count != score_count) {
+ fprintf(stderr, "There must be the same number of devices and scores\n");
+ return -1;
+ }
+
+ openlog("storage_mon", 0, LOG_DAEMON);
+
+ memset(test_forks, 0, sizeof(test_forks));
+ for (i=0; i<device_count; i++) {
+ test_forks[i] = fork();
+ if (test_forks[i] < 0) {
+ fprintf(stderr, "Error spawning fork for %s: %s\n", devices[i], strerror(errno));
+ syslog(LOG_ERR, "Error spawning fork for %s: %s\n", devices[i], strerror(errno));
+ /* Just test the devices we have */
+ break;
+ }
+ /* child */
+ if (test_forks[i] == 0) {
+ test_device(devices[i], verbose, inject_error_percent);
+ }
+ }
+
+ /* See if they have finished */
+ clock_gettime(CLOCK_REALTIME, &ts);
+ start_time = ts.tv_sec;
+
+ while ((finished_count < device_count) && ((start_time + timeout) > ts.tv_sec)) {
+ for (i=0; i<device_count; i++) {
+ int wstatus;
+ pid_t w;
+
+ if (test_forks[i] > 0) {
+ w = waitpid(test_forks[i], &wstatus, WUNTRACED | WNOHANG | WCONTINUED);
+ if (w < 0) {
+ fprintf(stderr, "waitpid on %s failed: %s\n", devices[i], strerror(errno));
+ return -1;
+ }
+
+ if (w == test_forks[i]) {
+ if (WIFEXITED(wstatus)) {
+ if (WEXITSTATUS(wstatus) == 0) {
+ finished_count++;
+ test_forks[i] = 0;
+ } else {
+ syslog(LOG_ERR, "Error reading from device %s", devices[i]);
+ final_score += scores[i];
+ }
+ }
+ }
+ }
+ }
+
+ usleep(100000);
+
+ clock_gettime(CLOCK_REALTIME, &ts);
+ }
+
+ /* See which threads have not finished */
+ for (i=0; i<device_count; i++) {
+ if (test_forks[i] != 0) {
+ syslog(LOG_ERR, "Reading from device %s did not complete in %d seconds timeout", devices[i], timeout);
+ fprintf(stderr, "Thread for device %s did not complete in time\n", devices[i]);
+ final_score += scores[i];
+ }
+ }
+
+ if (verbose) {
+ printf("Final score is %d\n", final_score);
+ }
+ return final_score;
+}
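
A rough usage sketch for the new agent and helper follows; the device paths, resource name, the helper's install path, and the pcs commands are illustrative assumptions, not taken from the patch. Per the code above, storage_mon exits 0 when every device answers within the timeout and otherwise returns the sum of the --score values of the failing devices; the agent is meant to be cloned and combined with a Pacemaker node-health strategy:

# exercise two drives directly with the helper (installed under $HA_BIN, commonly /usr/libexec/heartbeat)
/usr/libexec/heartbeat/storage_mon --device /dev/sda --score 1 --device /dev/sdb --score 1 --timeout 10 --verbose

# hypothetical cluster wiring: clone the agent and let the #health-* attribute drive placement
pcs resource create storage-mon ocf:heartbeat:storage-mon \
    drives="/dev/sda /dev/sdb" io_timeout=10 \
    op monitor interval=30s timeout=120s clone
pcs property set node-health-strategy=only-green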

File diff suppressed because it is too large


@@ -0,0 +1,25 @@
From 7f7ca75100a846242ff1510fd9bcf299cd3d00eb Mon Sep 17 00:00:00 2001
From: Aleksei Burlakov <aleksei.burlakov@suse.com>
Date: Mon, 26 Oct 2020 13:25:45 +0100
Subject: [PATCH] ethmonitor: is_interface: RE matches vlan names
VLAN names do not end with ':' but are suffixed with '@<device-name>'
---
heartbeat/ethmonitor | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor
index e791fbe9d..cf0321ab4 100755
--- a/heartbeat/ethmonitor
+++ b/heartbeat/ethmonitor
@@ -230,8 +230,8 @@ is_interface() {
#
# List interfaces but exclude FreeS/WAN ipsecN virtual interfaces
#
- local iface=`$IP2UTIL -o -f inet addr show | grep " $1 " \
- | cut -d ' ' -f2 | sort -u | grep -v '^ipsec[0-9][0-9]*$'`
+ local iface=`$IP2UTIL -o -f link addr show | grep -e " $1[:@]" \
+ | cut -d ' ' -f2 | tr -d ':' | cut -d '@' -f1 | sort -u | grep -v '^ipsec[0-9][0-9]*$'`
[ "$iface" != "" ]
}
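
For context, the listing that is_interface() now parses looks roughly like the following (illustrative output; interface names are assumptions): a plain interface name is terminated by ':', while a VLAN appears with an '@<parent-device>' suffix, which is why the updated grep matches " $1[:@]" and strips the ':' and '@...' parts before comparing.

$ ip -o -f link addr show
2: bond_in: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 ...
6: bond_in.83@bond_in: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 ...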


@@ -0,0 +1,40 @@
From 3dd051ed56418dc241417ea02e59db3982b7b92c Mon Sep 17 00:00:00 2001
From: Oliver Freyermuth <o.freyermuth@googlemail.com>
Date: Thu, 26 Nov 2020 10:25:01 +0100
Subject: [PATCH] heartbeat/iface-vlan: vlan_{interface,id} do not have to be
unique.
Machines commonly have several vlan_ids attached to one interface,
and may also have the same vlan_id attached to several interfaces.
vlan_name will still be unique, usual names are:
- bond_in.83@bond_in
- bond_in.84@bond_in
fixes #1581
---
heartbeat/iface-vlan | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/iface-vlan b/heartbeat/iface-vlan
index cbe7e86da..d0481373c 100755
--- a/heartbeat/iface-vlan
+++ b/heartbeat/iface-vlan
@@ -89,7 +89,7 @@ vlan_meta_data() {
</shortdesc>
<parameters>
- <parameter name="vlan_interface" unique="1" required="1">
+ <parameter name="vlan_interface" unique="0" required="1">
<longdesc lang="en">
Define the interface where VLAN should be attached.
</longdesc>
@@ -99,7 +99,7 @@ vlan_meta_data() {
<content type="string"/>
</parameter>
- <parameter name="vlan_id" unique="1" required="1">
+ <parameter name="vlan_id" unique="0" required="1">
<longdesc lang="en">
Define the VLAN ID. It has to be a value between 0 and 4094.
</longdesc>


@@ -0,0 +1,57 @@
From dc4fc6fb51481e62c763212129e7dbae4cb663fd Mon Sep 17 00:00:00 2001
From: Reid Wahl <nrwahl@protonmail.com>
Date: Tue, 2 Feb 2021 17:55:40 -0800
Subject: [PATCH] nfsserver: Error-check unbind_tree
Fail to stop if unmounting rpcpipefs_dir or /var/lib/nfs fails.
Resolves: RHBZ#1924363
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
---
heartbeat/nfsserver | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 80d20676b..96b19abe3 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -465,9 +465,20 @@ unbind_tree ()
sleep 1
i=$((i + 1))
done
+
+ if mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "; then
+ ocf_log err "Failed to unmount $OCF_RESKEY_rpcpipefs_dir"
+ return $OCF_ERR_GENERIC
+ fi
+
if is_bound /var/lib/nfs; then
- umount /var/lib/nfs
+ if ! umount /var/lib/nfs; then
+ ocf_log err "Failed to unmount /var/lib/nfs"
+ return $OCF_ERR_GENERIC
+ fi
fi
+
+ return $OCF_SUCCESS
}
binary_status()
@@ -836,8 +847,14 @@ nfsserver_stop ()
esac
unbind_tree
- ocf_log info "NFS server stopped"
- return 0
+ rc=$?
+ if [ "$rc" -ne $OCF_SUCCESS ]; then
+ ocf_exit_reason "Failed to unmount a bind mount"
+ else
+ ocf_log info "NFS server stopped"
+ fi
+
+ return $rc
}
nfsserver_validate ()


@@ -0,0 +1,31 @@
From 500de79739cd39808fb48fa556c9b9b9fe2e8acd Mon Sep 17 00:00:00 2001
From: Matthias Hensler <matthias@wspse.de>
Date: Thu, 18 Feb 2021 12:49:49 +0100
Subject: [PATCH] fix pid_status() for VirtualDomain on EL8
see https://github.com/ClusterLabs/resource-agents/issues/1613
---
heartbeat/VirtualDomain | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain
index eb41e3e22..f9cd21fc7 100755
--- a/heartbeat/VirtualDomain
+++ b/heartbeat/VirtualDomain
@@ -421,14 +421,14 @@ pid_status()
case "$emulator" in
qemu-kvm|qemu-dm|qemu-system-*)
rc=$OCF_NOT_RUNNING
- ps awx | grep -E "[q]emu-(kvm|dm|system).*-name $DOMAIN_NAME " > /dev/null 2>&1
+ ps awx | grep -E "[q]emu-(kvm|dm|system).*-name ($DOMAIN_NAME|[^ ]*guest=$DOMAIN_NAME(,[^ ]*)?) " > /dev/null 2>&1
if [ $? -eq 0 ]; then
rc=$OCF_SUCCESS
fi
;;
libvirt_lxc)
rc=$OCF_NOT_RUNNING
- ps awx | grep -E "[l]ibvirt_lxc.*-name $DOMAIN_NAME " > /dev/null 2>&1
+ ps awx | grep -E "[l]ibvirt_lxc.*-name ($DOMAIN_NAME|[^ ]*guest=$DOMAIN_NAME(,[^ ]*)?) " > /dev/null 2>&1
if [ $? -eq 0 ]; then
rc=$OCF_SUCCESS
fi
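
Background for the widened regex (the process listing below is illustrative; the domain name "myvm" and the binary path are assumptions): on EL8, libvirt typically launches qemu with "-name guest=<domain>,debug-threads=on" rather than a bare "-name <domain>", so the old pattern never matched and running guests could be reported as stopped. Roughly:

$ ps awx | grep '[q]emu-kvm'
12345 ?  Sl  3:14 /usr/libexec/qemu-kvm -name guest=myvm,debug-threads=on -machine ...

The new pattern accepts both the bare name and the "guest=<name>,..." form.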


@@ -0,0 +1,23 @@
From dd5394180267c652d0928db8c5508d9977893fe5 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 18 Mar 2021 16:23:10 +0100
Subject: [PATCH] db2: add PRIMARY/REMOTE_CATCHUP_PENDING/CONNECTED status to
promote-check
---
heartbeat/db2 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/db2 b/heartbeat/db2
index a57fd2bb6..459136cbd 100755
--- a/heartbeat/db2
+++ b/heartbeat/db2
@@ -767,7 +767,7 @@ db2_promote() {
return $OCF_SUCCESS
;;
- PRIMARY/PEER/*|PRIMARY/REMOTE_CATCHUP/*|Primary/Peer)
+ PRIMARY/PEER/*|PRIMARY/REMOTE_CATCHUP/*|PRIMARY/REMOTE_CATCHUP_PENDING/CONNECTED|Primary/Peer)
# nothing to do, only update pacemaker's view
echo MASTER > $STATE_FILE
return $OCF_SUCCESS


@@ -0,0 +1,52 @@
From f91804ff4772e3ab41f46e28d370f57898700333 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Thu, 10 Dec 2020 08:19:21 +0100
Subject: [PATCH] fixes #1625: infinite loop in SML lexer
Reason was a lookahead-only pattern which was included in the state
that the lookahead was transitioning to.
---
pygments/lexers/ml.py | 12 ++++++------
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py
index 8ca8ce3eb..f2ac367c5 100644
--- a/pygments/lexers/ml.py
+++ b/pygments/lexers/ml.py
@@ -142,7 +142,7 @@ def id_callback(self, match):
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
- (r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
+ (r'\b(exception)\b(?!\')', Keyword.Reserved, 'ename'),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
@@ -315,15 +315,14 @@ def id_callback(self, match):
'ename': [
include('whitespace'),
- (r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
+ (r'(and\b)(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
- (r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
+ (r'(and\b)(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
+ (r'(%s)|(%s)' % (alphanumid_re, symbolicid_re), Name.Class),
- include('breakout'),
- include('core'),
- (r'\S+', Error),
+ default('#pop'),
],
'datcon': [
@@ -445,6 +444,7 @@ class OcamlLexer(RegexLexer):
],
}
+
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).


@@ -0,0 +1,23 @@
From 3491a6ad30830a8545defa5a417a7db46b093904 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 17 Mar 2021 12:39:10 +0100
Subject: [PATCH] awsvip: dont partially match similar IPs during
monitor-action
---
heartbeat/awsvip | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index 7d0bf35b6..044d049c6 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -172,7 +172,7 @@ awsvip_monitor() {
--instance-id "${INSTANCE_ID}" \
--query 'Reservations[].Instances[].NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \
--output text | \
- grep -q "${SECONDARY_PRIVATE_IP}"
+ grep -qE "(^|\s)${SECONDARY_PRIVATE_IP}(\s|$)"
RET=$?
if [ $RET -ne 0 ]; then


@@ -0,0 +1,64 @@
From 59b0840d262900d0eaa8b19df3ede55eea5250d2 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 17 Mar 2021 12:10:59 +0100
Subject: [PATCH] AWS agents: dont spam log files when getting token
---
heartbeat/aws-vpc-move-ip | 2 +-
heartbeat/aws-vpc-route53.in | 2 +-
heartbeat/awseip | 2 +-
heartbeat/awsvip | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index cbb629b00..3ca3d6bd6 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -215,7 +215,7 @@ ec2ip_validate() {
return $OCF_ERR_CONFIGURED
fi
- TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
+ TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
if [ -z "${EC2_INSTANCE_ID}" ]; then
diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
index 4fb17019b..21948eaca 100644
--- a/heartbeat/aws-vpc-route53.in
+++ b/heartbeat/aws-vpc-route53.in
@@ -347,7 +347,7 @@ r53_monitor() {
_get_ip() {
case $OCF_RESKEY_ip in
local|public)
- TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
+ TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
IPADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4 -H "X-aws-ec2-metadata-token: $TOKEN");;
*.*.*.*)
IPADDRESS="${OCF_RESKEY_ip}";;
diff --git a/heartbeat/awseip b/heartbeat/awseip
index de1967774..12ffffaa3 100755
--- a/heartbeat/awseip
+++ b/heartbeat/awseip
@@ -244,7 +244,7 @@ AWSCLI="${OCF_RESKEY_awscli}"
ELASTIC_IP="${OCF_RESKEY_elastic_ip}"
ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}"
-TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
+TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
case $__OCF_ACTION in
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index 8050107e8..7d0bf35b6 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -206,7 +206,7 @@ esac
AWSCLI="${OCF_RESKEY_awscli}"
SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
-TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
+TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
MAC_ADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/mac -H "X-aws-ec2-metadata-token: $TOKEN")
NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN")


@@ -0,0 +1,138 @@
From 2e7e8c4a7b318f4032493773732754e418279a14 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Mon, 11 Jan 2021 09:46:34 +0100
Subject: [PATCH] Fix several exponential/cubic complexity regexes found by Ben
Caller/Doyensec
---
pygments/lexers/archetype.py | 2 +-
pygments/lexers/factor.py | 4 ++--
pygments/lexers/jvm.py | 1 -
pygments/lexers/matlab.py | 6 +++---
pygments/lexers/objective.py | 4 ++--
pygments/lexers/templates.py | 2 +-
pygments/lexers/varnish.py | 2 +-
8 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py
index 65046613d..26f5ea8c9 100644
--- a/pygments/lexers/archetype.py
+++ b/pygments/lexers/archetype.py
@@ -58,7 +58,7 @@ class AtomsLexer(RegexLexer):
(r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|'
r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date),
(r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
- (r'[+-]?(\d+)*\.\d+%?', Number.Float),
+ (r'[+-]?\d*\.\d+%?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[+-]?\d+%?', Number.Integer),
],
diff --git a/pygments/lexers/factor.py b/pygments/lexers/factor.py
index be7b30dff..9200547f9 100644
--- a/pygments/lexers/factor.py
+++ b/pygments/lexers/factor.py
@@ -265,7 +265,7 @@ class FactorLexer(RegexLexer):
(r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),
# strings
- (r'"""\s+(?:.|\n)*?\s+"""', String),
+ (r'"""\s(?:.|\n)*?\s"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),
@@ -322,7 +322,7 @@ class FactorLexer(RegexLexer):
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
- (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
+ (r'(\{\s+)(\S+)(\s[^}]+\s\}\s)',
bygroups(Text, Name.Variable, Text)),
(r'\S+', Name.Variable),
],
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 62dfd45e5..9a9397c2d 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -981,7 +981,6 @@ class CeylonLexer(RegexLexer):
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
- (r'".*``.*``.*"', String.Interpol),
(r'(\.)([a-z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py
index 4823c6a7e..578848623 100644
--- a/pygments/lexers/matlab.py
+++ b/pygments/lexers/matlab.py
@@ -137,7 +137,7 @@ class MatlabLexer(RegexLexer):
(r'.', Comment.Multiline),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
@@ -638,7 +638,7 @@ class OctaveLexer(RegexLexer):
(r"[^']*'", String, '#pop'),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
@@ -710,7 +710,7 @@ class ScilabLexer(RegexLexer):
(r'.', String, '#pop'),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py
index 34e4062f6..38ac9bb05 100644
--- a/pygments/lexers/objective.py
+++ b/pygments/lexers/objective.py
@@ -261,11 +261,11 @@ class LogosLexer(ObjectiveCppLexer):
'logos_classname'),
(r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
bygroups(Keyword, Text, Name.Class)),
- (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)',
+ (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)',
bygroups(Keyword, Text, Name.Variable, Text, String, Text)),
(r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation),
'function'),
- (r'(%new)(\s*)(\()(\s*.*?\s*)(\))',
+ (r'(%new)(\s*)(\()(.*?)(\))',
bygroups(Keyword, Text, Keyword, String, Keyword)),
(r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)),
inherit,
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
index 33c06c4c4..5c3346b4c 100644
--- a/pygments/lexers/templates.py
+++ b/pygments/lexers/templates.py
@@ -1405,7 +1405,7 @@ class EvoqueLexer(RegexLexer):
# see doc for handling first name arg: /directives/evoque/
# + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
# should be using(PythonLexer), not passed out as String
- (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
+ (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?'
r'(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, using(PythonLexer), Punctuation)),
diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py
index 23653f7a1..9d358bd7c 100644
--- a/pygments/lexers/varnish.py
+++ b/pygments/lexers/varnish.py
@@ -61,7 +61,7 @@ def analyse_text(text):
bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)),
(r'(\.probe)(\s*=\s*)(\{)',
bygroups(Name.Attribute, Operator, Punctuation), 'probe'),
- (r'(\.\w+\b)(\s*=\s*)([^;]*)(\s*;)',
+ (r'(\.\w+\b)(\s*=\s*)([^;\s]*)(\s*;)',
bygroups(Name.Attribute, Operator, using(this), Punctuation)),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),


@@ -0,0 +1,102 @@
From 3ae6d8f0a34d099945d9bf005ed45dbfe9452202 Mon Sep 17 00:00:00 2001
From: kj1724 <78624900+kj1724@users.noreply.github.com>
Date: Wed, 28 Apr 2021 10:22:38 -0400
Subject: [PATCH] gcp-vpc-move-vip.in: Adds retries
If the cluster fails a monitoring event, it will try to restart the resource. If the resource agent makes an API/metadata call that fails at that time, the resource will be considered "failed", but in certain cases also "unconfigured", which prevents further operations.
These changes can help the agent recover on certain intermittent failures.
---
heartbeat/gcp-vpc-move-vip.in | 62 ++++++++++++++++++++---------------
1 file changed, 35 insertions(+), 27 deletions(-)
diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
index bbbd87b7a9..c411555110 100755
--- a/heartbeat/gcp-vpc-move-vip.in
+++ b/heartbeat/gcp-vpc-move-vip.in
@@ -50,6 +50,8 @@ REMOVE = 1
CONN = None
THIS_VM = None
ALIAS = None
+MAX_RETRIES = 3
+RETRY_BACKOFF_SECS = 1
METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/'
METADATA_HEADERS = {'Metadata-Flavor': 'Google'}
METADATA = \
@@ -111,18 +113,37 @@ def get_metadata(metadata_key, params=None, timeout=None):
Returns:
HTTP response from the GET request.
-
- Raises:
- urlerror.HTTPError: raises when the GET request fails.
"""
- timeout = timeout or 60
- metadata_url = os.path.join(METADATA_SERVER, metadata_key)
- params = urlparse.urlencode(params or {})
- url = '%s?%s' % (metadata_url, params)
- request = urlrequest.Request(url, headers=METADATA_HEADERS)
- request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
- return request_opener.open(
- request, timeout=timeout * 1.1).read().decode("utf-8")
+ for i in range(MAX_RETRIES):
+ try:
+ timeout = timeout or 60
+ metadata_url = os.path.join(METADATA_SERVER, metadata_key)
+ params = urlparse.urlencode(params or {})
+ url = '%s?%s' % (metadata_url, params)
+ request = urlrequest.Request(url, headers=METADATA_HEADERS)
+ request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
+ return request_opener.open(
+ request, timeout=timeout * 1.1).read().decode("utf-8")
+ except Exception as e:
+ logger.error('Couldn\'t get instance name, is this running inside GCE?: '
+ + str(e))
+ time.sleep(RETRY_BACKOFF_SECS * (i + 1))
+
+ # If the retries are exhausted we exit with a generic error.
+ sys.exit(OCF_ERR_GENERIC)
+
+
+def create_api_connection():
+ for i in range(MAX_RETRIES):
+ try:
+ return googleapiclient.discovery.build('compute', 'v1',
+ cache_discovery=False)
+ except Exception as e:
+ logger.error('Couldn\'t connect with google api: ' + str(e))
+ time.sleep(RETRY_BACKOFF_SECS * (i + 1))
+
+ # If the retries are exhausted we exit with a generic error.
+ sys.exit(OCF_ERR_GENERIC)
def get_instance(project, zone, instance):
@@ -358,24 +379,11 @@ def gcp_alias_status(alias):
def validate():
global ALIAS
- global CONN
global THIS_VM
+ global CONN
- # Populate global vars
- try:
- CONN = googleapiclient.discovery.build('compute', 'v1',
- cache_discovery=False)
- except Exception as e:
- logger.error('Couldn\'t connect with google api: ' + str(e))
- sys.exit(OCF_ERR_CONFIGURED)
-
- try:
- THIS_VM = get_metadata('instance/name')
- except Exception as e:
- logger.error('Couldn\'t get instance name, is this running inside GCE?: '
- + str(e))
- sys.exit(OCF_ERR_CONFIGURED)
-
+ CONN = create_api_connection()
+ THIS_VM = get_metadata('instance/name')
ALIAS = os.environ.get('OCF_RESKEY_alias_ip')
if not ALIAS:
logger.error('Missing alias_ip parameter')


@@ -0,0 +1,82 @@
diff --color -uNr a/heartbeat/lvmlockd b/heartbeat/lvmlockd
--- a/heartbeat/lvmlockd 2021-06-11 16:08:37.725598299 +0200
+++ b/heartbeat/lvmlockd 2021-06-11 16:10:38.690910781 +0200
@@ -59,14 +59,6 @@
<shortdesc lang="en">This agent manages the lvmlockd daemon</shortdesc>
<parameters>
-<parameter name="with_cmirrord" unique="0" required="0">
-<longdesc lang="en">
-Start with cmirrord (cluster mirror log daemon).
-</longdesc>
-<shortdesc lang="en">activate cmirrord</shortdesc>
-<content type="boolean" default="false" />
-</parameter>
-
<parameter name="pidfile" unique="0">
<longdesc lang="en">pid file</longdesc>
<shortdesc lang="en">pid file</shortdesc>
@@ -110,7 +102,6 @@
: ${OCF_RESKEY_pidfile:="/run/lvmlockd.pid"}
LOCKD="lvmlockd"
-CMIRRORD="cmirrord"
# 0.5s sleep each count
TIMEOUT_COUNT=20
@@ -150,12 +141,6 @@
rc=$?
mirror_rc=$rc
- if ocf_is_true $OCF_RESKEY_with_cmirrord; then
- pid=$(pgrep $CMIRRORD | head -n1)
- daemon_is_running "$pid"
- mirror_rc=$?
- fi
-
# If these ever don't match, return error to force recovery
if [ $mirror_rc -ne $rc ]; then
return $OCF_ERR_GENERIC
@@ -235,16 +220,6 @@
return $OCF_SUCCESS
fi
- if ocf_is_true $OCF_RESKEY_with_cmirrord; then
- ocf_log info "starting ${CMIRRORD}..."
- $CMIRRORD
- rc=$?
- if [ $rc -ne $OCF_SUCCESS ] ; then
- ocf_exit_reason "Failed to start ${CMIRRORD}, exit code: $rc"
- return $OCF_ERR_GENERIC
- fi
- fi
-
if [ ! -z "$OCF_RESKEY_socket_path" ] ; then
extras="$extras -s ${OCF_RESKEY_socket_path}"
fi
@@ -341,13 +316,8 @@
pid=$(get_pid)
kill_stop $LOCKD $pid
- if ocf_is_true $OCF_RESKEY_with_cmirrord; then
- pid=$(pgrep $CMIRRORD)
- kill_stop $CMIRRORD $pid
- fi
-
if silent_status ; then
- ocf_exit_reason "Failed to stop, ${LOCKD} or ${CMIRRORD} still running."
+ ocf_exit_reason "Failed to stop, ${LOCKD} still running."
return $OCF_ERR_GENERIC
fi
@@ -370,10 +340,6 @@
check_binary pgrep
check_binary lvmlockctl
- if ocf_is_true $OCF_RESKEY_with_cmirrord; then
- check_binary $CMIRRORD
- fi
-
return $OCF_SUCCESS
}


@@ -70,7 +70,7 @@
 Name: resource-agents
 Summary: Open Source HA Reusable Cluster Resource Scripts
 Version: 4.1.1
-Release: 90%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}.7
+Release: 98%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}
 License: GPLv2+ and LGPLv2+
 URL: https://github.com/ClusterLabs/resource-agents
 %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
@@ -268,10 +268,21 @@ Patch176: bz1913932-3-gcp-vpc-move-route-make-vpc_network-optional.patch
 Patch177: bz1937142-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch
 Patch178: bz1940363-1-galera-redis-use-output-as.patch
 Patch179: bz1940363-2-bundle-disable-validate-with.patch
-Patch180: bz1943093-aws-vpc-move-ip-add-ENI-lookup.patch
-Patch181: bz1973035-podman-fix-container-creation-race.patch
-Patch182: bz1986868-podman-return-not-running-probe.patch
-Patch183: bz1972236-LVM-activate-fix-drop-in.patch
+Patch180: bz1891883-ethmonitor-vlan-fix.patch
+Patch181: bz1902045-iface-vlan-vlan-not-unique.patch
+Patch182: bz1924363-nfsserver-error-check-unmount.patch
+Patch183: bz1932863-VirtualDomain-fix-pid-status.patch
+Patch184: bz1920698-podman-return-not-running-probe.patch
+Patch185: bz1939992-awsvip-dont-partially-match-IPs.patch
+Patch186: bz1940094-aws-agents-dont-spam-logs.patch
+Patch187: bz1939281-aws-vpc-move-ip-add-ENI-lookup.patch
+Patch188: bz1934651-db2-add-PRIMARY-REMOTE_CATCHUP_PENDING-CONNECTED.patch
+Patch189: bz1872754-pgsqlms-new-ra.patch
+Patch190: bz1957765-gcp-vpc-move-vip-retry.patch
+Patch191: bz1969968-lvmlockd-remove-with_cmirrord.patch
+Patch192: bz1972035-LVM-activate-fix-drop-in.patch
+Patch193: bz1972743-podman-fix-container-creation-race.patch
+Patch194: bz1509319-storage-mon-new-ra.patch
 
 # bundle patches
 Patch1000: 7-gcp-bundled.patch
@@ -282,6 +293,8 @@ Patch1004: bz1691456-gcloud-dont-detect-python2.patch
 Patch1005: aliyun-vpc-move-ip-4-bundled.patch
 Patch1006: python3-syntax-fixes.patch
 Patch1007: aliyuncli-python3-fixes.patch
+Patch1008: bz1935422-python-pygments-fix-CVE-2021-20270.patch
+Patch1009: bz1943464-python-pygments-fix-CVE-2021-27291.patch
 
 Obsoletes: heartbeat-resources <= %{version}
 Provides: heartbeat-resources = %{version}
@@ -421,6 +434,21 @@ The Google Cloud Platform resource agents allows Google Cloud
 Platform instances to be managed in a cluster environment.
 %endif
 
+%package paf
+License: PostgreSQL
+Summary: PostgreSQL Automatic Failover (PAF) resource agent
+%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+Group: System Environment/Base
+%else
+Group: Productivity/Clustering/HA
+%endif
+Requires: %{name} = %{version}-%{release}
+Requires: perl-interpreter
+
+%description paf
+PostgreSQL Automatic Failover (PAF) resource agents allows PostgreSQL
+databases to be managed in a cluster environment.
+
 %prep
 %if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos_version} == 0 && 0%{?rhel} == 0
 %{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. Please install the correct build packages or define the required macros manually.}
@@ -607,13 +635,25 @@ exit 1
 %patch177 -p1
 %patch178 -p1
 %patch179 -p1
-%patch180 -p1 -F2
+%patch180 -p1
 %patch181 -p1
 %patch182 -p1
 %patch183 -p1
+%patch184 -p1
+%patch185 -p1
+%patch186 -p1
+%patch187 -p1 -F2
+%patch188 -p1
+%patch189 -p1
+%patch190 -p1
+%patch191 -p1
+%patch192 -p1
+%patch193 -p1
+%patch194 -p1 -F2
 
 chmod 755 heartbeat/nova-compute-wait
 chmod 755 heartbeat/NovaEvacuate
+chmod 755 heartbeat/pgsqlms
 
 # bundles
 mkdir -p %{bundled_lib_dir}/gcp
@@ -752,6 +792,12 @@ cp %{aliyuncli_dir}/LICENSE %{aliyuncli}_LICENSE
 # aliyun Python 3 fixes
 %patch1006 -p1
 %patch1007 -p1
+
+# fix CVE's in python-pygments
+pushd %{googlecloudsdk_dir}/lib/third_party
+%patch1008 -p1 -F2
+%patch1009 -p1 -F2
+popd
 %endif
 
 %build
@@ -981,6 +1027,9 @@ rm -rf %{buildroot}
 %exclude /usr/lib/ocf/resource.d/heartbeat/gcp*
 %exclude %{_mandir}/man7/*gcp*
 %exclude /usr/lib/%{name}/%{bundled_lib_dir}
+%exclude /usr/lib/ocf/resource.d/heartbeat/pgsqlms
+%exclude %{_mandir}/man7/*pgsqlms*
+%exclude %{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
 
 ###
 # Moved to separate packages
@@ -1173,37 +1222,66 @@ ccs_update_schema > /dev/null 2>&1 ||:
 /usr/lib/%{name}/%{bundled_lib_dir}/gcp
 %endif
 
+%files paf
+%doc paf_README.md
+%license paf_LICENSE
+%defattr(-,root,root)
+%{_usr}/lib/ocf/resource.d/heartbeat/pgsqlms
+%{_mandir}/man7/*pgsqlms*
+%{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
+
 %changelog
-* Tue Aug 3 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-90.7
-- LVM-activate: fix drop-in check to avoid re-creating drop-in
-  Resolves: rhbz#1972236
+* Mon Aug 30 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-98
+- storage-mon: new resource agent
+  Resolves: rhbz#1509319
 
-* Wed Jul 28 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-90.6
-- podman: return NOT_RUNNING when monitor cmd fails
-  Resolves: rhbz#1986868
-
-* Thu Jun 17 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-90.5
+* Thu Jun 17 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-97
 - podman: fix possible race during container creation
-  Resolves: rhbz#1973035
+  Resolves: rhbz#1972743
 
-* Wed Apr 14 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-90.2
+* Tue Jun 15 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-96
+- LVM-activate: fix drop-in check to avoid re-creating drop-in
+  Resolves: rhbz#1972035
+
+* Fri Jun 11 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-95
+- lvmlockd: remove cmirrord support, as cmirrord is incompatible w/lvmlockd
+  Resolves: rhbz#1969968
+
+* Wed May 12 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-94
+- gcp-vpc-move-vip: add retry logic
+  Resolves: rhbz#1957765
+
+* Wed Apr 28 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-93
+- db2: add PRIMARY/REMOTE_CATCHUP_PENDING/CONNECTED status to promote-check
+- pgsqlms: new resource agent
+- python-pygments: fix CVE-2021-27291 and CVE-2021-20270
+  Resolves: rhbz#1872754, rhbz#1934651, rhbz#1935422, rhbz#1943464
+
+* Thu Apr 8 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-91
+- ethmonitor: fix vlan regex
+- iface-vlan: make vlan parameter not unique
+- nfsserver: error-check unmount
+- VirtualDomain: fix pid status regex
+- podman: return NOT_RUNNING when monitor cmd fails
+- awsvip: dont partially match similar IPs during monitor-action
+- aws agents: dont spam log files
 - aws-vpc-move-ip: add ENI lookup
-  Resolves: rhbz#1943093
+  Resolves: rhbz#1891883, rhbz#1902045, rhbz#1924363, rhbz#1932863
+  Resolves: rhbz#1920698, rhbz#1939992, rhbz#1940094, rhbz#1939281
 
 * Mon Mar 22 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-90
 - galera/rabbitmq-cluster/redis: run crm_mon without validation when
-  running in bundle
-  Resolves: rhbz#1940363
+  running in bundle (1940363)
 
 * Thu Mar 11 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-89
-- azure-lb: redirect to avoid nc dying with EPIPE error
-  Resolves: rhbz#1937142
+- azure-lb: redirect to avoid nc dying with EPIPE error (1937142)
 
 * Thu Feb 25 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-87
 - gcp-vpc-move-route, gcp-vpc-move-vip: add project parameter and