diff --git a/.gitignore b/.gitignore index 65e1103..5f4ed51 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1 @@ -SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz -SOURCES/aliyun-cli-2.1.10.tar.gz -SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz -SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz -SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz -SOURCES/colorama-0.3.3.tar.gz -SOURCES/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz -SOURCES/httplib2-0.20.4.tar.gz -SOURCES/pycryptodome-3.20.0.tar.gz -SOURCES/pyparsing-2.4.7-py2.py3-none-any.whl -SOURCES/pyroute2-0.4.13.tar.gz -SOURCES/urllib3-1.26.18.tar.gz +ClusterLabs-resource-agents-56e76b01.tar.gz diff --git a/.resource-agents.metadata b/.resource-agents.metadata deleted file mode 100644 index b562fa0..0000000 --- a/.resource-agents.metadata +++ /dev/null @@ -1,12 +0,0 @@ -dfc65f4cac3f95026b2f5674019814a527333004 SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz -306e131d8908ca794276bfe3a0b55ccc3bbd482f SOURCES/aliyun-cli-2.1.10.tar.gz -0a56f6d9ed2014a363486d33b63eca094379be06 SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz -c2a98b9a1562d223a76514f05028488ca000c395 SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz -f14647a4d37a9a254c4e711b95a7654fc418e41e SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz -0fe5bd8bca54dd71223778a1e0bcca9af324abb1 SOURCES/colorama-0.3.3.tar.gz -81f039cf075e9c8b70d5af99c189296a9e031de3 SOURCES/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz -7caf4412d9473bf17352316249a8133fa70b7e37 SOURCES/httplib2-0.20.4.tar.gz -c55d177e9484d974c95078d4ae945f89ba2c7251 SOURCES/pycryptodome-3.20.0.tar.gz -c8307f47e3b75a2d02af72982a2dfefa3f56e407 SOURCES/pyparsing-2.4.7-py2.py3-none-any.whl -147149db11104c06d405fd077dcd2aa1c345f109 SOURCES/pyroute2-0.4.13.tar.gz -84e2852d8da1655373f7ce5e7d5d3e256b62b4e4 SOURCES/urllib3-1.26.18.tar.gz diff --git a/RHEL-66293-1-aws-agents-reuse-imds-token-until-it-expires.patch b/RHEL-66293-1-aws-agents-reuse-imds-token-until-it-expires.patch new file mode 100644 index 0000000..b81a105 --- /dev/null +++ b/RHEL-66293-1-aws-agents-reuse-imds-token-until-it-expires.patch @@ -0,0 +1,455 @@ +From 61cec34a754017537c61e79cd1212f2688c32429 Mon Sep 17 00:00:00 2001 +From: harshkiprofile <83770157+harshkiprofile@users.noreply.github.com> +Date: Mon, 4 Nov 2024 12:19:10 +0530 +Subject: [PATCH 1/7] Introduce a new shell function to reuse IMDS token + +--- + heartbeat/ocf-shellfuncs.in | 31 +++++++++++++++++++++++++++++++ + 1 file changed, 31 insertions(+) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 5c4bb3264..0c4632cf9 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -1111,3 +1111,34 @@ ocf_is_true "$OCF_TRACE_RA" && ocf_start_trace + if ocf_is_true "$HA_use_logd"; then + : ${HA_LOGD:=yes} + fi ++ ++# File to store the token and timestamp ++TOKEN_FILE="/tmp/.imds_token" ++TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours) ++TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining ++ ++# Function to fetch a new token ++fetch_new_token() { ++ TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME") ++ echo "$TOKEN $(date +%s)" > "$TOKEN_FILE" ++ echo "$TOKEN" ++} ++ ++# Function to retrieve or renew the token ++get_token() { ++ if [[ -f "$TOKEN_FILE" ]]; then ++ read -r STORED_TOKEN STORED_TIMESTAMP < "$TOKEN_FILE" ++ CURRENT_TIME=$(date +%s) ++ ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP)) ++ ++ if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then 
++ # Token is still valid ++ echo "$STORED_TOKEN" ++ return ++ fi ++ fi ++ # Fetch a new token if not valid ++ fetch_new_token ++} ++ ++ + +From 00629fa44cb7a8dd1045fc8cad755e1d0c808476 Mon Sep 17 00:00:00 2001 +From: harshkiprofile <83770157+harshkiprofile@users.noreply.github.com> +Date: Mon, 4 Nov 2024 12:21:18 +0530 +Subject: [PATCH 2/7] Utilize the get_token function to reuse the token + +--- + heartbeat/aws-vpc-move-ip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 6115e5ba8..fbeb2ee64 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -270,7 +270,7 @@ ec2ip_validate() { + fi + fi + +- TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token") ++ TOKEN=$(get_token) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + EC2_INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id") + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + +From 36126cdcb90ad617ecfce03d986550907732aa4f Mon Sep 17 00:00:00 2001 +From: harshkiprofile <83770157+harshkiprofile@users.noreply.github.com> +Date: Mon, 4 Nov 2024 12:22:16 +0530 +Subject: [PATCH 3/7] Utilize to get_token function to reuse the token + +--- + heartbeat/awsvip | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/awsvip b/heartbeat/awsvip +index f2b238a0f..ca19ac086 100755 +--- a/heartbeat/awsvip ++++ b/heartbeat/awsvip +@@ -266,7 +266,7 @@ if [ -n "${OCF_RESKEY_region}" ]; then + AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}" + fi + SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}" +-TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token") ++TOKEN=$(get_token) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id") + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + +From dcd0050df5ba94905bc71d38b05cbb93f5687b61 Mon Sep 17 00:00:00 2001 +From: harshkiprofile +Date: Mon, 4 Nov 2024 20:05:33 +0530 +Subject: [PATCH 4/7] Move token renewal function to aws.sh for reuse in AWS + agent scripts + +--- + heartbeat/Makefile.am | 1 + + heartbeat/aws-vpc-move-ip | 1 + + heartbeat/aws-vpc-route53.in | 3 ++- + heartbeat/aws.sh | 46 ++++++++++++++++++++++++++++++++++++ + heartbeat/awseip | 3 ++- + heartbeat/awsvip | 1 + + heartbeat/ocf-shellfuncs.in | 33 +------------------------- + 7 files changed, 54 insertions(+), 34 deletions(-) + create mode 100644 heartbeat/aws.sh + +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 409847970..655740f14 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -218,6 +218,7 @@ ocfcommon_DATA = ocf-shellfuncs \ + ocf-rarun \ + ocf-distro \ + apache-conf.sh \ ++ aws.sh \ + http-mon.sh \ + sapdb-nosha.sh \ + sapdb.sh \ +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index fbeb2ee64..f4b0492f2 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -33,6 +33,7 @@ + + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. 
${OCF_FUNCTIONS_DIR}/aws.sh + + # Defaults + OCF_RESKEY_awscli_default="/usr/bin/aws" +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index eba2ed95c..f7e756782 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -43,6 +43,7 @@ + + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. ${OCF_FUNCTIONS_DIR}/aws.sh + + # Defaults + OCF_RESKEY_awscli_default="/usr/bin/aws" +@@ -377,7 +378,7 @@ r53_monitor() { + _get_ip() { + case $OCF_RESKEY_ip in + local|public) +- TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token") ++ TOKEN=$(get_token) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + IPADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4") + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC +diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh +new file mode 100644 +index 000000000..fc557109c +--- /dev/null ++++ b/heartbeat/aws.sh +@@ -0,0 +1,46 @@ ++#!/bin/sh ++# ++# ++# AWS Helper Scripts ++# ++# ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++# Defaults ++OCF_RESKEY_curl_retries_default="3" ++OCF_RESKEY_curl_sleep_default="1" ++ ++: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}} ++: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}} ++ ++# Function to enable reusable IMDS token retrieval for efficient repeated access ++# File to store the token and timestamp ++TOKEN_FILE="/tmp/.imds_token" ++TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours) ++TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining ++ ++# Function to fetch a new token ++fetch_new_token() { ++ TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME'" "http://169.254.169.254/latest/api/token") ++ echo "$TOKEN $(date +%s)" > "$TOKEN_FILE" ++ echo "$TOKEN" ++} ++ ++# Function to retrieve or renew the token ++get_token() { ++ if [ -f "$TOKEN_FILE" ]; then ++ read -r STORED_TOKEN STORED_TIMESTAMP < "$TOKEN_FILE" ++ CURRENT_TIME=$(date +%s) ++ ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP)) ++ ++ if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then ++ # Token is still valid ++ echo "$STORED_TOKEN" ++ return ++ fi ++ fi ++ # Fetch a new token if not valid ++ fetch_new_token ++} +\ No newline at end of file +diff --git a/heartbeat/awseip b/heartbeat/awseip +index ffb6223a1..049c2e566 100755 +--- a/heartbeat/awseip ++++ b/heartbeat/awseip +@@ -38,6 +38,7 @@ + + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. ${OCF_FUNCTIONS_DIR}/aws.sh + + ####################################################################### + +@@ -306,7 +307,7 @@ fi + ELASTIC_IP="${OCF_RESKEY_elastic_ip}" + ALLOCATION_ID="${OCF_RESKEY_allocation_id}" + PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}" +-TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token") ++TOKEN=$(get_token) + [ $? 
-ne 0 ] && exit $OCF_ERR_GENERIC + INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id") + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC +diff --git a/heartbeat/awsvip b/heartbeat/awsvip +index ca19ac086..de67981d8 100755 +--- a/heartbeat/awsvip ++++ b/heartbeat/awsvip +@@ -37,6 +37,7 @@ + + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. ${OCF_FUNCTIONS_DIR}/aws.sh + + ####################################################################### + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 0c4632cf9..922c6ea45 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -1110,35 +1110,4 @@ ocf_is_true "$OCF_TRACE_RA" && ocf_start_trace + # pacemaker sets HA_use_logd, some others use HA_LOGD :/ + if ocf_is_true "$HA_use_logd"; then + : ${HA_LOGD:=yes} +-fi +- +-# File to store the token and timestamp +-TOKEN_FILE="/tmp/.imds_token" +-TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours) +-TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining +- +-# Function to fetch a new token +-fetch_new_token() { +- TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME") +- echo "$TOKEN $(date +%s)" > "$TOKEN_FILE" +- echo "$TOKEN" +-} +- +-# Function to retrieve or renew the token +-get_token() { +- if [[ -f "$TOKEN_FILE" ]]; then +- read -r STORED_TOKEN STORED_TIMESTAMP < "$TOKEN_FILE" +- CURRENT_TIME=$(date +%s) +- ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP)) +- +- if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then +- # Token is still valid +- echo "$STORED_TOKEN" +- return +- fi +- fi +- # Fetch a new token if not valid +- fetch_new_token +-} +- +- ++fi +\ No newline at end of file + +From 9f7be201923c8eab1b121f2067ed74a69841cf8a Mon Sep 17 00:00:00 2001 +From: harshkiprofile +Date: Tue, 5 Nov 2024 19:12:34 +0530 +Subject: [PATCH 5/7] Refactor to use common temp path and update shell syntax + +--- + heartbeat/Makefile.am | 2 +- + heartbeat/aws.sh | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am +index 655740f14..8352f3a3d 100644 +--- a/heartbeat/Makefile.am ++++ b/heartbeat/Makefile.am +@@ -218,7 +218,7 @@ ocfcommon_DATA = ocf-shellfuncs \ + ocf-rarun \ + ocf-distro \ + apache-conf.sh \ +- aws.sh \ ++ aws.sh \ + http-mon.sh \ + sapdb-nosha.sh \ + sapdb.sh \ +diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh +index fc557109c..c77f93b91 100644 +--- a/heartbeat/aws.sh ++++ b/heartbeat/aws.sh +@@ -17,7 +17,7 @@ OCF_RESKEY_curl_sleep_default="1" + + # Function to enable reusable IMDS token retrieval for efficient repeated access + # File to store the token and timestamp +-TOKEN_FILE="/tmp/.imds_token" ++TOKEN_FILE="${HA_RSCTMP}/.aws_imds_token" + TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours) + TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining + +@@ -35,7 +35,7 @@ get_token() { + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP)) + +- if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then ++ if [ "$ELAPSED_TIME" -lt "$((TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD))" ]; then + # Token is still valid + echo "$STORED_TOKEN" + return + +From 4f61048064d1df3bebdb5c1441cf0020f213c01b Mon Sep 17 00:00:00 2001 
+From: harshkiprofile +Date: Tue, 5 Nov 2024 19:30:15 +0530 +Subject: [PATCH 6/7] Consolidate curl_retry and curl_sleep variable to a + single location in aws.sh + +--- + heartbeat/aws-vpc-move-ip | 4 ---- + heartbeat/aws-vpc-route53.in | 4 ---- + heartbeat/awseip | 4 ---- + heartbeat/awsvip | 4 ---- + 4 files changed, 16 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index f4b0492f2..3aa9ceb02 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -48,8 +48,6 @@ OCF_RESKEY_interface_default="eth0" + OCF_RESKEY_iflabel_default="" + OCF_RESKEY_monapi_default="false" + OCF_RESKEY_lookup_type_default="InstanceId" +-OCF_RESKEY_curl_retries_default="3" +-OCF_RESKEY_curl_sleep_default="1" + + : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} + : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} +@@ -63,8 +61,6 @@ OCF_RESKEY_curl_sleep_default="1" + : ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}} + : ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}} + : ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}} +-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}} +-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}} + ####################################################################### + + +diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index f7e756782..85c8de3c1 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -54,8 +54,6 @@ OCF_RESKEY_hostedzoneid_default="" + OCF_RESKEY_fullname_default="" + OCF_RESKEY_ip_default="local" + OCF_RESKEY_ttl_default=10 +-OCF_RESKEY_curl_retries_default="3" +-OCF_RESKEY_curl_sleep_default="1" + + : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} + : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} +@@ -65,8 +63,6 @@ OCF_RESKEY_curl_sleep_default="1" + : ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}} + : ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}} + : ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}} +-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}} +-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}} + + usage() { + cat <<-EOT +diff --git a/heartbeat/awseip b/heartbeat/awseip +index 049c2e566..4b1c3bc6a 100755 +--- a/heartbeat/awseip ++++ b/heartbeat/awseip +@@ -50,16 +50,12 @@ OCF_RESKEY_auth_type_default="key" + OCF_RESKEY_profile_default="default" + OCF_RESKEY_region_default="" + OCF_RESKEY_api_delay_default="3" +-OCF_RESKEY_curl_retries_default="3" +-OCF_RESKEY_curl_sleep_default="1" + + : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} + : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} + : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} + : ${OCF_RESKEY_region=${OCF_RESKEY_region_default}} + : ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}} +-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}} +-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}} + + meta_data() { + cat < +Date: Tue, 5 Nov 2024 20:50:24 +0530 +Subject: [PATCH 7/7] aws.sh needs to added to be symlinkstargets in + doc/man/Makefile.am + +--- + doc/man/Makefile.am | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am +index ef7639bff..447f5cba3 100644 +--- a/doc/man/Makefile.am ++++ b/doc/man/Makefile.am +@@ -42,7 +42,7 @@ radir = $(abs_top_builddir)/heartbeat + # required for out-of-tree build + symlinkstargets = \ + ocf-distro ocf.py ocf-rarun ocf-returncodes \ +- findif.sh apache-conf.sh http-mon.sh 
mysql-common.sh \ ++ findif.sh apache-conf.sh aws.sh http-mon.sh mysql-common.sh \ + nfsserver-redhat.sh openstack-common.sh ora-common.sh + + preptree: diff --git a/RHEL-66293-2-aws-agents-reuse-imds-token-improvements.patch b/RHEL-66293-2-aws-agents-reuse-imds-token-improvements.patch new file mode 100644 index 0000000..2ba9acf --- /dev/null +++ b/RHEL-66293-2-aws-agents-reuse-imds-token-improvements.patch @@ -0,0 +1,161 @@ +From cc5ffa5e599c974c426e93faa821b342e96b916d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 11 Nov 2024 12:46:27 +0100 +Subject: [PATCH 1/2] aws.sh: chmod 600 $TOKEN_FILE, add get_instance_id() with + DMI support, and use get_instance_id() in AWS agents + +--- + heartbeat/aws-vpc-move-ip | 2 +- + heartbeat/aws.sh | 30 +++++++++++++++++++++++++++--- + heartbeat/awseip | 2 +- + heartbeat/awsvip | 2 +- + 4 files changed, 30 insertions(+), 6 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 3aa9ceb02..09ae68b57 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -269,7 +269,7 @@ ec2ip_validate() { + + TOKEN=$(get_token) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC +- EC2_INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id") ++ EC2_INSTANCE_ID=$(get_instance_id) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + + if [ -z "${EC2_INSTANCE_ID}" ]; then +diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh +index c77f93b91..9cd343c16 100644 +--- a/heartbeat/aws.sh ++++ b/heartbeat/aws.sh +@@ -9,8 +9,8 @@ + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs + + # Defaults +-OCF_RESKEY_curl_retries_default="3" +-OCF_RESKEY_curl_sleep_default="1" ++OCF_RESKEY_curl_retries_default="4" ++OCF_RESKEY_curl_sleep_default="3" + + : ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}} + : ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}} +@@ -20,11 +20,13 @@ OCF_RESKEY_curl_sleep_default="1" + TOKEN_FILE="${HA_RSCTMP}/.aws_imds_token" + TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours) + TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining ++DMI_FILE="/sys/devices/virtual/dmi/id/board_asset_tag" # Only supported on nitro-based instances. + + # Function to fetch a new token + fetch_new_token() { + TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME'" "http://169.254.169.254/latest/api/token") + echo "$TOKEN $(date +%s)" > "$TOKEN_FILE" ++ chmod 600 "$TOKEN_FILE" + echo "$TOKEN" + } + +@@ -43,4 +45,26 @@ get_token() { + fi + # Fetch a new token if not valid + fetch_new_token +-} +\ No newline at end of file ++} ++ ++get_instance_id() { ++ local INSTANCE_ID ++ ++ # Try to get the EC2 instance ID from DMI first before falling back to IMDS. ++ ocf_log debug "EC2: Attempt to get EC2 Instance ID from local file." ++ if [ -r "$DMI_FILE" ] && [ -s "$DMI_FILE" ]; then ++ INSTANCE_ID="$(cat "$DMI_FILE")" ++ case "$INSTANCE_ID" in ++ i-0*) echo "$INSTANCE_ID"; return "$OCF_SUCCESS" ;; ++ esac ++ fi ++ ++ INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id") ++ if [ $? 
-ne 0 ]; then ++ ocf_exit_reason "Failed to get EC2 Instance ID" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ echo "$INSTANCE_ID" ++ return "$OCF_SUCCESS" ++} +diff --git a/heartbeat/awseip b/heartbeat/awseip +index 4b1c3bc6a..7f38376dc 100755 +--- a/heartbeat/awseip ++++ b/heartbeat/awseip +@@ -305,7 +305,7 @@ ALLOCATION_ID="${OCF_RESKEY_allocation_id}" + PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}" + TOKEN=$(get_token) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC +-INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id") ++INSTANCE_ID=$(get_instance_id) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + + case $__OCF_ACTION in +diff --git a/heartbeat/awsvip b/heartbeat/awsvip +index 8c71e7fac..0856ac5e4 100755 +--- a/heartbeat/awsvip ++++ b/heartbeat/awsvip +@@ -265,7 +265,7 @@ fi + SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}" + TOKEN=$(get_token) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC +-INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id") ++INSTANCE_ID=$(get_instance_id) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + MAC_ADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/mac") + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + +From b8d3ecc6a8ce4baf4b28d02978dd573728ccf5fa Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 18 Nov 2024 11:10:42 +0100 +Subject: [PATCH 2/2] aws.sh/ocf-shellfuncs: add ability to fresh token if it's + invalid + +--- + heartbeat/aws.sh | 1 + + heartbeat/ocf-shellfuncs.in | 11 ++++++++++- + 2 files changed, 11 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh +index 9cd343c16..64f2e13a7 100644 +--- a/heartbeat/aws.sh ++++ b/heartbeat/aws.sh +@@ -18,6 +18,7 @@ OCF_RESKEY_curl_sleep_default="3" + # Function to enable reusable IMDS token retrieval for efficient repeated access + # File to store the token and timestamp + TOKEN_FILE="${HA_RSCTMP}/.aws_imds_token" ++TOKEN_FUNC="fetch_new_token" # Used by curl_retry() if saved token is invalid + TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours) + TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining + DMI_FILE="/sys/devices/virtual/dmi/id/board_asset_tag" # Only supported on nitro-based instances. +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 922c6ea45..8e51fa3c8 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -697,6 +697,15 @@ curl_retry() + + ocf_log debug "result: $result" + [ $rc -eq 0 ] && break ++ if [ -n "$TOKEN" ] && [ -n "$TOKEN_FILE" ] && \ ++ [ -f "$TOKEN_FILE" ] && [ -n "$TOKEN_FUNC" ] && \ ++ echo "$result" | grep -q "The requested URL returned error: 401$"; then ++ local OLD_TOKEN="$TOKEN" ++ ocf_log err "Token invalid. Getting new token." ++ TOKEN=$($TOKEN_FUNC) ++ [ $? 
-ne 0 ] && exit $OCF_ERR_GENERIC ++ args=$(echo "$args" | sed "s/$OLD_TOKEN/$TOKEN/") ++ fi + sleep $sleep + done + +@@ -1110,4 +1119,4 @@ ocf_is_true "$OCF_TRACE_RA" && ocf_start_trace + # pacemaker sets HA_use_logd, some others use HA_LOGD :/ + if ocf_is_true "$HA_use_logd"; then + : ${HA_LOGD:=yes} +-fi +\ No newline at end of file ++fi diff --git a/RHEL-68740-awsvip-add-interface-parameter.patch b/RHEL-68740-awsvip-add-interface-parameter.patch new file mode 100644 index 0000000..c7bf67f --- /dev/null +++ b/RHEL-68740-awsvip-add-interface-parameter.patch @@ -0,0 +1,184 @@ +From 392d40048a25d7cb73ec5b5e9f7a5862f7a3fd48 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 11 Nov 2024 12:22:27 +0100 +Subject: [PATCH 1/2] aws.sh: add get_interface_mac() + +--- + heartbeat/aws.sh | 21 +++++++++++++++++++++ + 1 file changed, 21 insertions(+) + +diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh +index 64f2e13a7..ebb4eb1f4 100644 +--- a/heartbeat/aws.sh ++++ b/heartbeat/aws.sh +@@ -69,3 +69,24 @@ get_instance_id() { + echo "$INSTANCE_ID" + return "$OCF_SUCCESS" + } ++ ++get_interface_mac() { ++ local MAC_FILE MAC_ADDR rc ++ MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address" ++ if [ -f "$MAC_FILE" ]; then ++ cmd="cat ${MAC_FILE}" ++ else ++ cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3" ++ fi ++ ocf_log debug "executing command: $cmd" ++ MAC_ADDR="$(eval $cmd)" ++ rc=$? ++ if [ $rc != 0 ]; then ++ ocf_log warn "command failed, rc: $rc" ++ return $OCF_ERR_GENERIC ++ fi ++ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" ++ ++ echo $MAC_ADDR ++ return $OCF_SUCCESS ++} + +From 87337ac4da931d5a53c83d53d4bab17ee123ba9f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 11 Nov 2024 12:26:38 +0100 +Subject: [PATCH 2/2] awsvip: let user specify which interface to use, and make + the parameter optional in aws-vpc-move-ip + +--- + heartbeat/aws-vpc-move-ip | 20 ++++---------------- + heartbeat/aws.sh | 4 +++- + heartbeat/awsvip | 24 +++++++++++++++++------- + 3 files changed, 24 insertions(+), 24 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 09ae68b57..2afc0ba53 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -157,7 +157,7 @@ Role to use to query/update the route table + + + +- ++ + + Name of the network interface, i.e. eth0 + +@@ -321,7 +321,7 @@ ec2ip_monitor() { + ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call" + fi + +- cmd="ip addr show to $OCF_RESKEY_ip up" ++ cmd="ip addr show dev $OCF_RESKEY_interface to $OCF_RESKEY_ip up" + ocf_log debug "executing command: $cmd" + RESULT=$($cmd | grep "$OCF_RESKEY_ip") + if [ -z "$RESULT" ]; then +@@ -331,7 +331,7 @@ ec2ip_monitor() { + level="info" + fi + +- ocf_log "$level" "IP $OCF_RESKEY_ip not assigned to running interface" ++ ocf_log "$level" "IP $OCF_RESKEY_ip not assigned to interface $OCF_RESKEY_interface" + return $OCF_NOT_RUNNING + fi + +@@ -369,19 +369,7 @@ ec2ip_drop() { + } + + ec2ip_get_instance_eni() { +- MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address" +- if [ -f $MAC_FILE ]; then +- cmd="cat ${MAC_FILE}" +- else +- cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3" +- fi +- ocf_log debug "executing command: $cmd" +- MAC_ADDR="$(eval $cmd)" +- rc=$? 
+- if [ $rc != 0 ]; then +- ocf_log warn "command failed, rc: $rc" +- return $OCF_ERR_GENERIC +- fi ++ MAC_ADDR=$(get_interface_mac) + ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + + cmd="curl_retry \"$OCF_RESKEY_curl_retries\" \"$OCF_RESKEY_curl_sleep\" \"--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'\" \"http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id\"" +diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh +index ebb4eb1f4..216033afe 100644 +--- a/heartbeat/aws.sh ++++ b/heartbeat/aws.sh +@@ -73,7 +73,9 @@ get_instance_id() { + get_interface_mac() { + local MAC_FILE MAC_ADDR rc + MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address" +- if [ -f "$MAC_FILE" ]; then ++ if [ -z "$OCF_RESKEY_interface" ]; then ++ cmd="curl_retry \"$OCF_RESKEY_curl_retries\" \"$OCF_RESKEY_curl_sleep\" \"--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'\" \"http://169.254.169.254/latest/meta-data/mac\"" ++ elif [ -f "$MAC_FILE" ]; then + cmd="cat ${MAC_FILE}" + else + cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3" +diff --git a/heartbeat/awsvip b/heartbeat/awsvip +index 0856ac5e4..015180d5a 100755 +--- a/heartbeat/awsvip ++++ b/heartbeat/awsvip +@@ -49,12 +49,14 @@ OCF_RESKEY_auth_type_default="key" + OCF_RESKEY_profile_default="default" + OCF_RESKEY_region_default="" + OCF_RESKEY_api_delay_default="3" ++OCF_RESKEY_interface_default="" + + : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} + : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} + : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} + : ${OCF_RESKEY_region=${OCF_RESKEY_region_default}} + : ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}} ++: ${OCF_RESKEY_interface=${OCF_RESKEY_interface_default}} + + meta_data() { + cat < + + ++ ++ ++Name of the network interface, i.e. eth0 ++ ++network interface name ++ ++ ++ + + + curl retries before failing +@@ -207,16 +217,16 @@ awsvip_stop() { + } + + awsvip_monitor() { +- $AWSCLI_CMD ec2 describe-instances \ +- --instance-id "${INSTANCE_ID}" \ +- --query 'Reservations[].Instances[].NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \ ++ $AWSCLI_CMD ec2 describe-network-interfaces \ ++ --network-interface-ids "${NETWORK_ID}" \ ++ --query 'NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \ + --output text | \ + grep -qE "(^|\s)${SECONDARY_PRIVATE_IP}(\s|$)" +- RET=$? +- +- if [ $RET -ne 0 ]; then ++ if [ $? -ne 0 ]; then ++ [ "$__OCF_ACTION" = "monitor" ] && ! ocf_is_probe && ocf_log error "IP $SECONDARY_PRIVATE_IP not assigned to interface ${NETWORK_ID}" + return $OCF_NOT_RUNNING + fi ++ + return $OCF_SUCCESS + } + +@@ -267,7 +277,7 @@ TOKEN=$(get_token) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + INSTANCE_ID=$(get_instance_id) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC +-MAC_ADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/mac") ++MAC_ADDRESS=$(get_interface_mac) + [ $? -ne 0 ] && exit $OCF_ERR_GENERIC + NETWORK_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id") + [ $? 
-ne 0 ] && exit $OCF_ERR_GENERIC diff --git a/SOURCES/RHEL-72956-1-openstack-cinder-volume-wait-for-volume-to-be-available.patch b/RHEL-72954-1-openstack-cinder-volume-wait-for-volume-to-be-available.patch similarity index 100% rename from SOURCES/RHEL-72956-1-openstack-cinder-volume-wait-for-volume-to-be-available.patch rename to RHEL-72954-1-openstack-cinder-volume-wait-for-volume-to-be-available.patch diff --git a/SOURCES/RHEL-72956-2-openstack-cinder-volume-fix-detach-not-working-during-start-action.patch b/RHEL-72954-2-openstack-cinder-volume-fix-detach-not-working-during-start-action.patch similarity index 100% rename from SOURCES/RHEL-72956-2-openstack-cinder-volume-fix-detach-not-working-during-start-action.patch rename to RHEL-72954-2-openstack-cinder-volume-fix-detach-not-working-during-start-action.patch diff --git a/RHEL-73689-1-ocf-shellfuncs-fix-syntax-error-in-crm_mon_no_validation.patch b/RHEL-73689-1-ocf-shellfuncs-fix-syntax-error-in-crm_mon_no_validation.patch new file mode 100644 index 0000000..a936d48 --- /dev/null +++ b/RHEL-73689-1-ocf-shellfuncs-fix-syntax-error-in-crm_mon_no_validation.patch @@ -0,0 +1,23 @@ +From 17bf880b110c59a69e677b3961b0ebe4e9a483d9 Mon Sep 17 00:00:00 2001 +From: Sergey Safarov +Date: Sun, 12 Jan 2025 20:02:53 +0200 +Subject: [PATCH] ocf-shellfuncs: fixed bash systax error added at + a25f08cf98d784894df9c52960eff5ccef059393 + +--- + heartbeat/ocf-shellfuncs.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 8e51fa3c8..6b35b89c7 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -742,7 +742,7 @@ crm_mon_no_validation() + # The subshell prevents parsing error with incompatible shells + ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.19.7" + if [ $res -eq 2 ] || [ $res -eq 1 ]; then +- "$SHELL" -c "CIB_file=<(${HA_SBIN_DIR}/cibadmin -Q \ ++ "$SHELL" -c "CIB_file=<(${HA_SBIN_DIR}/cibadmin -Q) \ + ${HA_SBIN_DIR}/crm_mon \$*" -- $* + else + "$SHELL" -c "CIB_file=<(${HA_SBIN_DIR}/cibadmin -Q | sed 's/validate-with=\"[^\"]*\"/validate-with=\"none\"/') \ diff --git a/RHEL-73689-2-ocf-shellfuncs-add-missing-variable-in-crm_mon_no_validation.patch b/RHEL-73689-2-ocf-shellfuncs-add-missing-variable-in-crm_mon_no_validation.patch new file mode 100644 index 0000000..77455bb --- /dev/null +++ b/RHEL-73689-2-ocf-shellfuncs-add-missing-variable-in-crm_mon_no_validation.patch @@ -0,0 +1,22 @@ +From 9bb30d743d04905dec474bd71e31553e5189a570 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 11 Feb 2025 10:38:59 +0100 +Subject: [PATCH] ocf-shellfuncs: add missing variable in + crm_mon_no_validation() + +--- + heartbeat/ocf-shellfuncs.in | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in +index 6b35b89c7..e834ded95 100644 +--- a/heartbeat/ocf-shellfuncs.in ++++ b/heartbeat/ocf-shellfuncs.in +@@ -741,6 +741,7 @@ crm_mon_no_validation() + { + # The subshell prevents parsing error with incompatible shells + ocf_version_cmp "$OCF_RESKEY_crm_feature_set" "3.19.7" ++ res=$? 
+ if [ $res -eq 2 ] || [ $res -eq 1 ]; then + "$SHELL" -c "CIB_file=<(${HA_SBIN_DIR}/cibadmin -Q) \ + ${HA_SBIN_DIR}/crm_mon \$*" -- $* diff --git a/RHEL-75574-1-all-agents-use-grep-E-F.patch b/RHEL-75574-1-all-agents-use-grep-E-F.patch new file mode 100644 index 0000000..a1a104c --- /dev/null +++ b/RHEL-75574-1-all-agents-use-grep-E-F.patch @@ -0,0 +1,243 @@ +From 618ee5d013b6f4caeb703ffee6d2d696db887a1f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 21 Jan 2025 10:07:46 +0100 +Subject: [PATCH] All agents: use detected grep -E/-F instead of egrep/fgrep + when detected + +--- + configure.ac | 4 ++-- + heartbeat/IPaddr2 | 2 +- + heartbeat/ManageVE.in | 2 +- + heartbeat/SAPInstance | 2 +- + heartbeat/VirtualDomain | 4 ++-- + heartbeat/WAS | 2 +- + heartbeat/WAS6 | 2 +- + heartbeat/docker | 2 +- + heartbeat/eDir88.in | 2 +- + heartbeat/mysql-proxy | 2 +- + heartbeat/portblock | 2 +- + heartbeat/rabbitmq-server-ha | 2 +- + heartbeat/sapdb.sh | 2 +- + heartbeat/smb-share.in | 4 ++-- + heartbeat/symlink | 2 +- + 15 files changed, 18 insertions(+), 18 deletions(-) + +diff --git a/configure.ac b/configure.ac +index 951a05430f..bb2bbaded2 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -492,12 +492,12 @@ AC_PATH_PROGS(TEST, test) + AC_PATH_PROGS(PING, ping, /bin/ping) + AC_PATH_PROGS(IFCONFIG, ifconfig, /sbin/ifconfig) + AC_PATH_PROGS(MAILCMD, mailx mail, mail) +-AC_PATH_PROGS(EGREP, egrep) + AC_PATH_PROGS(RM, rm) ++AC_PROG_EGREP ++AC_PROG_FGREP + + AC_SUBST(BASH_SHELL) + AC_SUBST(MAILCMD) +-AC_SUBST(EGREP) + AC_SUBST(SHELL) + AC_SUBST(PING) + AC_SUBST(RM) +diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 +index 27cae2d11a..489826b814 100755 +--- a/heartbeat/IPaddr2 ++++ b/heartbeat/IPaddr2 +@@ -1028,7 +1028,7 @@ ip_served() { + echo "partial2" + return 0 + fi +- if egrep -q "(^|,)${IP_INC_NO}(,|$)" $IP_CIP_FILE ; then ++ if $EGREP -q "(^|,)${IP_INC_NO}(,|$)" $IP_CIP_FILE ; then + echo "ok" + return 0 + else +diff --git a/heartbeat/ManageVE.in b/heartbeat/ManageVE.in +index f07ca5bdc6..540addd946 100644 +--- a/heartbeat/ManageVE.in ++++ b/heartbeat/ManageVE.in +@@ -273,7 +273,7 @@ esac + declare -i veidtest1=$? + + # really a number? +-`echo "$OCF_RESKEY_veid" | egrep -q '^[[:digit:]]+$'` ++`echo "$OCF_RESKEY_veid" | $EGREP -q '^[[:digit:]]+$'` + + if [[ $veidtest1 != 1 || $? != 0 ]]; then + ocf_log err "OCF_RESKEY_veid not set or not a number." +diff --git a/heartbeat/SAPInstance b/heartbeat/SAPInstance +index 26fd541361..95140e9c45 100755 +--- a/heartbeat/SAPInstance ++++ b/heartbeat/SAPInstance +@@ -795,7 +795,7 @@ sapinstance_monitor() { + esac + + SEARCH=`echo "$OCF_RESKEY_MONITOR_SERVICES" | sed 's/\+/\\\+/g' | sed 's/\./\\\./g'` +- if [ `echo "$SERVICE" | egrep -c "$SEARCH"` -eq 1 ] ++ if [ `echo "$SERVICE" | $EGREP -c "$SEARCH"` -eq 1 ] + then + if [ $STATE -eq $OCF_NOT_RUNNING ] + then +diff --git a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain +index 3905695ae1..7db42bd12e 100755 +--- a/heartbeat/VirtualDomain ++++ b/heartbeat/VirtualDomain +@@ -975,7 +975,7 @@ VirtualDomain_migrate_to() { + sed "s/%n/$target_node/g"` + + # extract migrationuri from options +- elif echo "$migrate_opts" | fgrep -qs -- "--migrateuri="; then ++ elif echo "$migrate_opts" | $FGREP -qs -- "--migrateuri="; then + migrateuri=`echo "$migrate_opts" | + sed "s/.*--migrateuri=\([^ ]*\).*/\1/;s/%n/$target_node/g"` + +@@ -1148,7 +1148,7 @@ VirtualDomain_getconfig() { + VIRSH_OPTIONS="--connect=${OCF_RESKEY_hypervisor} --quiet" + + # Retrieve the domain name from the xml file. 
+- DOMAIN_NAME=`egrep '[[:space:]]*.*[[:space:]]*$' ${OCF_RESKEY_config} 2>/dev/null | sed -e 's/[[:space:]]*\(.*\)<\/name>[[:space:]]*$/\1/'` ++ DOMAIN_NAME=`$EGREP '[[:space:]]*.*[[:space:]]*$' ${OCF_RESKEY_config} 2>/dev/null | sed -e 's/[[:space:]]*\(.*\)<\/name>[[:space:]]*$/\1/'` + + EMULATOR_STATE="${HA_RSCTMP}/VirtualDomain-${DOMAIN_NAME}-emu.state" + } +diff --git a/heartbeat/WAS b/heartbeat/WAS +index 15b56e99e0..44aa83e20a 100755 +--- a/heartbeat/WAS ++++ b/heartbeat/WAS +@@ -227,7 +227,7 @@ WASPortInfo() { + esac + PortCount=`expr $PortCount + 1` + done +- netstat -ltnp 2>/dev/null| egrep -i "($pat) .*LISTEN" | sed 's%.*LISTEN *%%' ++ netstat -ltnp 2>/dev/null| $EGREP -i "($pat) .*LISTEN" | sed 's%.*LISTEN *%%' + } + + # +diff --git a/heartbeat/WAS6 b/heartbeat/WAS6 +index 9e18cd682d..e71eec9305 100755 +--- a/heartbeat/WAS6 ++++ b/heartbeat/WAS6 +@@ -196,7 +196,7 @@ WASPortInfo() { + esac + PortCount=`expr $PortCount + 1` + done +- netstat -ltnp 2>/dev/null| egrep -i "($pat) .*LISTEN" | sed 's%.*LISTEN *%%' ++ netstat -ltnp 2>/dev/null| $EGREP -i "($pat) .*LISTEN" | sed 's%.*LISTEN *%%' + } + + # +diff --git a/heartbeat/docker b/heartbeat/docker +index 50523db934..d51c468977 100755 +--- a/heartbeat/docker ++++ b/heartbeat/docker +@@ -512,7 +512,7 @@ image_exists() + # - image + # - repository:port/image + # - docker.io/image (some distro will display "docker.io/" as prefix) +- docker images | awk '{print $1 ":" $2}' | egrep -q -s "^(docker.io\/|${SERVER_NAME}\/)?${IMAGE_NAME}:${IMAGE_TAG}\$" ++ docker images | awk '{print $1 ":" $2}' | $EGREP -q -s "^(docker.io\/|${SERVER_NAME}\/)?${IMAGE_NAME}:${IMAGE_TAG}\$" + if [ $? -eq 0 ]; then + # image found + return 0 +diff --git a/heartbeat/eDir88.in b/heartbeat/eDir88.in +index cd945d2c31..9a21ff852d 100644 +--- a/heartbeat/eDir88.in ++++ b/heartbeat/eDir88.in +@@ -331,7 +331,7 @@ eDir_monitor() { + + # Monitor IDM first, as it will start before LDAP + if [ $MONITOR_IDM -eq 1 ]; then +- RET=$($NDSTRACE --config-file "$NDSCONF" -c modules | egrep -i '^vrdim.*Running' | awk '{print $1}') ++ RET=$($NDSTRACE --config-file "$NDSCONF" -c modules | $EGREP -i '^vrdim.*Running' | awk '{print $1}') + if [ "$RET" != "vrdim" ]; then + ocf_log err "eDirectory IDM engine isn't running ($NDSCONF)." + return $OCF_ERR_GENERIC +diff --git a/heartbeat/mysql-proxy b/heartbeat/mysql-proxy +index 013c5e4ec3..2815860d72 100755 +--- a/heartbeat/mysql-proxy ++++ b/heartbeat/mysql-proxy +@@ -572,7 +572,7 @@ mysqlproxy_validate_all() + fi + + # check for valid log-level +- echo $log_level | egrep -q "^(error|warning|info|message|debug|)$" ++ echo $log_level | $EGREP -q "^(error|warning|info|message|debug|)$" + if [ $? 
-ne 0 ]; then + ocf_log err "MySQL Proxy log level '$log_level' not in valid range error|warning|info|message|debug" + return $OCF_ERR_CONFIGURED +diff --git a/heartbeat/portblock b/heartbeat/portblock +index e88ecc2a13..450e372081 100755 +--- a/heartbeat/portblock ++++ b/heartbeat/portblock +@@ -529,7 +529,7 @@ IptablesStop() + CheckPort() { + # Examples of valid port: "1080", "1", "0080" + # Examples of invalid port: "1080bad", "0", "0000", "" +- echo $1 |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ echo $1 | $EGREP -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' + } + + IptablesValidateAll() +diff --git a/heartbeat/rabbitmq-server-ha b/heartbeat/rabbitmq-server-ha +index 8b3cd9ee90..f483380359 100755 +--- a/heartbeat/rabbitmq-server-ha ++++ b/heartbeat/rabbitmq-server-ha +@@ -529,7 +529,7 @@ proc_stop() + pid="none" + else + # check if provide just a number +- echo "${pid_param}" | egrep -q '^[0-9]+$' ++ echo "${pid_param}" | $EGREP -q '^[0-9]+$' + if [ $? -eq 0 ]; then + pid="${pid_param}" + elif [ -e "${pid_param}" ]; then # check if passed in a pid file +diff --git a/heartbeat/sapdb.sh b/heartbeat/sapdb.sh +index 66e9854b6c..dccd36e172 100755 +--- a/heartbeat/sapdb.sh ++++ b/heartbeat/sapdb.sh +@@ -238,7 +238,7 @@ sapdatabase_monitor() { + esac + + SEARCH=`echo "$OCF_RESKEY_MONITOR_SERVICES" | sed 's/\+/\\\+/g' | sed 's/\./\\\./g'` +- if [ `echo "$SERVICE" | egrep -c "$SEARCH"` -eq 1 ] ++ if [ `echo "$SERVICE" | $EGREP -c "$SEARCH"` -eq 1 ] + then + if [ $STATE -eq $OCF_NOT_RUNNING ] + then +diff --git a/heartbeat/smb-share.in b/heartbeat/smb-share.in +index 8a1a0a8604..3e5bf60bf1 100755 +--- a/heartbeat/smb-share.in ++++ b/heartbeat/smb-share.in +@@ -435,7 +435,7 @@ smb_share_stop() { + smb_share_monitor() { + RES=$(smbcontrol smbd ping > /dev/null 2>&1) + if [ $? -eq 0 ];then +- if [ $(testparm -s 2>/dev/null| egrep -c \\[$OCF_RESKEY_share\\]) -eq 1 ];then ++ if [ $(testparm -s 2>/dev/null| $EGREP -c \\[$OCF_RESKEY_share\\]) -eq 1 ];then + return $OCF_SUCCESS + else + return $OCF_NOT_RUNNING +@@ -449,7 +449,7 @@ smb_share_state() { + smb_share_checktmpmount + RES=$(smbcontrol smbd ping > /dev/null 2>&1) + if [ $? -eq 0 ];then +- if [ $(testparm -s 2>/dev/null| egrep -c \\[$OCF_RESKEY_share\\]) -eq 1 ];then ++ if [ $(testparm -s 2>/dev/null| $EGREP -c \\[$OCF_RESKEY_share\\]) -eq 1 ];then + ocf_log info "Samba share $OCF_RESKEY_share is active" + return $OCF_SUCCESS + else +diff --git a/heartbeat/symlink b/heartbeat/symlink +index decd9f74e5..82a667a01f 100755 +--- a/heartbeat/symlink ++++ b/heartbeat/symlink +@@ -123,7 +123,7 @@ symlink_monitor() { + ocf_log debug "$OCF_RESKEY_link exists but is not a symbolic link, will be moved to ${OCF_RESKEY_link}${OCF_RESKEY_backup_suffix} on start" + rc=$OCF_NOT_RUNNING + fi +- elif readlink -m "$OCF_RESKEY_link" | egrep -q "^$(readlink -m ${OCF_RESKEY_target})$"; then ++ elif readlink -m "$OCF_RESKEY_link" | $EGREP -q "^$(readlink -m ${OCF_RESKEY_target})$"; then + ocf_log debug "$OCF_RESKEY_link exists and is a symbolic link to ${OCF_RESKEY_target}." 
+ rc=$OCF_SUCCESS + else diff --git a/RHEL-75574-2-ocf-binaries-add-FGREP.patch b/RHEL-75574-2-ocf-binaries-add-FGREP.patch new file mode 100644 index 0000000..248c8f7 --- /dev/null +++ b/RHEL-75574-2-ocf-binaries-add-FGREP.patch @@ -0,0 +1,21 @@ +From 990d37a7209774f91b73ec9cc06e936a96db0f70 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 23 Jan 2025 16:04:39 +0100 +Subject: [PATCH] ocf-binaries: add FGREP + +--- + heartbeat/ocf-binaries.in | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/heartbeat/ocf-binaries.in b/heartbeat/ocf-binaries.in +index e9bf95fc28..e11ae1d6fb 100644 +--- a/heartbeat/ocf-binaries.in ++++ b/heartbeat/ocf-binaries.in +@@ -9,6 +9,7 @@ export PATH + # Binaries and binary options for use in Resource Agents + : ${AWK:=@AWK@} + : ${EGREP:="@EGREP@"} ++: ${FGREP:="@FGREP@"} + : ${IFCONFIG_A_OPT:="@IFCONFIG_A_OPT@"} + : ${MAILCMD:=@MAILCMD@} + : ${PING:=@PING@} diff --git a/RHEL-76007-Filesystem-dont-report-warnings-when-creating-resource.patch b/RHEL-76007-Filesystem-dont-report-warnings-when-creating-resource.patch new file mode 100644 index 0000000..92ce2bc --- /dev/null +++ b/RHEL-76007-Filesystem-dont-report-warnings-when-creating-resource.patch @@ -0,0 +1,23 @@ +From 2ba4b0c05902da4520a8931b9417b74c5659b5d1 Mon Sep 17 00:00:00 2001 +From: Satomi OSAWA +Date: Thu, 23 Jan 2025 10:55:28 +0900 +Subject: [PATCH] To avoid the Warning when executing the pcs resource create + command, the INFO log message in validate-all operation will be suppressed. + +--- + heartbeat/Filesystem | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem +index b48bee142..0c43220df 100755 +--- a/heartbeat/Filesystem ++++ b/heartbeat/Filesystem +@@ -1145,7 +1145,7 @@ check_binary $FSCK + check_binary $MOUNT + check_binary $UMOUNT + +-if [ "$OP" != "monitor" ]; then ++if [ "$OP" != "monitor" ] && [ "$OP" != "validate-all" ]; then + ocf_log info "Running $OP for $DEVICE on $MOUNTPOINT" + fi + diff --git a/RHEL-76037-1-storage-mon-remove-unused-variables.patch b/RHEL-76037-1-storage-mon-remove-unused-variables.patch new file mode 100644 index 0000000..9bfa2de --- /dev/null +++ b/RHEL-76037-1-storage-mon-remove-unused-variables.patch @@ -0,0 +1,23 @@ +From a1e22c5c612f369bac0830588642560dcea92e7c Mon Sep 17 00:00:00 2001 +From: Fujii Masao +Date: Sat, 9 Nov 2024 02:33:37 +0900 +Subject: [PATCH] Remove unused macro variables from storage_mon.c. 
+ +--- + tools/storage_mon.c | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/tools/storage_mon.c b/tools/storage_mon.c +index f94268f6f..2519a9e72 100644 +--- a/tools/storage_mon.c ++++ b/tools/storage_mon.c +@@ -33,9 +33,6 @@ + #define DEFAULT_PIDFILE HA_VARRUNDIR "storage_mon.pid" + #define DEFAULT_ATTRNAME "#health-storage_mon" + #define SMON_GET_RESULT_COMMAND "get_check_value" +-#define SMON_RESULT_OK "green" +-#define SMON_RESULT_NG "red" +-#define SMON_RESULT_COMMAND_ERROR "unknown command" + #define SMON_BUFF_1MEG 1048576 + #define SMON_MAX_IPCSNAME 256 + #define SMON_MAX_MSGSIZE 128 diff --git a/RHEL-76037-2-storage-mon-fix-daemon-mode-bug-that-caused-delayed-initial-score.patch b/RHEL-76037-2-storage-mon-fix-daemon-mode-bug-that-caused-delayed-initial-score.patch new file mode 100644 index 0000000..c3366c7 --- /dev/null +++ b/RHEL-76037-2-storage-mon-fix-daemon-mode-bug-that-caused-delayed-initial-score.patch @@ -0,0 +1,79 @@ +From 46715c638829598d949dffab0898fe4c07074895 Mon Sep 17 00:00:00 2001 +From: Hideo Yamauchi +Date: Thu, 21 Nov 2024 15:21:19 +0900 +Subject: [PATCH 1/2] High: storage-mon: Correct the timing of setting + notification values to storage-mon(RA) clients. + +--- + tools/storage_mon.c | 17 ++++++++--------- + 1 file changed, 8 insertions(+), 9 deletions(-) + +diff --git a/tools/storage_mon.c b/tools/storage_mon.c +index 2519a9e72..27d2ff1d1 100644 +--- a/tools/storage_mon.c ++++ b/tools/storage_mon.c +@@ -320,7 +320,14 @@ static int32_t sigchld_handler(int32_t sig, void *data) + + finished_count++; + test_forks[index] = 0; +- ++ ++ /* Update the result value for the client response once all checks have completed. */ ++ if (device_count == finished_count) { ++ response_final_score = final_score; ++ if (!daemon_check_first_all_devices) { ++ daemon_check_first_all_devices = TRUE; ++ } ++ } + } + } + } else { +@@ -441,15 +448,7 @@ static int test_device_main(gpointer data) + if (is_child_runnning()) { + device_check = FALSE; + } +- +- if (device_count == finished_count && device_check) { +- /* Update the result value for the client response once all checks have completed. */ +- response_final_score = final_score; + +- if (!daemon_check_first_all_devices) { +- daemon_check_first_all_devices = TRUE; +- } +- } + } + + if (device_check) { + +From 1201390fb219d1b566c5d31463daacef60c31ab4 Mon Sep 17 00:00:00 2001 +From: Hideo Yamauchi +Date: Thu, 21 Nov 2024 15:43:33 +0900 +Subject: [PATCH 2/2] Mid: storage-mon RA: Wait until monitor confirms the + startup pid according to the OCF resource specification. + +--- + heartbeat/storage-mon.in | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in +index 284dec30f..7c9943d4f 100644 +--- a/heartbeat/storage-mon.in ++++ b/heartbeat/storage-mon.in +@@ -325,6 +325,17 @@ storage-mon_start() { + if [ "$?" -ne 0 ]; then + return $OCF_ERR_GENERIC + fi ++ ++ #Wait until monitor confirms the startup pid according to the ocf resource specification. ++ while true; do ++ storage-mon_monitor pid_check_only ++ rc="$?" ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ break ++ fi ++ sleep 1 ++ ocf_log debug "storage-mon daemon still hasn't started yet. Waiting..." 
++ done + fi + } + diff --git a/RHEL-76037-3-storage-mon-only-use-underscores-in-functions.patch b/RHEL-76037-3-storage-mon-only-use-underscores-in-functions.patch new file mode 100644 index 0000000..80945fa --- /dev/null +++ b/RHEL-76037-3-storage-mon-only-use-underscores-in-functions.patch @@ -0,0 +1,148 @@ +From b72b329a45c058fda720c6739f881b9597fc8b30 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 23 Jan 2025 16:18:20 +0100 +Subject: [PATCH] storage-mon: replace dashes with underscores in functions + +Dashes in function names produce "`storage-mon_usage': not a valid identifier" +error when run with sh -x. +--- + heartbeat/storage-mon.in | 44 ++++++++++++++++++++-------------------- + 1 file changed, 22 insertions(+), 22 deletions(-) + +diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in +index 7c9943d4f..5edb96979 100644 +--- a/heartbeat/storage-mon.in ++++ b/heartbeat/storage-mon.in +@@ -152,7 +152,7 @@ END + + ####################################################################### + +-storage-mon_usage() { ++storage_mon_usage() { + cat < /dev/null 2>&1 +@@ -298,20 +298,20 @@ storage-mon_monitor() { + esac + done + +- storage-mon_update_attribute $status ++ storage_mon_update_attribute $status + return "$?" + fi + } + +-storage-mon_start() { ++storage_mon_start() { + if ! ocf_is_true "$OCF_RESKEY_daemonize"; then +- storage-mon_monitor ++ storage_mon_monitor + if [ $? -eq $OCF_SUCCESS ]; then + return $OCF_SUCCESS + fi + touch "${OCF_RESKEY_state_file}" + else +- storage-mon_init ++ storage_mon_init + # generate command line + cmdline="" + for DRIVE in ${OCF_RESKEY_drives}; do +@@ -328,7 +328,7 @@ storage-mon_start() { + + #Wait until monitor confirms the startup pid according to the ocf resource specification. + while true; do +- storage-mon_monitor pid_check_only ++ storage_mon_monitor pid_check_only + rc="$?" + if [ $rc -eq $OCF_SUCCESS ]; then + break +@@ -339,8 +339,8 @@ storage-mon_start() { + fi + } + +-storage-mon_stop() { +- storage-mon_monitor ++storage_mon_stop() { ++ storage_mon_monitor + rc=$? + + if ! ocf_is_true "$OCF_RESKEY_daemonize"; then +@@ -363,7 +363,7 @@ storage-mon_stop() { + fi + + while true; do +- storage-mon_monitor pid_check_only ++ storage_mon_monitor pid_check_only + rc="$?" + case "$rc" in + $OCF_SUCCESS) +@@ -379,8 +379,8 @@ storage-mon_stop() { + return $OCF_SUCCESS + } + +-storage-mon_validate() { +- storage-mon_init ++storage_mon_validate() { ++ storage_mon_init + + if ! ocf_is_true "$OCF_RESKEY_daemonize"; then + # Is the state directory writable? +@@ -396,13 +396,13 @@ storage-mon_validate() { + } + + case "$__OCF_ACTION" in +- start) storage-mon_start;; +- stop) storage-mon_stop;; +- monitor) storage-mon_monitor;; +- validate-all) storage-mon_validate;; ++ start) storage_mon_start;; ++ stop) storage_mon_stop;; ++ monitor) storage_mon_monitor;; ++ validate-all) storage_mon_validate;; + meta-data) meta_data;; +- usage|help) storage-mon_usage $OCF_SUCCESS;; +- *) storage-mon_usage $OCF_ERR_UNIMPLEMENTED;; ++ usage|help) storage_mon_usage $OCF_SUCCESS;; ++ *) storage_mon_usage $OCF_ERR_UNIMPLEMENTED;; + esac + rc=$? 
+ ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" diff --git a/RHEL-76037-4-storage-mon-check-if-daemon-is-already-running.patch b/RHEL-76037-4-storage-mon-check-if-daemon-is-already-running.patch new file mode 100644 index 0000000..a2d262d --- /dev/null +++ b/RHEL-76037-4-storage-mon-check-if-daemon-is-already-running.patch @@ -0,0 +1,25 @@ +From c6f520344e830a7c946b2222f9f251be038b1b28 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 24 Jan 2025 10:01:30 +0100 +Subject: [PATCH] storage-mon: check if daemon is already running during + start-action + +--- + heartbeat/storage-mon.in | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in +index 5edb96979..00e42f68d 100644 +--- a/heartbeat/storage-mon.in ++++ b/heartbeat/storage-mon.in +@@ -311,6 +311,10 @@ storage_mon_start() { + fi + touch "${OCF_RESKEY_state_file}" + else ++ storage_mon_monitor pid_check_only ++ if [ $? -eq $OCF_SUCCESS ]; then ++ return $OCF_SUCCESS ++ fi + storage_mon_init + # generate command line + cmdline="" diff --git a/RHEL-76037-5-storage-mon-log-storage_mon-is-already-running-in-start-action.patch b/RHEL-76037-5-storage-mon-log-storage_mon-is-already-running-in-start-action.patch new file mode 100644 index 0000000..f49311a --- /dev/null +++ b/RHEL-76037-5-storage-mon-log-storage_mon-is-already-running-in-start-action.patch @@ -0,0 +1,22 @@ +From de51a1705ce761f1fb5f1b2294cfc1153af70c1c Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 27 Jan 2025 09:54:06 +0100 +Subject: [PATCH] storage-mon: log "storage_mon is already running" in + start-action + +--- + heartbeat/storage-mon.in | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in +index 00e42f68d..d60db4ad4 100644 +--- a/heartbeat/storage-mon.in ++++ b/heartbeat/storage-mon.in +@@ -313,6 +313,7 @@ storage_mon_start() { + else + storage_mon_monitor pid_check_only + if [ $? -eq $OCF_SUCCESS ]; then ++ ocf_log info "storage_mon is already running. PID=`cat $PIDFILE`" + return $OCF_SUCCESS + fi + storage_mon_init diff --git a/RHEL-79822-1-portblock-fix-version-detection.patch b/RHEL-79822-1-portblock-fix-version-detection.patch new file mode 100644 index 0000000..152a7cd --- /dev/null +++ b/RHEL-79822-1-portblock-fix-version-detection.patch @@ -0,0 +1,46 @@ +From 575dcec0cd97af26623975cbc43564d25b91b346 Mon Sep 17 00:00:00 2001 +From: abrychcy +Date: Wed, 12 Feb 2025 19:49:22 +0100 +Subject: [PATCH 1/2] Update portblock: improve version detection of iptables + +The awk script does not remove suffixes after the version string. ocf_version_cmp fails to compare this string. wait option will never be added if (nf_tables) or (legacy) is present after vX.Y.Z +--- + heartbeat/portblock | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/portblock b/heartbeat/portblock +index 450e37208..ea22f76a6 100755 +--- a/heartbeat/portblock ++++ b/heartbeat/portblock +@@ -618,7 +618,7 @@ if [ -z "$OCF_RESKEY_action" ]; then + fi + + # iptables v1.4.20+ is required to use -w (wait) +-version=$(iptables -V | awk -F ' v' '{print $NF}') ++version=$(iptables -V | grep -oE '[0-9]+(\.[0-9]+)+') + ocf_version_cmp "$version" "1.4.19.1" + if [ "$?" 
-eq "2" ]; then + wait="-w" + +From 938867b0c4a77448403961b94aa04f9a34c72b11 Mon Sep 17 00:00:00 2001 +From: abrychcy +Date: Thu, 13 Feb 2025 19:16:26 +0100 +Subject: [PATCH 2/2] simplify iptables version string regex + +--- + heartbeat/portblock | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/portblock b/heartbeat/portblock +index ea22f76a6..2fca0f893 100755 +--- a/heartbeat/portblock ++++ b/heartbeat/portblock +@@ -618,7 +618,7 @@ if [ -z "$OCF_RESKEY_action" ]; then + fi + + # iptables v1.4.20+ is required to use -w (wait) +-version=$(iptables -V | grep -oE '[0-9]+(\.[0-9]+)+') ++version=$(iptables -V | grep -oE '[0-9]+[\.0-9]+') + ocf_version_cmp "$version" "1.4.19.1" + if [ "$?" -eq "2" ]; then + wait="-w" diff --git a/RHEL-79822-2-portblock-use-ocf_log-for-logging.patch b/RHEL-79822-2-portblock-use-ocf_log-for-logging.patch new file mode 100644 index 0000000..4c7e6e2 --- /dev/null +++ b/RHEL-79822-2-portblock-use-ocf_log-for-logging.patch @@ -0,0 +1,43 @@ +From 8a3328c07d8e8f1cfe34dd37afaa5707dcafce9b Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 20 Feb 2025 14:24:34 +0100 +Subject: [PATCH] portblock: use ocf_log() for logging + +--- + heartbeat/portblock | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/heartbeat/portblock b/heartbeat/portblock +index 2fca0f893..9b4f5db39 100755 +--- a/heartbeat/portblock ++++ b/heartbeat/portblock +@@ -359,17 +359,17 @@ tickle_local() + + SayActive() + { +- echo "$CMD DROP rule [$*] is running (OK)" ++ ocf_log debug "$CMD DROP rule [$*] is running (OK)" + } + + SayConsideredActive() + { +- echo "$CMD DROP rule [$*] considered to be running (OK)" ++ ocf_log debug "$CMD DROP rule [$*] considered to be running (OK)" + } + + SayInactive() + { +- echo "$CMD DROP rule [$*] is inactive" ++ ocf_log debug "$CMD DROP rule [$*] is inactive" + } + + #IptablesStatus {udp|tcp} portno,portno ip {in|out|both} {block|unblock} +@@ -430,7 +430,7 @@ DoIptables() + op=$1 proto=$2 ports=$3 ip=$4 chain=$5 + active=0; chain_isactive "$proto" "$ports" "$ip" "$chain" && active=1 + want_active=0; [ "$op" = "-I" ] && want_active=1 +- echo "active: $active want_active: $want_active" ++ ocf_log debug "active: $active want_active: $want_active" + if [ $active -eq $want_active ] ; then + : Chain already in desired state + else diff --git a/SOURCES/10-gcloud-support-info.patch b/SOURCES/10-gcloud-support-info.patch deleted file mode 100644 index ef96ca5..0000000 --- a/SOURCES/10-gcloud-support-info.patch +++ /dev/null @@ -1,25 +0,0 @@ -diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py ---- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 1980-01-01 09:00:00.000000000 +0100 -+++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 2019-04-04 11:59:47.592768577 +0200 -@@ -900,6 +900,9 @@ - return """\ - For detailed information on this command and its flags, run: - {command_path} --help -+ -+WARNING: {command_path} is only supported for "{command_path} init" and for use -+with the agents in resource-agents. 
- """.format(command_path=' '.join(command.GetPath())) - - -diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py ---- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 1980-01-01 09:00:00.000000000 +0100 -+++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 2019-04-04 12:00:23.991142694 +0200 -@@ -84,7 +84,7 @@ - - pkg_root = os.path.dirname(os.path.dirname(surface.__file__)) - loader = cli.CLILoader( -- name='gcloud', -+ name='gcloud-ra', - command_root_directory=os.path.join(pkg_root, 'surface'), - allow_non_existing_modules=True, - version_func=VersionFunc, diff --git a/SOURCES/7-gcp-bundled.patch b/SOURCES/7-gcp-bundled.patch deleted file mode 100644 index 287875e..0000000 --- a/SOURCES/7-gcp-bundled.patch +++ /dev/null @@ -1,45 +0,0 @@ -diff --color -uNr a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in ---- a/heartbeat/gcp-pd-move.in 2024-07-22 10:59:42.170483160 +0200 -+++ b/heartbeat/gcp-pd-move.in 2024-07-22 11:01:51.455543850 +0200 -@@ -32,6 +32,7 @@ - from ocf import logger - - try: -+ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp') - import googleapiclient.discovery - except ImportError: - pass -diff --color -uNr a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in ---- a/heartbeat/gcp-vpc-move-ip.in 2024-07-22 10:59:42.170483160 +0200 -+++ b/heartbeat/gcp-vpc-move-ip.in 2024-07-22 11:01:18.010752081 +0200 -@@ -36,7 +36,7 @@ - . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs - - # Defaults --OCF_RESKEY_gcloud_default="/usr/bin/gcloud" -+OCF_RESKEY_gcloud_default="/usr/bin/gcloud-ra" - OCF_RESKEY_configuration_default="default" - OCF_RESKEY_vpc_network_default="default" - OCF_RESKEY_interface_default="eth0" -diff --color -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in ---- a/heartbeat/gcp-vpc-move-route.in 2024-07-22 10:59:42.170483160 +0200 -+++ b/heartbeat/gcp-vpc-move-route.in 2024-07-22 11:01:18.011752105 +0200 -@@ -45,6 +45,7 @@ - from ocf import * - - try: -+ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp') - import googleapiclient.discovery - import pyroute2 - try: -diff --color -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in ---- a/heartbeat/gcp-vpc-move-vip.in 2024-07-22 10:59:42.170483160 +0200 -+++ b/heartbeat/gcp-vpc-move-vip.in 2024-07-22 11:01:18.012752128 +0200 -@@ -29,6 +29,7 @@ - from ocf import * - - try: -+ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp') - import googleapiclient.discovery - try: - from google.oauth2.service_account import Credentials as ServiceAccountCredentials diff --git a/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch b/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch deleted file mode 100644 index de378c4..0000000 --- a/SOURCES/9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch +++ /dev/null @@ -1,129 +0,0 @@ -diff -uNr a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py ---- a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 1980-01-01 09:00:00.000000000 +0100 -+++ b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 2019-04-04 11:56:00.292677044 +0200 -@@ -19,8 +19,14 @@ - certificates. 
- """ - -+from pyasn1.codec.der import decoder - from pyasn1_modules import pem --import rsa -+from pyasn1_modules.rfc2459 import Certificate -+from pyasn1_modules.rfc5208 import PrivateKeyInfo -+from cryptography.hazmat.primitives import serialization, hashes -+from cryptography.hazmat.primitives.asymmetric import padding -+from cryptography import x509 -+from cryptography.hazmat.backends import default_backend - import six - - from oauth2client import _helpers -@@ -40,7 +46,7 @@ - '-----END RSA PRIVATE KEY-----') - _PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----', - '-----END PRIVATE KEY-----') --_PKCS8_SPEC = None -+_PKCS8_SPEC = PrivateKeyInfo() - - - def _bit_list_to_bytes(bit_list): -@@ -67,7 +73,8 @@ - """ - - def __init__(self, pubkey): -- self._pubkey = pubkey -+ self._pubkey = serialization.load_pem_public_key(pubkey, -+ backend=default_backend()) - - def verify(self, message, signature): - """Verifies a message against a signature. -@@ -84,8 +91,9 @@ - """ - message = _helpers._to_bytes(message, encoding='utf-8') - try: -- return rsa.pkcs1.verify(message, signature, self._pubkey) -- except (ValueError, rsa.pkcs1.VerificationError): -+ return self._pubkey.verify(signature, message, padding.PKCS1v15(), -+ hashes.SHA256()) -+ except (ValueError, TypeError, InvalidSignature): - return False - - @classmethod -@@ -109,19 +117,18 @@ - """ - key_pem = _helpers._to_bytes(key_pem) - if is_x509_cert: -- from pyasn1.codec.der import decoder -- from pyasn1_modules import rfc2459 -- -- der = rsa.pem.load_pem(key_pem, 'CERTIFICATE') -- asn1_cert, remaining = decoder.decode(der, asn1Spec=rfc2459.Certificate()) -+ der = x509.load_pem_x509_certificate(pem_data, default_backend()) -+ asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate()) - if remaining != b'': - raise ValueError('Unused bytes', remaining) - - cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo'] - key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey']) -- pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER') -+ pubkey = serialization.load_der_public_key(decoded_key, -+ backend=default_backend()) - else: -- pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM') -+ pubkey = serialization.load_pem_public_key(decoded_key, -+ backend=default_backend()) - return cls(pubkey) - - -@@ -134,6 +141,8 @@ - - def __init__(self, pkey): - self._key = pkey -+ self._pubkey = serialization.load_pem_private_key(pkey, -+ backend=default_backend()) - - def sign(self, message): - """Signs a message. -@@ -145,7 +154,7 @@ - string, The signature of the message for the given key. - """ - message = _helpers._to_bytes(message, encoding='utf-8') -- return rsa.pkcs1.sign(message, self._key, 'SHA-256') -+ return self._key.sign(message, padding.PKCS1v15(), hashes.SHA256()) - - @classmethod - def from_string(cls, key, password='notasecret'): -@@ -163,27 +172,24 @@ - ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in - PEM format. 
- """ -- global _PKCS8_SPEC - key = _helpers._from_bytes(key) # pem expects str in Py3 - marker_id, key_bytes = pem.readPemBlocksFromFile( - six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER) - - if marker_id == 0: -- pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes, -- format='DER') -- elif marker_id == 1: -- from pyasn1.codec.der import decoder -- from pyasn1_modules import rfc5208 -+ pkey = serialization.load_der_private_key( -+ key_bytes, password=None, -+ backend=default_backend()) - -- if _PKCS8_SPEC is None: -- _PKCS8_SPEC = rfc5208.PrivateKeyInfo() -+ elif marker_id == 1: - key_info, remaining = decoder.decode( - key_bytes, asn1Spec=_PKCS8_SPEC) - if remaining != b'': - raise ValueError('Unused bytes', remaining) - pkey_info = key_info.getComponentByName('privateKey') -- pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(), -- format='DER') -+ pkey = serialization.load_der_private_key( -+ pkey_info.asOctets(), password=None, -+ backend=default_backend()) - else: - raise ValueError('No key could be detected.') - diff --git a/SOURCES/RHEL-15302-1-exportfs-make-fsid-optional.patch b/SOURCES/RHEL-15302-1-exportfs-make-fsid-optional.patch deleted file mode 100644 index 5cac255..0000000 --- a/SOURCES/RHEL-15302-1-exportfs-make-fsid-optional.patch +++ /dev/null @@ -1,75 +0,0 @@ -From b806487ca758fce838c988767556007ecf66a6e3 Mon Sep 17 00:00:00 2001 -From: Roger Zhou -Date: Mon, 10 Apr 2023 18:08:56 +0800 -Subject: [PATCH] exportfs: make the "fsid=" parameter optional - -Based on feedback [1] from the kernel developer @neilbrown regarding the -NFS clustering use case, it has been determined that the fsid= parameter -is now considered optional and safe to omit. - -[1] https://bugzilla.suse.com/show_bug.cgi?id=1201271#c49 -""" -Since some time in 2007 NFS has used the UUID of a filesystem as the -primary identifier for that filesystem, rather than using the device -number. So from that time there should have been reduced need for the -"fsid=" option. Probably there are some filesystems that this didn't -work for. btrfs has been problematic at time, particularly when subvols -are exported. But for quite some years this has all "just worked" at -least for the major filesystems (ext4 xfs btrfs). [...] I would suggest -getting rid of the use of fsid= altogether. [...] I'm confident that it -was no longer an issue in SLE-12 and similarly not in SLE-15. -""" ---- - heartbeat/exportfs | 12 +++++++----- - 1 file changed, 7 insertions(+), 5 deletions(-) - -diff --git a/heartbeat/exportfs b/heartbeat/exportfs -index 2307a9e67b..435a19646b 100755 ---- a/heartbeat/exportfs -+++ b/heartbeat/exportfs -@@ -82,7 +82,7 @@ The directory or directories to export. - - - -- -+ - - The fsid option to pass to exportfs. This can be a unique positive - integer, a UUID (assuredly sans comma characters), or the special string -@@ -185,6 +185,8 @@ exportfs_methods() { - - reset_fsid() { - CURRENT_FSID=$OCF_RESKEY_fsid -+ [ -z "$CURRENT_FSID" ] && CURRENT_FSID=`echo "$OCF_RESKEY_options" | sed -n 's/.*fsid=\([^,]*\).*/\1/p'` -+ echo $CURRENT_FSID - } - bump_fsid() { - CURRENT_FSID=$((CURRENT_FSID+1)) -@@ -322,7 +324,7 @@ export_one() { - if echo "$opts" | grep fsid >/dev/null; then - #replace fsid in options list - opts=`echo "$opts" | sed "s,fsid=[^,]*,fsid=$(get_fsid),g"` -- else -+ elif [ -n "$OCF_RESKEY_fsid" ]; then - #tack the fsid option onto our options list. 
- opts="${opts}${sep}fsid=$(get_fsid)" - fi -@@ -448,8 +450,8 @@ exportfs_validate_all () - ocf_exit_reason "$OCF_RESKEY_fsid cannot contain a comma" - return $OCF_ERR_CONFIGURED - fi -- if [ $NUMDIRS -gt 1 ] && -- ! ocf_is_decimal "$OCF_RESKEY_fsid"; then -+ if [ $NUMDIRS -gt 1 ] && [ -n "$(reset_fsid)" ] && -+ ! ocf_is_decimal "$(reset_fsid)"; then - ocf_exit_reason "use integer fsid when exporting multiple directories" - return $OCF_ERR_CONFIGURED - fi -@@ -485,6 +487,6 @@ done - OCF_RESKEY_directory="${directories%% }" - - NUMDIRS=`echo "$OCF_RESKEY_directory" | wc -w` --OCF_REQUIRED_PARAMS="directory fsid clientspec" -+OCF_REQUIRED_PARAMS="directory clientspec" - OCF_REQUIRED_BINARIES="exportfs" - ocf_rarun $* diff --git a/SOURCES/RHEL-15302-2-ocft-exportfs-remove-fsid-required-test.patch b/SOURCES/RHEL-15302-2-ocft-exportfs-remove-fsid-required-test.patch deleted file mode 100644 index ee3ecca..0000000 --- a/SOURCES/RHEL-15302-2-ocft-exportfs-remove-fsid-required-test.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 1d1481aa6d848efab4d398ad6e74d80b5b32549f Mon Sep 17 00:00:00 2001 -From: Valentin Vidic -Date: Wed, 1 Nov 2023 18:25:45 +0100 -Subject: [PATCH] exportfs: remove test for "fsid=" parameter - -fsid parameter is now considered optional. ---- - tools/ocft/exportfs | 5 ----- - tools/ocft/exportfs-multidir | 5 ----- - 2 files changed, 10 deletions(-) - -diff --git a/tools/ocft/exportfs b/tools/ocft/exportfs -index 285a4b8ea0..1ec3d4c364 100644 ---- a/tools/ocft/exportfs -+++ b/tools/ocft/exportfs -@@ -28,11 +28,6 @@ CASE "check base env" - Include prepare - AgentRun start OCF_SUCCESS - --CASE "check base env: no 'OCF_RESKEY_fsid'" -- Include prepare -- Env OCF_RESKEY_fsid= -- AgentRun start OCF_ERR_CONFIGURED -- - CASE "check base env: invalid 'OCF_RESKEY_directory'" - Include prepare - Env OCF_RESKEY_directory=/no_such -diff --git a/tools/ocft/exportfs-multidir b/tools/ocft/exportfs-multidir -index 00e41f0859..ac6d5c7f6a 100644 ---- a/tools/ocft/exportfs-multidir -+++ b/tools/ocft/exportfs-multidir -@@ -28,11 +28,6 @@ CASE "check base env" - Include prepare - AgentRun start OCF_SUCCESS - --CASE "check base env: no 'OCF_RESKEY_fsid'" -- Include prepare -- Env OCF_RESKEY_fsid= -- AgentRun start OCF_ERR_CONFIGURED -- - CASE "check base env: invalid 'OCF_RESKEY_directory'" - Include prepare - Env OCF_RESKEY_directory=/no_such diff --git a/SOURCES/RHEL-15305-1-findif.sh-fix-loopback-handling.patch b/SOURCES/RHEL-15305-1-findif.sh-fix-loopback-handling.patch deleted file mode 100644 index 283f0f2..0000000 --- a/SOURCES/RHEL-15305-1-findif.sh-fix-loopback-handling.patch +++ /dev/null @@ -1,45 +0,0 @@ -From e4f84ae185b6943d1ff461d53c7f1b5295783086 Mon Sep 17 00:00:00 2001 -From: Valentin Vidic -Date: Wed, 1 Nov 2023 19:35:21 +0100 -Subject: [PATCH] findif.sh: fix loopback handling - -tools/ocft/IPaddr2 fails the loopback test because of the missing -table local parameter: - -$ ip -o -f inet route list match 127.0.0.3 scope host - -$ ip -o -f inet route list match 127.0.0.3 table local scope host -local 127.0.0.0/8 dev lo proto kernel src 127.0.0.1 - -Also rename the function because it is called only in for the special -loopback address case. 
---- - heartbeat/findif.sh | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh -index 5f1c19ec3..7c766e6e0 100644 ---- a/heartbeat/findif.sh -+++ b/heartbeat/findif.sh -@@ -29,10 +29,10 @@ prefixcheck() { - fi - return 0 - } --getnetworkinfo() -+getloopbackinfo() - { - local line netinfo -- ip -o -f inet route list match $OCF_RESKEY_ip table "${OCF_RESKEY_table:=main}" scope host | (while read line; -+ ip -o -f inet route list match $OCF_RESKEY_ip table local scope host | (while read line; - do - netinfo=`echo $line | awk '{print $2}'` - case $netinfo in -@@ -222,7 +222,7 @@ findif() - if [ $# = 0 ] ; then - case $OCF_RESKEY_ip in - 127.*) -- set -- `getnetworkinfo` -+ set -- `getloopbackinfo` - shift;; - esac - fi diff --git a/SOURCES/RHEL-15305-2-findif.sh-dont-use-table-parameter.patch b/SOURCES/RHEL-15305-2-findif.sh-dont-use-table-parameter.patch deleted file mode 100644 index 29dba3b..0000000 --- a/SOURCES/RHEL-15305-2-findif.sh-dont-use-table-parameter.patch +++ /dev/null @@ -1,20 +0,0 @@ ---- a/heartbeat/findif.sh 2024-02-08 11:31:53.414257686 +0100 -+++ b/heartbeat/findif.sh 2023-11-02 10:20:12.150853167 +0100 -@@ -210,14 +210,14 @@ - fi - findif_check_params $family || return $? - -- if [ -n "$netmask" ] ; then -+ if [ -n "$netmask" ]; then - match=$match/$netmask - fi - if [ -n "$nic" ] ; then - # NIC supports more than two. -- set -- $(ip -o -f $family route list match $match $scope table "${OCF_RESKEY_table:=main}" | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') -+ set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') - else -- set -- $(ip -o -f $family route list match $match $scope table "${OCF_RESKEY_table:=main}" | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') -+ set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') - fi - if [ $# = 0 ] ; then - case $OCF_RESKEY_ip in diff --git a/SOURCES/RHEL-16248-aws-vpc-move-ip-aws-vpc-route53-awseip-awsvip-auth_type-role.patch b/SOURCES/RHEL-16248-aws-vpc-move-ip-aws-vpc-route53-awseip-awsvip-auth_type-role.patch deleted file mode 100644 index 7d3256d..0000000 --- a/SOURCES/RHEL-16248-aws-vpc-move-ip-aws-vpc-route53-awseip-awsvip-auth_type-role.patch +++ /dev/null @@ -1,555 +0,0 @@ -From f45f76600a7e02c860566db7d1350dc3b09449c2 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Mon, 6 Nov 2023 15:49:44 +0100 -Subject: [PATCH] aws-vpc-move-ip/aws-vpc-route53/awseip/awsvip: add auth_type - parameter and AWS Policy based authentication type - ---- - heartbeat/aws-vpc-move-ip | 43 +++++++++++++++++++---- - heartbeat/aws-vpc-route53.in | 47 ++++++++++++++++++++----- - heartbeat/awseip | 68 +++++++++++++++++++++++++++--------- - heartbeat/awsvip | 60 ++++++++++++++++++++++++------- - 4 files changed, 173 insertions(+), 45 deletions(-) - -diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip -index dee040300f..54806f6eaa 100755 ---- a/heartbeat/aws-vpc-move-ip -+++ b/heartbeat/aws-vpc-move-ip -@@ -36,6 +36,7 @@ - - # Defaults - OCF_RESKEY_awscli_default="/usr/bin/aws" 
-+OCF_RESKEY_auth_type_default="key" - OCF_RESKEY_profile_default="default" - OCF_RESKEY_region_default="" - OCF_RESKEY_ip_default="" -@@ -48,6 +49,7 @@ OCF_RESKEY_monapi_default="false" - OCF_RESKEY_lookup_type_default="InstanceId" - - : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} -+: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} - : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} - : ${OCF_RESKEY_region=${OCF_RESKEY_region_default}} - : ${OCF_RESKEY_ip=${OCF_RESKEY_ip_default}} -@@ -58,8 +60,6 @@ OCF_RESKEY_lookup_type_default="InstanceId" - : ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}} - : ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}} - : ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}} -- --[ -n "$OCF_RESKEY_region" ] && region_opt="--region $OCF_RESKEY_region" - ####################################################################### - - -@@ -83,6 +83,10 @@ cat < - Resource Agent to move IP addresses within a VPC of the Amazon Webservices EC2 - by changing an entry in an specific routing table -+ -+Credentials needs to be setup by running "aws configure", or by using AWS Policies. -+ -+See https://aws.amazon.com/cli/ for more information about awscli. - - Move IP within a VPC of the AWS EC2 - -@@ -95,6 +99,15 @@ Path to command line tools for AWS - - - -+ -+ -+Authentication type "key" for AccessKey and SecretAccessKey set via "aws configure", -+or "role" to use AWS Policies. -+ -+Authentication type -+ -+ -+ - - - Valid AWS CLI profile name (see ~/.aws/config and 'aws configure') -@@ -198,7 +211,7 @@ END - execute_cmd_as_role(){ - cmd=$1 - role=$2 -- output="$($OCF_RESKEY_awscli sts assume-role --role-arn $role --role-session-name AWSCLI-RouteTableUpdate --profile $OCF_RESKEY_profile $region_opt --output=text)" -+ output="$($AWSCLI_CMD sts assume-role --role-arn $role --role-session-name AWSCLI-RouteTableUpdate --output=text)" - export AWS_ACCESS_KEY_ID="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $5}')" - export AWS_SECRET_ACCESS_KEY="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $7}')" - export AWS_SESSION_TOKEN="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $8}')" -@@ -220,11 +233,11 @@ ec2ip_set_address_param_compat(){ - } - - ec2ip_validate() { -- for cmd in $OCF_RESKEY_awscli ip curl; do -+ for cmd in "$OCF_RESKEY_awscli" ip curl; do - check_binary "$cmd" - done - -- if [ -z "$OCF_RESKEY_profile" ]; then -+ if [ "x${OCF_RESKEY_auth_type}" = "xkey" ] && [ -z "$OCF_RESKEY_profile" ]; then - ocf_exit_reason "profile parameter not set" - return $OCF_ERR_CONFIGURED - fi -@@ -262,7 +275,7 @@ ec2ip_monitor() { - for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do - ocf_log info "monitor: check routing table (API call) - $rtb" - if [ -z "${OCF_RESKEY_routing_table_role}" ]; then -- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type" -+ cmd="$AWSCLI_CMD --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type" - ocf_log debug "executing command: $cmd" - ROUTE_TO_INSTANCE="$($cmd)" - else -@@ -368,7 +381,7 @@ ec2ip_get_and_configure() { - EC2_NETWORK_INTERFACE_ID="$(ec2ip_get_instance_eni)" - for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do - if [ -z "${OCF_RESKEY_routing_table_role}" ]; then -- 
cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" -+ cmd="$AWSCLI_CMD --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" - ocf_log debug "executing command: $cmd" - $cmd - else -@@ -475,6 +488,22 @@ if ! ocf_is_root; then - exit $OCF_ERR_PERM - fi - -+AWSCLI_CMD="${OCF_RESKEY_awscli}" -+if [ "x${OCF_RESKEY_auth_type}" = "xkey" ]; then -+ AWSCLI_CMD="$AWSCLI_CMD --profile ${OCF_RESKEY_profile}" -+elif [ "x${OCF_RESKEY_auth_type}" = "xrole" ]; then -+ if [ -z "${OCF_RESKEY_region}" ]; then -+ ocf_exit_reason "region needs to be set when using role-based authentication" -+ exit $OCF_ERR_CONFIGURED -+ fi -+else -+ ocf_exit_reason "Incorrect auth_type: ${OCF_RESKEY_auth_type}" -+ exit $OCF_ERR_CONFIGURED -+fi -+if [ -n "${OCF_RESKEY_region}" ]; then -+ AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}" -+fi -+ - ec2ip_set_address_param_compat - - ec2ip_validate -diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in -index 22cbb35833..18ab157e8a 100644 ---- a/heartbeat/aws-vpc-route53.in -+++ b/heartbeat/aws-vpc-route53.in -@@ -46,24 +46,22 @@ - - # Defaults - OCF_RESKEY_awscli_default="/usr/bin/aws" -+OCF_RESKEY_auth_type_default="key" - OCF_RESKEY_profile_default="default" -+OCF_RESKEY_region_default="" - OCF_RESKEY_hostedzoneid_default="" - OCF_RESKEY_fullname_default="" - OCF_RESKEY_ip_default="local" - OCF_RESKEY_ttl_default=10 - - : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} -+: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} - : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} -+: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}} - : ${OCF_RESKEY_hostedzoneid:=${OCF_RESKEY_hostedzoneid_default}} - : ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}} - : ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}} - : ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}} --####################################################################### -- -- --AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10" --####################################################################### -- - - usage() { - cat <<-EOT -@@ -123,6 +121,15 @@ Path to command line tools for AWS - - - -+ -+ -+Authentication type "key" for AccessKey and SecretAccessKey set via "aws configure", -+or "role" to use AWS Policies. -+ -+Authentication type -+ -+ -+ - - - The name of the AWS CLI profile of the root account. This -@@ -196,7 +203,7 @@ r53_validate() { - - # Check for required binaries - ocf_log debug "Checking for required binaries" -- for command in curl dig; do -+ for command in "${OCF_RESKEY_awscli}" curl dig; do - check_binary "$command" - done - -@@ -216,7 +223,10 @@ r53_validate() { - esac - - # profile -- [[ -z "$OCF_RESKEY_profile" ]] && ocf_log error "AWS CLI profile not set $OCF_RESKEY_profile!" && exit $OCF_ERR_CONFIGURED -+ if [ "x${OCF_RESKEY_auth_type}" = "xkey" ] && [ -z "$OCF_RESKEY_profile" ]; then -+ ocf_exit_reason "profile parameter not set" -+ return $OCF_ERR_CONFIGURED -+ fi - - # TTL - [[ -z "$OCF_RESKEY_ttl" ]] && ocf_log error "TTL not set $OCF_RESKEY_ttl!" 
&& exit $OCF_ERR_CONFIGURED -@@ -417,7 +427,6 @@ _update_record() { - } - - ############################################################################### -- - case $__OCF_ACTION in - usage|help) - usage -@@ -427,6 +436,26 @@ case $__OCF_ACTION in - metadata - exit $OCF_SUCCESS - ;; -+esac -+ -+AWSCLI_CMD="${OCF_RESKEY_awscli}" -+if [ "x${OCF_RESKEY_auth_type}" = "xkey" ]; then -+ AWSCLI_CMD="$AWSCLI_CMD --profile ${OCF_RESKEY_profile}" -+elif [ "x${OCF_RESKEY_auth_type}" = "xrole" ]; then -+ if [ -z "${OCF_RESKEY_region}" ]; then -+ ocf_exit_reason "region needs to be set when using role-based authentication" -+ exit $OCF_ERR_CONFIGURED -+ fi -+else -+ ocf_exit_reason "Incorrect auth_type: ${OCF_RESKEY_auth_type}" -+ exit $OCF_ERR_CONFIGURED -+fi -+if [ -n "${OCF_RESKEY_region}" ]; then -+ AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}" -+fi -+AWSCLI_CMD="$AWSCLI_CMD --cli-connect-timeout 10" -+ -+case $__OCF_ACTION in - start) - r53_validate || exit $? - r53_start -diff --git a/heartbeat/awseip b/heartbeat/awseip -index dc48460c85..49b0ca6155 100755 ---- a/heartbeat/awseip -+++ b/heartbeat/awseip -@@ -23,7 +23,8 @@ - # - # Prerequisites: - # --# - preconfigured AWS CLI running environment (AccessKey, SecretAccessKey, etc.) -+# - preconfigured AWS CLI running environment (AccessKey, SecretAccessKey, etc.) or -+# (AWSRole) Setup up relevant AWS Policies to allow agent related functions to be executed. - # - a reserved secondary private IP address for EC2 instances high availability - # - IAM user role with the following permissions: - # * DescribeInstances -@@ -44,11 +45,15 @@ - # Defaults - # - OCF_RESKEY_awscli_default="/usr/bin/aws" -+OCF_RESKEY_auth_type_default="key" - OCF_RESKEY_profile_default="default" -+OCF_RESKEY_region_default="" - OCF_RESKEY_api_delay_default="3" - - : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} -+: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} - : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} -+: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}} - : ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}} - - meta_data() { -@@ -63,7 +68,7 @@ Resource Agent for Amazon AWS Elastic IP Addresses. - - It manages AWS Elastic IP Addresses with awscli. - --Credentials needs to be setup by running "aws configure". -+Credentials needs to be setup by running "aws configure", or by using AWS Policies. - - See https://aws.amazon.com/cli/ for more information about awscli. - -@@ -79,6 +84,15 @@ command line tools for aws services - - - -+ -+ -+Authentication type "key" for AccessKey and SecretAccessKey set via "aws configure", -+or "role" to use AWS Policies. -+ -+Authentication type -+ -+ -+ - - - Valid AWS CLI profile name (see ~/.aws/config and 'aws configure') -@@ -111,6 +125,14 @@ predefined private ip address for ec2 instance - - - -+ -+ -+Region for AWS resource (required for role-based authentication) -+ -+Region -+ -+ -+ - - - a short delay between API calls, to avoid sending API too quick -@@ -157,13 +179,13 @@ awseip_start() { - NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN") - fi - done -- $AWSCLI --profile $OCF_RESKEY_profile ec2 associate-address \ -+ $AWSCLI_CMD ec2 associate-address \ - --network-interface-id ${NETWORK_ID} \ - --allocation-id ${ALLOCATION_ID} \ - --private-ip-address ${PRIVATE_IP_ADDRESS} - RET=$? 
- else -- $AWSCLI --profile $OCF_RESKEY_profile ec2 associate-address \ -+ $AWSCLI_CMD ec2 associate-address \ - --instance-id ${INSTANCE_ID} \ - --allocation-id ${ALLOCATION_ID} - RET=$? -@@ -183,7 +205,7 @@ awseip_start() { - awseip_stop() { - awseip_monitor || return $OCF_SUCCESS - -- ASSOCIATION_ID=$($AWSCLI --profile $OCF_RESKEY_profile --output json ec2 describe-addresses \ -+ ASSOCIATION_ID=$($AWSCLI_CMD --output json ec2 describe-addresses \ - --allocation-id ${ALLOCATION_ID} | grep -m 1 "AssociationId" | awk -F'"' '{print$4}') - - if [ -z "${ASSOCIATION_ID}" ]; then -@@ -191,9 +213,7 @@ awseip_stop() { - return $OCF_NOT_RUNNING - fi - -- $AWSCLI --profile ${OCF_RESKEY_profile} \ -- ec2 disassociate-address \ -- --association-id ${ASSOCIATION_ID} -+ $AWSCLI_CMD ec2 disassociate-address --association-id ${ASSOCIATION_ID} - RET=$? - - # delay to avoid sending request too fast -@@ -208,7 +228,7 @@ awseip_stop() { - } - - awseip_monitor() { -- $AWSCLI --profile $OCF_RESKEY_profile ec2 describe-instances --instance-id "${INSTANCE_ID}" | grep -q "${ELASTIC_IP}" -+ $AWSCLI_CMD ec2 describe-instances --instance-id "${INSTANCE_ID}" | grep -q "${ELASTIC_IP}" - RET=$? - - if [ $RET -ne 0 ]; then -@@ -218,9 +238,9 @@ awseip_monitor() { - } - - awseip_validate() { -- check_binary ${AWSCLI} -+ check_binary "${OCF_RESKEY_awscli}" - -- if [ -z "$OCF_RESKEY_profile" ]; then -+ if [ "x${OCF_RESKEY_auth_type}" = "xkey" ] && [ -z "$OCF_RESKEY_profile" ]; then - ocf_exit_reason "profile parameter not set" - return $OCF_ERR_CONFIGURED - fi -@@ -238,9 +258,27 @@ case $__OCF_ACTION in - meta_data - exit $OCF_SUCCESS - ;; --esac -+ usage|help) -+ awseip_usage -+ exit $OCF_SUCCESS -+ ;; -+esac - --AWSCLI="${OCF_RESKEY_awscli}" -+AWSCLI_CMD="${OCF_RESKEY_awscli}" -+if [ "x${OCF_RESKEY_auth_type}" = "xkey" ]; then -+ AWSCLI_CMD="$AWSCLI_CMD --profile ${OCF_RESKEY_profile}" -+elif [ "x${OCF_RESKEY_auth_type}" = "xrole" ]; then -+ if [ -z "${OCF_RESKEY_region}" ]; then -+ ocf_exit_reason "region needs to be set when using role-based authentication" -+ exit $OCF_ERR_CONFIGURED -+ fi -+else -+ ocf_exit_reason "Incorrect auth_type: ${OCF_RESKEY_auth_type}" -+ exit $OCF_ERR_CONFIGURED -+fi -+if [ -n "${OCF_RESKEY_region}" ]; then -+ AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}" -+fi - ELASTIC_IP="${OCF_RESKEY_elastic_ip}" - ALLOCATION_ID="${OCF_RESKEY_allocation_id}" - PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}" -@@ -272,10 +310,6 @@ case $__OCF_ACTION in - validate|validate-all) - awseip_validate - ;; -- usage|help) -- awseip_usage -- exit $OCF_SUCCESS -- ;; - *) - awseip_usage - exit $OCF_ERR_UNIMPLEMENTED -diff --git a/heartbeat/awsvip b/heartbeat/awsvip -index 037278e296..bdb4d68dd0 100755 ---- a/heartbeat/awsvip -+++ b/heartbeat/awsvip -@@ -23,7 +23,8 @@ - # - # Prerequisites: - # --# - preconfigured AWS CLI running environment (AccessKey, SecretAccessKey, etc.) -+# - preconfigured AWS CLI running environment (AccessKey, SecretAccessKey, etc.) or -+# (AWSRole) Setup up relevant AWS Policies to allow agent related functions to be executed. 
- # - a reserved secondary private IP address for EC2 instances high availablity - # - IAM user role with the following permissions: - # * DescribeInstances -@@ -43,11 +44,15 @@ - # Defaults - # - OCF_RESKEY_awscli_default="/usr/bin/aws" -+OCF_RESKEY_auth_type_default="key" - OCF_RESKEY_profile_default="default" -+OCF_RESKEY_region_default="" - OCF_RESKEY_api_delay_default="3" - - : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} -+: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} - : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} -+: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}} - : ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}} - - meta_data() { -@@ -62,7 +67,7 @@ Resource Agent for Amazon AWS Secondary Private IP Addresses. - - It manages AWS Secondary Private IP Addresses with awscli. - --Credentials needs to be setup by running "aws configure". -+Credentials needs to be setup by running "aws configure", or by using AWS Policies. - - See https://aws.amazon.com/cli/ for more information about awscli. - -@@ -78,6 +83,15 @@ command line tools for aws services - - - -+ -+ -+Authentication type "key" for AccessKey and SecretAccessKey set via "aws configure", -+or "role" to use AWS Policies. -+ -+Authentication type -+ -+ -+ - - - Valid AWS CLI profile name (see ~/.aws/config and 'aws configure') -@@ -94,6 +108,14 @@ reserved secondary private ip for ec2 instance - - - -+ -+ -+Region for AWS resource (required for role-based authentication) -+ -+Region -+ -+ -+ - - - a short delay between API calls, to avoid sending API too quick -@@ -131,7 +153,7 @@ END - awsvip_start() { - awsvip_monitor && return $OCF_SUCCESS - -- $AWSCLI --profile $OCF_RESKEY_profile ec2 assign-private-ip-addresses \ -+ $AWSCLI_CMD ec2 assign-private-ip-addresses \ - --network-interface-id ${NETWORK_ID} \ - --private-ip-addresses ${SECONDARY_PRIVATE_IP} \ - --allow-reassignment -@@ -151,7 +173,7 @@ awsvip_start() { - awsvip_stop() { - awsvip_monitor || return $OCF_SUCCESS - -- $AWSCLI --profile $OCF_RESKEY_profile ec2 unassign-private-ip-addresses \ -+ $AWSCLI_CMD ec2 unassign-private-ip-addresses \ - --network-interface-id ${NETWORK_ID} \ - --private-ip-addresses ${SECONDARY_PRIVATE_IP} - RET=$? 
-@@ -168,7 +190,7 @@ awsvip_stop() { - } - - awsvip_monitor() { -- $AWSCLI --profile ${OCF_RESKEY_profile} ec2 describe-instances \ -+ $AWSCLI_CMD ec2 describe-instances \ - --instance-id "${INSTANCE_ID}" \ - --query 'Reservations[].Instances[].NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \ - --output text | \ -@@ -182,9 +204,9 @@ awsvip_monitor() { - } - - awsvip_validate() { -- check_binary ${AWSCLI} -+ check_binary "${OCF_RESKEY_awscli}" - -- if [ -z "$OCF_RESKEY_profile" ]; then -+ if [ "x${OCF_RESKEY_auth_type}" = "xkey" ] && [ -z "$OCF_RESKEY_profile" ]; then - ocf_exit_reason "profile parameter not set" - return $OCF_ERR_CONFIGURED - fi -@@ -202,9 +224,27 @@ case $__OCF_ACTION in - meta_data - exit $OCF_SUCCESS - ;; -+ usage|help) -+ awsvip_usage -+ exit $OCF_SUCCESS -+ ;; - esac - --AWSCLI="${OCF_RESKEY_awscli}" -+AWSCLI_CMD="${OCF_RESKEY_awscli}" -+if [ "x${OCF_RESKEY_auth_type}" = "xkey" ]; then -+ AWSCLI_CMD="$AWSCLI_CMD --profile ${OCF_RESKEY_profile}" -+elif [ "x${OCF_RESKEY_auth_type}" = "xrole" ]; then -+ if [ -z "${OCF_RESKEY_region}" ]; then -+ ocf_exit_reason "region needs to be set when using role-based authentication" -+ exit $OCF_ERR_CONFIGURED -+ fi -+else -+ ocf_exit_reason "Incorrect auth_type: ${OCF_RESKEY_auth_type}" -+ exit $OCF_ERR_CONFIGURED -+fi -+if [ -n "${OCF_RESKEY_region}" ]; then -+ AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}" -+fi - SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}" - TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") -@@ -236,10 +276,6 @@ case $__OCF_ACTION in - validate|validate-all) - awsvip_validate - ;; -- usage|help) -- awsvip_usage -- exit $OCF_SUCCESS -- ;; - *) - awsvip_usage - exit $OCF_ERR_UNIMPLEMENTED diff --git a/SOURCES/RHEL-17083-findif-EOS-fix.patch b/SOURCES/RHEL-17083-findif-EOS-fix.patch deleted file mode 100644 index aaf5505..0000000 --- a/SOURCES/RHEL-17083-findif-EOS-fix.patch +++ /dev/null @@ -1,22 +0,0 @@ -From b23ba4eaefb500199c4845751f4c5545c81f42f1 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Mon, 20 Nov 2023 16:37:37 +0100 -Subject: [PATCH 2/2] findif: also check that netmaskbits != EOS - ---- - tools/findif.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tools/findif.c b/tools/findif.c -index a25395fec..ab108a3c4 100644 ---- a/tools/findif.c -+++ b/tools/findif.c -@@ -669,7 +669,7 @@ main(int argc, char ** argv) { - } - } - -- if (netmaskbits) { -+ if (netmaskbits != NULL && *netmaskbits != EOS) { - best_netmask = netmask; - }else if (best_netmask == 0L) { - /* diff --git a/SOURCES/RHEL-32828-db2-fix-OCF_SUCESS-typo.patch b/SOURCES/RHEL-32828-db2-fix-OCF_SUCESS-typo.patch deleted file mode 100644 index bbe2847..0000000 --- a/SOURCES/RHEL-32828-db2-fix-OCF_SUCESS-typo.patch +++ /dev/null @@ -1,23 +0,0 @@ -From a9c4aeb971e9f4963345d0e215b729def62dd27c Mon Sep 17 00:00:00 2001 -From: pepadelic <162310096+pepadelic@users.noreply.github.com> -Date: Mon, 15 Apr 2024 13:52:54 +0200 -Subject: [PATCH] Update db2: fix OCF_SUCESS name in db2_notify - -fix OCF_SUCESS to OCF_SUCCESS in db2_notify ---- - heartbeat/db2 | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/db2 b/heartbeat/db2 -index 95447ab6cb..1cd66f15af 100755 ---- a/heartbeat/db2 -+++ b/heartbeat/db2 -@@ -848,7 +848,7 @@ db2_notify() { - - # only interested in pre-start - [ 
$OCF_RESKEY_CRM_meta_notify_type = pre \ -- -a $OCF_RESKEY_CRM_meta_notify_operation = start ] || return $OCF_SUCESS -+ -a $OCF_RESKEY_CRM_meta_notify_operation = start ] || return $OCF_SUCCESS - - # gets FIRST_ACTIVE_LOG - db2_get_cfg $dblist || return $? diff --git a/SOURCES/RHEL-34137-aws-agents-use-curl_retry.patch b/SOURCES/RHEL-34137-aws-agents-use-curl_retry.patch deleted file mode 100644 index 9f035a7..0000000 --- a/SOURCES/RHEL-34137-aws-agents-use-curl_retry.patch +++ /dev/null @@ -1,343 +0,0 @@ -From fc0657b936f6a58f741e33f851b22f82bc68bffa Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 6 Feb 2024 13:28:12 +0100 -Subject: [PATCH 1/2] ocf-shellfuncs: add curl_retry() - ---- - heartbeat/ocf-shellfuncs.in | 34 ++++++++++++++++++++++++++++++++++ - 1 file changed, 34 insertions(+) - -diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in -index c5edb6f57..a69a9743d 100644 ---- a/heartbeat/ocf-shellfuncs.in -+++ b/heartbeat/ocf-shellfuncs.in -@@ -672,6 +672,40 @@ EOF - systemctl daemon-reload - } - -+# usage: curl_retry RETRIES SLEEP ARGS URL -+# -+# Use --show-error in ARGS to log HTTP error code -+# -+# returns: -+# 0 success -+# exit: -+# 1 fail -+curl_retry() -+{ -+ local retries=$1 sleep=$2 opts=$3 url=$4 -+ local tries=$(($retries + 1)) -+ local args="--fail $opts $url" -+ local result rc -+ -+ for try in $(seq $tries); do -+ ocf_log debug "curl $args try $try of $tries" -+ result=$(echo "$args" | xargs curl 2>&1) -+ rc=$? -+ -+ ocf_log debug "result: $result" -+ [ $rc -eq 0 ] && break -+ sleep $sleep -+ done -+ -+ if [ $rc -ne 0 ]; then -+ ocf_exit_reason "curl $args failed $tries tries" -+ exit $OCF_ERR_GENERIC -+ fi -+ -+ echo "$result" -+ return $rc -+} -+ - # usage: crm_mon_no_validation args... - # run crm_mon without any cib schema validation - # This is useful when an agent runs in a bundle to avoid potential - -From 80d330557319bdae9e45aad1279e435fc481d4e7 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 6 Feb 2024 13:28:25 +0100 -Subject: [PATCH 2/2] AWS agents: use curl_retry() - ---- - heartbeat/aws-vpc-move-ip | 35 ++++++++++++++++++++++++++--------- - heartbeat/aws-vpc-route53.in | 27 +++++++++++++++++++++++++-- - heartbeat/awseip | 36 +++++++++++++++++++++++++++++++----- - heartbeat/awsvip | 32 ++++++++++++++++++++++++++++---- - 4 files changed, 110 insertions(+), 20 deletions(-) - -diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip -index 54806f6ea..6115e5ba8 100755 ---- a/heartbeat/aws-vpc-move-ip -+++ b/heartbeat/aws-vpc-move-ip -@@ -47,6 +47,8 @@ OCF_RESKEY_interface_default="eth0" - OCF_RESKEY_iflabel_default="" - OCF_RESKEY_monapi_default="false" - OCF_RESKEY_lookup_type_default="InstanceId" -+OCF_RESKEY_curl_retries_default="3" -+OCF_RESKEY_curl_sleep_default="1" - - : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} - : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} -@@ -60,6 +62,8 @@ OCF_RESKEY_lookup_type_default="InstanceId" - : ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}} - : ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}} - : ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}} -+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}} -+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}} - ####################################################################### - - -@@ -194,6 +198,22 @@ Name of resource type to lookup in route table. 
- - - -+ -+ -+curl retries before failing -+ -+curl retries -+ -+ -+ -+ -+ -+curl sleep between tries -+ -+curl sleep -+ -+ -+ - - - -@@ -250,8 +270,10 @@ ec2ip_validate() { - fi - fi - -- TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") -- EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") -+ TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token") -+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC -+ EC2_INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id") -+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC - - if [ -z "${EC2_INSTANCE_ID}" ]; then - ocf_exit_reason "Instance ID not found. Is this a EC2 instance?" -@@ -365,14 +387,9 @@ ec2ip_get_instance_eni() { - fi - ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" - -- cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id -H \"X-aws-ec2-metadata-token: $TOKEN\"" -- ocf_log debug "executing command: $cmd" -+ cmd="curl_retry \"$OCF_RESKEY_curl_retries\" \"$OCF_RESKEY_curl_sleep\" \"--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'\" \"http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id\"" - EC2_NETWORK_INTERFACE_ID="$(eval $cmd)" -- rc=$? -- if [ $rc != 0 ]; then -- ocf_log warn "command failed, rc: $rc" -- return $OCF_ERR_GENERIC -- fi -+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC - ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}" - echo $EC2_NETWORK_INTERFACE_ID - } -diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in -index 18ab157e8..eba2ed95c 100644 ---- a/heartbeat/aws-vpc-route53.in -+++ b/heartbeat/aws-vpc-route53.in -@@ -53,6 +53,8 @@ OCF_RESKEY_hostedzoneid_default="" - OCF_RESKEY_fullname_default="" - OCF_RESKEY_ip_default="local" - OCF_RESKEY_ttl_default=10 -+OCF_RESKEY_curl_retries_default="3" -+OCF_RESKEY_curl_sleep_default="1" - - : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} - : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} -@@ -62,6 +64,8 @@ OCF_RESKEY_ttl_default=10 - : ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}} - : ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}} - : ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}} -+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}} -+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}} - - usage() { - cat <<-EOT -@@ -185,6 +189,22 @@ Time to live for Route53 ARECORD - ARECORD TTL - - -+ -+ -+ -+curl retries before failing -+ -+curl retries -+ -+ -+ -+ -+ -+curl sleep between tries -+ -+curl sleep -+ -+ - - - -@@ -357,8 +377,11 @@ r53_monitor() { - _get_ip() { - case $OCF_RESKEY_ip in - local|public) -- TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") -- IPADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4 -H "X-aws-ec2-metadata-token: $TOKEN");; -+ TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token") -+ [ $? 
-ne 0 ] && exit $OCF_ERR_GENERIC -+ IPADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4") -+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC -+ ;; - *.*.*.*) - IPADDRESS="${OCF_RESKEY_ip}";; - esac -diff --git a/heartbeat/awseip b/heartbeat/awseip -index 49b0ca615..ffb6223a1 100755 ---- a/heartbeat/awseip -+++ b/heartbeat/awseip -@@ -49,12 +49,16 @@ OCF_RESKEY_auth_type_default="key" - OCF_RESKEY_profile_default="default" - OCF_RESKEY_region_default="" - OCF_RESKEY_api_delay_default="3" -+OCF_RESKEY_curl_retries_default="3" -+OCF_RESKEY_curl_sleep_default="1" - - : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} - : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} - : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} - : ${OCF_RESKEY_region=${OCF_RESKEY_region_default}} - : ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}} -+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}} -+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}} - - meta_data() { - cat < - - -+ -+ -+curl retries before failing -+ -+curl retries -+ -+ -+ -+ -+ -+curl sleep between tries -+ -+curl sleep -+ -+ -+ - - - -@@ -171,14 +191,18 @@ awseip_start() { - awseip_monitor && return $OCF_SUCCESS - - if [ -n "${PRIVATE_IP_ADDRESS}" ]; then -- NETWORK_INTERFACES_MACS=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/ -H "X-aws-ec2-metadata-token: $TOKEN") -+ NETWORK_INTERFACES_MACS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "-s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/") - for MAC in ${NETWORK_INTERFACES_MACS}; do -- curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/local-ipv4s -H "X-aws-ec2-metadata-token: $TOKEN" | -+ curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "-s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC%/*}/local-ipv4s" | - grep -q "^${PRIVATE_IP_ADDRESS}$" - if [ $? -eq 0 ]; then -- NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN") -+ NETWORK_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "-s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC%/*}/interface-id") - fi - done -+ if [ -z "$NETWORK_ID" ]; then -+ ocf_exit_reason "Could not find network interface for private_ip_address: $PRIVATE_IP_ADDRESS" -+ exit $OCF_ERR_GENERIC -+ fi - $AWSCLI_CMD ec2 associate-address \ - --network-interface-id ${NETWORK_ID} \ - --allocation-id ${ALLOCATION_ID} \ -@@ -282,8 +306,10 @@ fi - ELASTIC_IP="${OCF_RESKEY_elastic_ip}" - ALLOCATION_ID="${OCF_RESKEY_allocation_id}" - PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}" --TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") --INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") -+TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token") -+[ $? 
-ne 0 ] && exit $OCF_ERR_GENERIC -+INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id") -+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC - - case $__OCF_ACTION in - start) -diff --git a/heartbeat/awsvip b/heartbeat/awsvip -index bdb4d68dd..f2b238a0f 100755 ---- a/heartbeat/awsvip -+++ b/heartbeat/awsvip -@@ -48,12 +48,16 @@ OCF_RESKEY_auth_type_default="key" - OCF_RESKEY_profile_default="default" - OCF_RESKEY_region_default="" - OCF_RESKEY_api_delay_default="3" -+OCF_RESKEY_curl_retries_default="3" -+OCF_RESKEY_curl_sleep_default="1" - - : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}} - : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}} - : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}} - : ${OCF_RESKEY_region=${OCF_RESKEY_region_default}} - : ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}} -+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}} -+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}} - - meta_data() { - cat < - - -+ -+ -+curl retries before failing -+ -+curl retries -+ -+ -+ -+ -+ -+curl sleep between tries -+ -+curl sleep -+ -+ -+ - - - -@@ -246,10 +266,14 @@ if [ -n "${OCF_RESKEY_region}" ]; then - AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}" - fi - SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}" --TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") --INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") --MAC_ADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/mac -H "X-aws-ec2-metadata-token: $TOKEN") --NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN") -+TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token") -+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC -+INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id") -+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC -+MAC_ADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/mac") -+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC -+NETWORK_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id") -+[ $? 
-ne 0 ] && exit $OCF_ERR_GENERIC - - case $__OCF_ACTION in - start) diff --git a/SOURCES/RHEL-44923-aliyun-gcp-fix-bundled-urllib3-CVE-2024-37891.patch b/SOURCES/RHEL-44923-aliyun-gcp-fix-bundled-urllib3-CVE-2024-37891.patch deleted file mode 100644 index 4d0ac31..0000000 --- a/SOURCES/RHEL-44923-aliyun-gcp-fix-bundled-urllib3-CVE-2024-37891.patch +++ /dev/null @@ -1,48 +0,0 @@ -From accff72ecc2f6cf5a76d9570198a93ac7c90270e Mon Sep 17 00:00:00 2001 -From: Quentin Pradet -Date: Mon, 17 Jun 2024 11:09:06 +0400 -Subject: [PATCH] Merge pull request from GHSA-34jh-p97f-mpxf - -* Strip Proxy-Authorization header on redirects - -* Fix test_retry_default_remove_headers_on_redirect - -* Set release date ---- - CHANGES.rst | 5 +++++ - src/urllib3/util/retry.py | 4 +++- - test/test_retry.py | 6 ++++- - test/with_dummyserver/test_poolmanager.py | 27 ++++++++++++++++++++--- - 4 files changed, 37 insertions(+), 5 deletions(-) - -diff --git a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py -index 7a76a4a6ad..0456cceba4 100644 ---- a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py -+++ b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py -@@ -189,7 +189,9 @@ class Retry: - RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) - - #: Default headers to be used for ``remove_headers_on_redirect`` -- DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Cookie", "Authorization"]) -+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset( -+ ["Cookie", "Authorization", "Proxy-Authorization"] -+ ) - - #: Default maximum backoff time. - DEFAULT_BACKOFF_MAX = 120 - -diff --git a/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py b/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py -index 7a76a4a6ad..0456cceba4 100644 ---- a/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py -+++ b/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py -@@ -189,7 +189,9 @@ class Retry: - RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) - - #: Default headers to be used for ``remove_headers_on_redirect`` -- DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Cookie", "Authorization"]) -+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset( -+ ["Cookie", "Authorization", "Proxy-Authorization"] -+ ) - - #: Default maximum backoff time. 
- DEFAULT_BACKOFF_MAX = 120 diff --git a/SOURCES/RHEL-50360-setuptools-fix-CVE-2024-6345.patch b/SOURCES/RHEL-50360-setuptools-fix-CVE-2024-6345.patch deleted file mode 100644 index 423d4cb..0000000 --- a/SOURCES/RHEL-50360-setuptools-fix-CVE-2024-6345.patch +++ /dev/null @@ -1,201 +0,0 @@ ---- a/setuptools/package_index.py 1980-01-01 09:00:00.000000000 +0100 -+++ b/setuptools/package_index.py 2024-07-25 10:11:40.537307665 +0200 -@@ -1,5 +1,6 @@ - """PyPI and direct package downloading""" - import sys -+import subprocess - import os - import re - import shutil -@@ -563,7 +564,7 @@ - scheme = URL_SCHEME(spec) - if scheme: - # It's a url, download it to tmpdir -- found = self._download_url(scheme.group(1), spec, tmpdir) -+ found = self._download_url(spec, tmpdir) - base, fragment = egg_info_for_url(spec) - if base.endswith('.py'): - found = self.gen_setup(found, fragment, tmpdir) -@@ -775,7 +776,7 @@ - raise DistutilsError("Download error for %s: %s" - % (url, v)) - -- def _download_url(self, scheme, url, tmpdir): -+ def _download_url(self, url, tmpdir): - # Determine download filename - # - name, fragment = egg_info_for_url(url) -@@ -790,19 +791,59 @@ - - filename = os.path.join(tmpdir, name) - -- # Download the file -- # -- if scheme == 'svn' or scheme.startswith('svn+'): -- return self._download_svn(url, filename) -- elif scheme == 'git' or scheme.startswith('git+'): -- return self._download_git(url, filename) -- elif scheme.startswith('hg+'): -- return self._download_hg(url, filename) -- elif scheme == 'file': -- return urllib.request.url2pathname(urllib.parse.urlparse(url)[2]) -- else: -- self.url_ok(url, True) # raises error if not allowed -- return self._attempt_download(url, filename) -+ return self._download_vcs(url, filename) or self._download_other(url, filename) -+ -+ @staticmethod -+ def _resolve_vcs(url): -+ """ -+ >>> rvcs = PackageIndex._resolve_vcs -+ >>> rvcs('git+http://foo/bar') -+ 'git' -+ >>> rvcs('hg+https://foo/bar') -+ 'hg' -+ >>> rvcs('git:myhost') -+ 'git' -+ >>> rvcs('hg:myhost') -+ >>> rvcs('http://foo/bar') -+ """ -+ scheme = urllib.parse.urlsplit(url).scheme -+ pre, sep, post = scheme.partition('+') -+ # svn and git have their own protocol; hg does not -+ allowed = set(['svn', 'git'] + ['hg'] * bool(sep)) -+ return next(iter({pre} & allowed), None) -+ -+ def _download_vcs(self, url, spec_filename): -+ vcs = self._resolve_vcs(url) -+ if not vcs: -+ return -+ if vcs == 'svn': -+ raise DistutilsError( -+ f"Invalid config, SVN download is not supported: {url}" -+ ) -+ -+ filename, _, _ = spec_filename.partition('#') -+ url, rev = self._vcs_split_rev_from_url(url) -+ -+ self.info(f"Doing {vcs} clone from {url} to {filename}") -+ subprocess.check_call([vcs, 'clone', '--quiet', url, filename]) -+ -+ co_commands = dict( -+ git=[vcs, '-C', filename, 'checkout', '--quiet', rev], -+ hg=[vcs, '--cwd', filename, 'up', '-C', '-r', rev, '-q'], -+ ) -+ if rev is not None: -+ self.info(f"Checking out {rev}") -+ subprocess.check_call(co_commands[vcs]) -+ -+ return filename -+ -+ def _download_other(self, url, filename): -+ scheme = urllib.parse.urlsplit(url).scheme -+ if scheme == 'file': # pragma: no cover -+ return urllib.request.url2pathname(urllib.parse.urlparse(url).path) -+ # raise error if not allowed -+ self.url_ok(url, True) -+ return self._attempt_download(url, filename) - - def scan_url(self, url): - self.process_url(url, True) -@@ -829,76 +870,37 @@ - os.unlink(filename) - raise DistutilsError("Unexpected HTML page found at " + url) - -- def _download_svn(self, url, 
filename): -- url = url.split('#', 1)[0] # remove any fragment for svn's sake -- creds = '' -- if url.lower().startswith('svn:') and '@' in url: -- scheme, netloc, path, p, q, f = urllib.parse.urlparse(url) -- if not netloc and path.startswith('//') and '/' in path[2:]: -- netloc, path = path[2:].split('/', 1) -- auth, host = splituser(netloc) -- if auth: -- if ':' in auth: -- user, pw = auth.split(':', 1) -- creds = " --username=%s --password=%s" % (user, pw) -- else: -- creds = " --username=" + auth -- netloc = host -- parts = scheme, netloc, url, p, q, f -- url = urllib.parse.urlunparse(parts) -- self.info("Doing subversion checkout from %s to %s", url, filename) -- os.system("svn checkout%s -q %s %s" % (creds, url, filename)) -- return filename -- - @staticmethod -- def _vcs_split_rev_from_url(url, pop_prefix=False): -- scheme, netloc, path, query, frag = urllib.parse.urlsplit(url) -- -- scheme = scheme.split('+', 1)[-1] -- -- # Some fragment identification fails -- path = path.split('#', 1)[0] -- -- rev = None -- if '@' in path: -- path, rev = path.rsplit('@', 1) -- -- # Also, discard fragment -- url = urllib.parse.urlunsplit((scheme, netloc, path, query, '')) -- -- return url, rev -- -- def _download_git(self, url, filename): -- filename = filename.split('#', 1)[0] -- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) -- -- self.info("Doing git clone from %s to %s", url, filename) -- os.system("git clone --quiet %s %s" % (url, filename)) -+ def _vcs_split_rev_from_url(url): -+ """ -+ Given a possible VCS URL, return a clean URL and resolved revision if any. - -- if rev is not None: -- self.info("Checking out %s", rev) -- os.system("(cd %s && git checkout --quiet %s)" % ( -- filename, -- rev, -- )) -+ >>> vsrfu = PackageIndex._vcs_split_rev_from_url -+ >>> vsrfu('git+https://github.com/pypa/setuptools@v69.0.0#egg-info=setuptools') -+ ('https://github.com/pypa/setuptools', 'v69.0.0') -+ >>> vsrfu('git+https://github.com/pypa/setuptools#egg-info=setuptools') -+ ('https://github.com/pypa/setuptools', None) -+ >>> vsrfu('http://foo/bar') -+ ('http://foo/bar', None) -+ """ -+ parts = urllib.parse.urlsplit(url) - -- return filename -+ clean_scheme = parts.scheme.split('+', 1)[-1] - -- def _download_hg(self, url, filename): -- filename = filename.split('#', 1)[0] -- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) -+ # Some fragment identification fails -+ no_fragment_path, _, _ = parts.path.partition('#') - -- self.info("Doing hg clone from %s to %s", url, filename) -- os.system("hg clone --quiet %s %s" % (url, filename)) -+ pre, sep, post = no_fragment_path.rpartition('@') -+ clean_path, rev = (pre, post) if sep else (post, None) - -- if rev is not None: -- self.info("Updating to %s", rev) -- os.system("(cd %s && hg up -C -r %s >&-)" % ( -- filename, -- rev, -- )) -+ resolved = parts._replace( -+ scheme=clean_scheme, -+ path=clean_path, -+ # discard the fragment -+ fragment='', -+ ).geturl() - -- return filename -+ return resolved, rev - - def debug(self, msg, *args): - log.debug(msg, *args) diff --git a/SOURCES/RHEL-61138-nfsserver-also-stop-rpc-statd-for-nfsv4_only.patch b/SOURCES/RHEL-61138-nfsserver-also-stop-rpc-statd-for-nfsv4_only.patch deleted file mode 100644 index 73e2324..0000000 --- a/SOURCES/RHEL-61138-nfsserver-also-stop-rpc-statd-for-nfsv4_only.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 38eaf00bc81af7530c56eba282918762a47a9326 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 19 Sep 2024 13:01:53 +0200 -Subject: [PATCH] nfsserver: also 
stop rpc-statd for nfsv4_only to avoid stop - failing in some cases - -E.g. nfs_no_notify=true nfsv4_only=true nfs_shared_infodir=/nfsmq/nfsinfo would cause a "Failed to unmount a bind mount" error ---- - heartbeat/nfsserver | 16 +++++++--------- - 1 file changed, 7 insertions(+), 9 deletions(-) - -diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver -index 5793d7a70..fd9268afc 100755 ---- a/heartbeat/nfsserver -+++ b/heartbeat/nfsserver -@@ -947,15 +947,13 @@ nfsserver_stop () - sleep 1 - done - -- if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then -- nfs_exec stop rpc-statd > /dev/null 2>&1 -- ocf_log info "Stop: rpc-statd" -- rpcinfo -t localhost 100024 > /dev/null 2>&1 -- rc=$? -- if [ "$rc" -eq "0" ]; then -- ocf_exit_reason "Failed to stop rpc-statd" -- return $OCF_ERR_GENERIC -- fi -+ nfs_exec stop rpc-statd > /dev/null 2>&1 -+ ocf_log info "Stop: rpc-statd" -+ rpcinfo -t localhost 100024 > /dev/null 2>&1 -+ rc=$? -+ if [ "$rc" -eq "0" ]; then -+ ocf_exit_reason "Failed to stop rpc-statd" -+ return $OCF_ERR_GENERIC - fi - - nfs_exec stop nfs-idmapd > /dev/null 2>&1 diff --git a/SOURCES/RHEL-69297-1-Filesystem-dont-kill-unrelated-processes.patch b/SOURCES/RHEL-69297-1-Filesystem-dont-kill-unrelated-processes.patch deleted file mode 100644 index ca24f32..0000000 --- a/SOURCES/RHEL-69297-1-Filesystem-dont-kill-unrelated-processes.patch +++ /dev/null @@ -1,22 +0,0 @@ -From 4b09b3e467a7f8076bbf20f5b027efecf16303e7 Mon Sep 17 00:00:00 2001 -From: Gianluca Piccolo -Date: Thu, 6 Jun 2024 17:34:41 +0200 -Subject: [PATCH] Fix #1944 - ---- - heartbeat/Filesystem | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index a445349b9..59b6c1b51 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -664,7 +664,7 @@ get_pids() - if [ "X${HOSTOS}" = "XOpenBSD" ];then - fstat | grep $dir | awk '{print $3}' - else -- $FUSER -m $dir 2>/dev/null -+ $FUSER -Mm $dir 2>/dev/null - fi - elif [ "$FORCE_UNMOUNT" = "safe" ]; then - procs=$(find /proc/[0-9]*/ -type l -lname "${dir}/*" -or -lname "${dir}" 2>/dev/null | awk -F/ '{print $3}') diff --git a/SOURCES/RHEL-69297-2-Filesystem-update-bsd-logic.patch b/SOURCES/RHEL-69297-2-Filesystem-update-bsd-logic.patch deleted file mode 100644 index 8299fa8..0000000 --- a/SOURCES/RHEL-69297-2-Filesystem-update-bsd-logic.patch +++ /dev/null @@ -1,26 +0,0 @@ -From c9ba6ac66ee27a70c69e1156f17aa6beac277bc5 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Fri, 7 Jun 2024 14:23:28 +0200 -Subject: [PATCH] Filesystem: use fuser -c on FreeBSD, as -m and -M are used - for other functionality - ---- - heartbeat/Filesystem | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index 59b6c1b51..88fe2e2eb 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -661,8 +661,10 @@ get_pids() - fi - - if ocf_is_true "$FORCE_UNMOUNT"; then -- if [ "X${HOSTOS}" = "XOpenBSD" ];then -+ if [ "X${HOSTOS}" = "XOpenBSD" ]; then - fstat | grep $dir | awk '{print $3}' -+ elif [ "X${HOSTOS}" = "XFreeBSD" ]; then -+ $FUSER -c $dir 2>/dev/null - else - $FUSER -Mm $dir 2>/dev/null - fi diff --git a/SOURCES/RHEL-79823-portblock-fix-version-detection.patch b/SOURCES/RHEL-79823-portblock-fix-version-detection.patch deleted file mode 100644 index c70ecea..0000000 --- a/SOURCES/RHEL-79823-portblock-fix-version-detection.patch +++ /dev/null @@ -1,448 +0,0 @@ ---- a/heartbeat/portblock 2025-02-20 14:54:18.047134471 +0100 -+++ b/heartbeat/portblock 
2025-02-20 14:09:44.546869740 +0100 -@@ -25,6 +25,7 @@ - # Defaults - OCF_RESKEY_protocol_default="" - OCF_RESKEY_portno_default="" -+OCF_RESKEY_direction_default="in" - OCF_RESKEY_action_default="" - OCF_RESKEY_ip_default="0.0.0.0/0" - OCF_RESKEY_reset_local_on_unblock_stop_default="false" -@@ -33,6 +34,7 @@ - - : ${OCF_RESKEY_protocol=${OCF_RESKEY_protocol_default}} - : ${OCF_RESKEY_portno=${OCF_RESKEY_portno_default}} -+: ${OCF_RESKEY_direction=${OCF_RESKEY_direction_default}} - : ${OCF_RESKEY_action=${OCF_RESKEY_action_default}} - : ${OCF_RESKEY_ip=${OCF_RESKEY_ip_default}} - : ${OCF_RESKEY_reset_local_on_unblock_stop=${OCF_RESKEY_reset_local_on_unblock_stop_default}} -@@ -217,6 +219,18 @@ - Connection state file synchronization script - - -+ -+ -+ -+Whether to block incoming or outgoing traffic. Can be either "in", -+"out", or "both". -+If "in" is used, the incoming ports are blocked on the INPUT chain. -+If "out" is used, the outgoing ports are blocked on the OUTPUT chain. -+If "both" is used, both the incoming and outgoing ports are blocked. -+ -+Whether to block incoming or outgoing traffic, or both -+ -+ - - - -@@ -240,36 +254,73 @@ - # and disable us -- but we're still in some sense active... - # - --#active_grep_pat {udp|tcp} portno,portno -+#active_grep_pat {udp|tcp} portno,portno ip {d|s} -+# d = look for destination ports -+# s = look for source ports - active_grep_pat() - { - w="[ ][ ]*" - any="0\\.0\\.0\\.0/0" -- echo "^DROP${w}${1}${w}--${w}${any}${w}${3}${w}multiport${w}dports${w}${2}\>" -+ src=$any dst=$3 -+ if [ "$4" = "s" ]; then -+ local src=$3 -+ local dst=$any -+ fi -+ # iptables 1.8.9 briefly broke the output format, returning the -+ # numeric protocol value instead of a string. Support both variants. -+ if [ "$1" = "tcp" ]; then -+ local prot="(tcp|6)" -+ else -+ local prot="(udp|17)" -+ fi -+ echo "^DROP${w}${prot}${w}--${w}${src}${w}${dst}${w}multiport${w}${4}ports${w}${2}$" - } - --#chain_isactive {udp|tcp} portno,portno ip -+#chain_isactive {udp|tcp} portno,portno ip chain - chain_isactive() - { -- PAT=`active_grep_pat "$1" "$2" "$3"` -- $IPTABLES $wait -n -L INPUT | grep "$PAT" >/dev/null -+ [ "$4" = "OUTPUT" ] && ds="s" || ds="d" -+ PAT=$(active_grep_pat "$1" "$2" "$3" "$ds") -+ $IPTABLES $wait -n -L "$4" | grep -qE "$PAT" -+} -+ -+# netstat -tn and ss -Htn, split on whitespace and colon, -+# look very similar: -+# tcp 0 0 10.43.55.1 675 10.43.9.8 2049 ESTABLISHED -+# ESTAB 0 0 10.43.55.1 675 10.43.9.8 2049 -+# so we can write one awk script for both -+get_established_tcp_connections() -+{ -+ local columns -+ if [ -z "$1" ] ; then -+ columns='$4,$5, $6,$7' -+ else -+ # swap local and remote for "tickle_local" -+ columns='$6,$7, $4,$5' -+ fi -+ $ss_or_netstat | awk -F '[:[:space:]]+' ' -+ ( $8 == "ESTABLISHED" || $1 == "ESTAB" ) && $4 == "'$OCF_RESKEY_ip'" \ -+ {printf "%s:%s\t%s:%s\n", '"$columns"'}' - } - - save_tcp_connections() - { - [ -z "$OCF_RESKEY_tickle_dir" ] && return - statefile=$OCF_RESKEY_tickle_dir/$OCF_RESKEY_ip -+ # If we have _no_ sync script, we probably have a shared -+ # (or replicated) directory, and need to fsync, or we might -+ # end up with the just truncated file after failover, exactly -+ # when we need it. -+ # -+ # If we _do_ have a sync script, it is not that important whether -+ # the local state file is fsync'ed or not, the sync script is -+ # responsible to "atomically" communicate the state to the peer(s). 
- if [ -z "$OCF_RESKEY_sync_script" ]; then -- netstat -tn |awk -F '[:[:space:]]+' ' -- $8 == "ESTABLISHED" && $4 == "'$OCF_RESKEY_ip'" \ -- {printf "%s:%s\t%s:%s\n", $4,$5, $6,$7}' | -- dd of="$statefile".new conv=fsync status=none && -- mv "$statefile".new "$statefile" -+ get_established_tcp_connections | -+ dd of="$statefile".new conv=fsync status=none && -+ mv "$statefile".new "$statefile" - else -- netstat -tn |awk -F '[:[:space:]]+' ' -- $8 == "ESTABLISHED" && $4 == "'$OCF_RESKEY_ip'" \ -- {printf "%s:%s\t%s:%s\n", $4,$5, $6,$7}' \ -- > $statefile -+ get_established_tcp_connections > $statefile - $OCF_RESKEY_sync_script $statefile > /dev/null 2>&1 & - fi - } -@@ -277,7 +328,6 @@ - tickle_remote() - { - [ -z "$OCF_RESKEY_tickle_dir" ] && return -- echo 1 > /proc/sys/net/ipv4/tcp_tw_recycle - f=$OCF_RESKEY_tickle_dir/$OCF_RESKEY_ip - [ -r $f ] || return - $TICKLETCP -n 3 < $f -@@ -289,11 +339,6 @@ - f=$OCF_RESKEY_tickle_dir/$OCF_RESKEY_ip - [ -r $f ] || return - -- checkcmd="netstat -tn" -- if ! have_binary "netstat"; then -- checkcmd="ss -Htn" -- fi -- - # swap "local" and "remote" address, - # so we tickle ourselves. - # We set up a REJECT with tcp-reset before we do so, so we get rid of -@@ -302,122 +347,152 @@ - # the way if we switch-over and then switch-back in quick succession. - local i - awk '{ print $2, $1; }' $f | $TICKLETCP -- $checkcmd | grep -Fw $OCF_RESKEY_ip || return -+ $ss_or_netstat | grep -Fw $OCF_RESKEY_ip || return - for i in 0.1 0.5 1 2 4 ; do - sleep $i -- awk '{ print $2, $1; }' $f | $TICKLETCP -- $checkcmd | grep -Fw $OCF_RESKEY_ip || break -+ # now kill what is currently in the list, -+ # not what was recorded during last monitor -+ get_established_tcp_connections swap | $TICKLETCP -+ $ss_or_netstat | grep -Fw $OCF_RESKEY_ip || break - done - } - - SayActive() - { -- echo "$CMD DROP rule for INPUT chain [$*] is running (OK)" -+ ocf_log debug "$CMD DROP rule [$*] is running (OK)" - } - - SayConsideredActive() - { -- echo "$CMD DROP rule for INPUT chain [$*] considered to be running (OK)" -+ ocf_log debug "$CMD DROP rule [$*] considered to be running (OK)" - } - - SayInactive() - { -- echo "$CMD DROP rule for INPUT chain [$*] is inactive" -+ ocf_log debug "$CMD DROP rule [$*] is inactive" - } - --#IptablesStatus {udp|tcp} portno,portno ip {block|unblock} -+#IptablesStatus {udp|tcp} portno,portno ip {in|out|both} {block|unblock} - IptablesStatus() { -- local rc -- rc=$OCF_ERR_GENERIC -- activewords="$CMD $1 $2 is running (OK)" -- if chain_isactive "$1" "$2" "$3"; then -- case $4 in -- block) -- SayActive $* -- rc=$OCF_SUCCESS -- ;; -- *) -- SayInactive $* -- rc=$OCF_NOT_RUNNING -- ;; -- esac -- else -- case $4 in -- block) -- if ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" status; then -- SayConsideredActive $* -- rc=$OCF_SUCCESS -- else -- SayInactive $* -- rc=$OCF_NOT_RUNNING -- fi -- ;; -- -- *) -- if ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" status; then -- SayActive $* -- #This is only run on real monitor events. -- save_tcp_connections -- rc=$OCF_SUCCESS -- else -- SayInactive $* -- rc=$OCF_NOT_RUNNING -- fi -- ;; -- esac -- fi -- -- return $rc -+ local rc -+ rc=$OCF_ERR_GENERIC -+ is_active=0 -+ if [ "$4" = "in" ] || [ "$4" = "both" ]; then -+ chain_isactive "$1" "$2" "$3" INPUT -+ is_active=$? -+ fi -+ if [ "$4" = "out" ] || [ "$4" = "both" ]; then -+ chain_isactive "$1" "$2" "$3" OUTPUT -+ r=$? 
-+ [ $r -gt $is_active ] && is_active=$r -+ fi -+ if [ $is_active -eq 0 ]; then -+ case $5 in -+ block) -+ SayActive $* -+ rc=$OCF_SUCCESS -+ ;; -+ *) -+ SayInactive $* -+ rc=$OCF_NOT_RUNNING -+ ;; -+ esac -+ else -+ case $5 in -+ block) -+ if ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" status; then -+ SayConsideredActive $* -+ rc=$OCF_SUCCESS -+ else -+ SayInactive $* -+ rc=$OCF_NOT_RUNNING -+ fi -+ ;; -+ *) -+ if ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" status; then -+ SayActive $* -+ #This is only run on real monitor events. -+ save_tcp_connections -+ rc=$OCF_SUCCESS -+ else -+ SayInactive $* -+ rc=$OCF_NOT_RUNNING -+ fi -+ ;; -+ esac -+ fi -+ return $rc - } - --#IptablesBLOCK {udp|tcp} portno,portno ip --IptablesBLOCK() -+#DoIptables {-I|-D} {udp|tcp} portno,portno ip chain -+DoIptables() - { -- local rc=0 -- local try_reset=false -- if [ "$1/$4/$__OCF_ACTION" = tcp/unblock/stop ] && -- ocf_is_true $reset_local_on_unblock_stop -- then -- try_reset=true -- fi -- if -- chain_isactive "$1" "$2" "$3" -- then -- : OK -- chain already active -+ op=$1 proto=$2 ports=$3 ip=$4 chain=$5 -+ active=0; chain_isactive "$proto" "$ports" "$ip" "$chain" && active=1 -+ want_active=0; [ "$op" = "-I" ] && want_active=1 -+ ocf_log debug "active: $active want_active: $want_active" -+ if [ $active -eq $want_active ] ; then -+ : Chain already in desired state - else -- if $try_reset ; then -- $IPTABLES $wait -I OUTPUT -p "$1" -s "$3" -m multiport --sports "$2" -j REJECT --reject-with tcp-reset -- tickle_local -+ [ "$chain" = "OUTPUT" ] && ds="s" || ds="d" -+ $IPTABLES $wait "$op" "$chain" -p "$proto" -${ds} "$ip" -m multiport --${ds}ports "$ports" -j DROP -+ fi -+} -+ -+#IptablesBLOCK {udp|tcp} portno,portno ip {in|out|both} {block|unblock} -+IptablesBLOCK() -+{ -+ local rc_in=0 -+ local rc_out=0 -+ if [ "$4" = "in" ] || [ "$4" = "both" ]; then -+ local try_reset=false -+ if [ "$1/$5/$__OCF_ACTION" = tcp/unblock/stop ] && -+ ocf_is_true $reset_local_on_unblock_stop -+ then -+ try_reset=true - fi -- $IPTABLES $wait -I INPUT -p "$1" -d "$3" -m multiport --dports "$2" -j DROP -- rc=$? -- if $try_reset ; then -- $IPTABLES $wait -D OUTPUT -p "$1" -s "$3" -m multiport --sports "$2" -j REJECT --reject-with tcp-reset -+ if -+ chain_isactive "$1" "$2" "$3" INPUT -+ then -+ : OK -- chain already active -+ else -+ if $try_reset ; then -+ $IPTABLES $wait -I OUTPUT -p "$1" -s "$3" -m multiport --sports "$2" -j REJECT --reject-with tcp-reset -+ tickle_local -+ fi -+ $IPTABLES $wait -I INPUT -p "$1" -d "$3" -m multiport --dports "$2" -j DROP -+ rc_in=$? -+ if $try_reset ; then -+ $IPTABLES $wait -D OUTPUT -p "$1" -s "$3" -m multiport --sports "$2" -j REJECT --reject-with tcp-reset -+ fi - fi - fi -+ if [ "$4" = "out" ] || [ "$4" = "both" ]; then -+ DoIptables -I "$1" "$2" "$3" OUTPUT -+ rc_out=$? -+ fi - -- return $rc -+ [ $rc_in -gt $rc_out ] && return $rc_in || return $rc_out - } - --#IptablesUNBLOCK {udp|tcp} portno,portno ip -+#IptablesUNBLOCK {udp|tcp} portno,portno ip {in|out|both} - IptablesUNBLOCK() - { -- if -- chain_isactive "$1" "$2" "$3" -- then -- $IPTABLES $wait -D INPUT -p "$1" -d "$3" -m multiport --dports "$2" -j DROP -- else -- : Chain Not active -+ if [ "$4" = "in" ] || [ "$4" = "both" ]; then -+ DoIptables -D "$1" "$2" "$3" INPUT -+ fi -+ if [ "$4" = "out" ] || [ "$4" = "both" ]; then -+ DoIptables -D "$1" "$2" "$3" OUTPUT - fi - - return $? 
- } - --#IptablesStart {udp|tcp} portno,portno ip {block|unblock} -+#IptablesStart {udp|tcp} portno,portno ip {in|out|both} {block|unblock} - IptablesStart() - { - ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" start -- case $4 in -+ case $5 in - block) IptablesBLOCK "$@";; - unblock) - IptablesUNBLOCK "$@" -@@ -432,11 +507,11 @@ - return $? - } - --#IptablesStop {udp|tcp} portno,portno ip {block|unblock} -+#IptablesStop {udp|tcp} portno,portno ip {in|out|both} {block|unblock} - IptablesStop() - { - ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" stop -- case $4 in -+ case $5 in - block) IptablesUNBLOCK "$@";; - unblock) - save_tcp_connections -@@ -454,7 +529,7 @@ - CheckPort() { - # Examples of valid port: "1080", "1", "0080" - # Examples of invalid port: "1080bad", "0", "0000", "" -- echo $1 |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' -+ echo $1 | $EGREP -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' - } - - IptablesValidateAll() -@@ -543,7 +618,7 @@ - fi - - # iptables v1.4.20+ is required to use -w (wait) --version=$(iptables -V | awk -F ' v' '{print $NF}') -+version=$(iptables -V | grep -oE '[0-9]+[\.0-9]+') - ocf_version_cmp "$version" "1.4.19.1" - if [ "$?" -eq "2" ]; then - wait="-w" -@@ -553,21 +628,36 @@ - - protocol=$OCF_RESKEY_protocol - portno=$OCF_RESKEY_portno -+direction=$OCF_RESKEY_direction - action=$OCF_RESKEY_action - ip=$OCF_RESKEY_ip - reset_local_on_unblock_stop=$OCF_RESKEY_reset_local_on_unblock_stop - -+ -+# If "tickle" is enabled, we need to record the list of currently established -+# connections during monitor. Use ss where available, and netstat otherwise. -+if [ -n "$OCF_RESKEY_tickle_dir" ] ; then -+ if have_binary ss ; then -+ ss_or_netstat="ss -Htn" -+ elif have_binary netstat ; then -+ ss_or_netstat="netstat -tn" -+ else -+ ocf_log err "Neither ss nor netstat found, but needed to record estblished connections." 
-+ exit $OCF_ERR_INSTALLED -+ fi -+fi -+ - case $1 in - start) -- IptablesStart $protocol $portno $ip $action -+ IptablesStart $protocol $portno $ip $direction $action - ;; - - stop) -- IptablesStop $protocol $portno $ip $action -+ IptablesStop $protocol $portno $ip $direction $action - ;; - - status|monitor) -- IptablesStatus $protocol $portno $ip $action -+ IptablesStatus $protocol $portno $ip $direction $action - ;; - - validate-all) diff --git a/SOURCES/aliyun-vpc-move-ip-4-bundled.patch b/SOURCES/aliyun-vpc-move-ip-4-bundled.patch deleted file mode 100644 index 1f2ff0a..0000000 --- a/SOURCES/aliyun-vpc-move-ip-4-bundled.patch +++ /dev/null @@ -1,15 +0,0 @@ ---- a/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:45:38.432860930 +0200 -+++ b/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:51:06.341211557 +0200 -@@ -35,10 +35,10 @@ - USAGE="usage: $0 {start|stop|status|meta-data}"; - - if [ "${OCF_RESKEY_aliyuncli}" = "detect" ]; then -- OCF_RESKEY_aliyuncli="$(which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)" -+ OCF_RESKEY_aliyuncli="$(which aliyuncli-ra 2> /dev/null || which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)" - fi - --if [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli' ]; then -+if [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli-ra' ] || [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli' ]; then - OUTPUT="text" - EXECUTING='{ print $3 }' - IFS_=" " diff --git a/SOURCES/aliyuncli-python3-fixes.patch b/SOURCES/aliyuncli-python3-fixes.patch deleted file mode 100644 index 22be4e1..0000000 --- a/SOURCES/aliyuncli-python3-fixes.patch +++ /dev/null @@ -1,398 +0,0 @@ -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 12:08:17.331785393 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 14:40:39.656330971 +0100 -@@ -13,7 +13,7 @@ - - def getFileName(self,keyValues): - filename = None -- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: -+ if '--filename' in keyValues and len(keyValues['--filename']) > 0: - filename = keyValues['--filename'][0] - else: - return filename, "A file name is needed! please use \'--filename\' and add the file name." -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 12:08:17.331785393 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 14:41:48.927128430 +0100 -@@ -13,7 +13,7 @@ - - def getFileName(self,keyValues): - filename = None -- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: -+ if '--filename' in keyValues and len(keyValues['--filename']) > 0: - filename = keyValues['--filename'][0] - else: - print("A profile is needed! 
please use \'--filename\' and add the profile name.") -@@ -21,7 +21,7 @@ - - def getInstanceCount(self,keyValues): - count = 1 -- if keyValues.has_key('--instancecount') and len(keyValues['--instancecount']) > 0: -+ if '--instancecount' in keyValues and len(keyValues['--instancecount']) > 0: - if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0: - count = keyValues['--instancecount'][0] - else: -@@ -113,7 +113,7 @@ - - def isAllocatePublicIpAddress(self,keyValues): - _publicIp = False -- if keyValues.has_key('--allocatepublicip') and len(keyValues['--allocatepublicip']) > 0: -+ if '--allocatepublicip' in keyValues and len(keyValues['--allocatepublicip']) > 0: - if keyValues['--allocatepublicip'][0] == "yes": - _publicIp = True - return _publicIp -@@ -125,7 +125,7 @@ - ''' - data = json.loads(jsonbody) - ''' -- if data.has_key('InstanceId') and len(data['InstanceId']) > 0: -+ if 'InstanceId' in data and len(data['InstanceId']) > 0: - instanceId = data['InstanceId'] - except Exception as e: - pass -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 12:08:17.331785393 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 14:42:11.772731833 +0100 -@@ -38,7 +38,7 @@ - - def getFileName(self,keyValues): - filename = None -- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: -+ if '--filename' in keyValues and len(keyValues['--filename']) > 0: - filename = keyValues['--filename'][0] - else: - return filename, "A file name is needed! please use \'--filename\' and add the file name." -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 12:08:17.331785393 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 14:39:09.247900469 +0100 -@@ -13,7 +13,7 @@ - - def getFileName(self,keyValues): - filename = None -- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: -+ if '--filename' in keyValues and len(keyValues['--filename']) > 0: - filename = keyValues['--filename'][0] - else: - return filename, "A filename is needed! please use \'--filename\' and add the file name." 
-@@ -21,7 +21,7 @@ - def getInstanceCount(self,keyValues): - count = 1 - import_count = "--count" -- if keyValues.has_key(import_count) and len(keyValues[import_count]) > 0: -+ if import_count in keyValues and len(keyValues[import_count]) > 0: - if keyValues[import_count][0].isdigit() and int(keyValues[import_count][0]) >= 0: - count = keyValues[import_count][0] - else: -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2019-02-19 11:01:46.116653274 +0100 -@@ -17,37 +17,37 @@ - - def getConfigHandlerOptions(self): - return [ConfigCmd.name] -- -+ - def showConfig(self): - _credentialsPath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.credentials) - _configurePath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.configure) - config = dict() - configContent = dict() -- credentialsContent = dict () -- if os.path.exists(_configurePath): -+ credentialsContent = dict () -+ if os.path.exists(_configurePath): - for line in open(_configurePath): - line = line.strip('\n') - if line.find('=') > 0: - list = line.split("=",1) -- configContent[list[0]] = list[1] -- else: -- pass -- config['configure'] = configContent -- if os.path.exists(_credentialsPath): -- for line in open(_credentialsPath): -+ configContent[list[0]] = list[1] -+ else: -+ pass -+ config['configure'] = configContent -+ if os.path.exists(_credentialsPath): -+ for line in open(_credentialsPath): - line = line.strip('\n') - if line.find('=') > 0: - list = line.split("=",1) -- credentialsContent[list[0]] = list[1] -- else: -- pass -- config ['credentials'] = credentialsContent -- response.display_response("showConfigure",config,'table') -+ credentialsContent[list[0]] = list[1] -+ else: -+ pass -+ config ['credentials'] = credentialsContent -+ response.display_response("showConfigure",config,'table') - def importConfig(): - pass - def exportConfig(): - pass -- -+ - - - if __name__ == "__main__": -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 12:08:17.332785376 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 14:40:12.267806439 +0100 -@@ -20,7 +20,7 @@ - def handleProfileCmd(self, cmd, keyValues): - if cmd.lower() == ProfileCmd.useProfile.lower(): # confirm command is right - #check --name is valid -- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0: -+ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0: - _value = keyValues[ProfileCmd.name][0] # use the first value - self.extensionCliHandler.setUserProfile(_value) - else: -@@ -34,7 +34,7 @@ - newProfileName = '' - if cmd.lower() == ProfileCmd.addProfile.lower(): # confirm command is right - #check --name is valid -- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0: -+ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0: - _value = keyValues[ProfileCmd.name][0] # check the first value - # only input key and secret - newProfileName = _value -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 
b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 12:08:17.332785376 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 14:35:32.009660989 +0100 -@@ -137,9 +137,9 @@ - values.append(self.args[index]) - index = index + 1 - keyValues[currentValue] = values -- if keyValues.has_key(keystr) and keyValues[keystr].__len__() > 0: -+ if keystr in keyValues and keyValues[keystr].__len__() > 0: - _key = keyValues[keystr][0] -- if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0: -+ if secretstr in keyValues and keyValues[secretstr].__len__() > 0: - _secret = keyValues[secretstr][0] - #print("accesskeyid: ", _key , "accesskeysecret: ",_secret) - return _key, _secret -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2019-02-19 13:35:35.738680413 +0100 -@@ -19,8 +19,9 @@ - ''' - - import sys --reload(sys) --sys.setdefaultencoding('utf-8') -+if sys.version_info[0] < 3: -+ reload(sys) -+ sys.setdefaultencoding('utf-8') - __author__ = 'xixi.xxx' - import aliyunCliMain - -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 12:08:17.332785376 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 11:15:19.920089641 +0100 -@@ -18,7 +18,7 @@ - ''' - - import aliyunCliConfiugre --import urllib2 -+import urllib3 - import re - import os - import platform -@@ -151,7 +151,7 @@ - # this functino will get the latest version - def _getLatestTimeFromServer(self): - try: -- f = urllib2.urlopen(self.configure.server_url,data=None,timeout=5) -+ f = urllib3.urlopen(self.configure.server_url,data=None,timeout=5) - s = f.read() - return s - except Exception as e: -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 12:08:17.332785376 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 14:37:28.221649497 +0100 -@@ -26,7 +26,7 @@ - import aliyunSdkConfigure - import json - import cliError --import urllib2 -+import urllib3 - import handleEndPoint - - from __init__ import __version__ -@@ -259,7 +259,7 @@ - def changeEndPoint(self, classname, keyValues): - endpoint = "Endpoint" - try: -- if keyValues.has_key(endpoint) and keyValues[endpoint].__len__() > 0: -+ if endpoint in keyValues and keyValues[endpoint].__len__() > 0: - classname._RestApi__domain = keyValues[endpoint][0] - except Exception as e: - pass -@@ -444,10 +444,10 @@ - - def getTempVersion(self,keyValues): - key='--version' -- if keyValues is not None and keyValues.has_key(key): -+ if keyValues is not None and key in keyValues: - return keyValues.get(key) - key = 'version' -- if keyValues is not None and keyValues.has_key(key): -+ if keyValues is not None and key in keyValues: - return keyValues.get(key) - - def getVersionFromFile(self,cmd): -@@ -513,7 +513,7 @@ - self.checkForServer(response,cmd,operation) - def getRequestId(self,response): - try: -- if response.has_key('RequestId') and len(response['RequestId']) > 0: -+ if 'RequestId' in response and len(response['RequestId']) > 0: - 
requestId = response['RequestId'] - return requestId - except Exception: -@@ -532,7 +532,7 @@ - ua = "" - url = configure.server_url + "?requesId=" + requestId + "&ak=" + ak +"&ua="+ua+"&cmd="+cmd+"&operation="+operation - try: -- f = urllib2.urlopen(url,data=None,timeout=5) -+ f = urllib3.urlopen(url,data=None,timeout=5) - s = f.read() - return s - except Exception : -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 12:08:17.333785359 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 14:38:04.032029661 +0100 -@@ -39,7 +39,7 @@ - - def sdkConfigure(self,cmd,operation): - keyValues = self.parser._getKeyValues() -- if keyValues.has_key('--version') and len(keyValues['--version']) > 0: -+ if '--version' in keyValues and len(keyValues['--version']) > 0: - version=keyValues['--version'][0] - filename=self.fileName - self.writeCmdVersionToFile(cmd,version,filename) -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 12:08:17.333785359 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 11:12:58.670708353 +0100 -@@ -23,6 +23,8 @@ - import aliyunCliParser - import platform - -+if sys.version_info[0] > 2: -+ raw_input = input - - OSS_CREDS_FILENAME = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~') - OSS_CONFIG_SECTION = 'OSSCredentials' -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 12:08:17.333785359 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 11:14:58.926181598 +0100 -@@ -19,7 +19,7 @@ - #/usr/bin/env python - #!-*- coding:utf-8 -*- - import os --import urllib2 -+import urllib3 - import cliError - - -@@ -64,9 +64,9 @@ - print(e) - def _getParamFromUrl(prefix,value,mode): - -- req = urllib2.Request(value) -+ req = urllib3.Request(value) - try: -- response=urllib2.urlopen(req) -+ response=urllib3.urlopen(req) - if response.getcode() == 200: - return response.read() - else: -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/six.py b/bundled/aliyun/aliyun-cli/aliyuncli/six.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2019-02-19 11:14:40.505262286 +0100 -@@ -340,8 +340,8 @@ - - - _urllib_error_moved_attributes = [ -- MovedAttribute("URLError", "urllib2", "urllib.error"), -- MovedAttribute("HTTPError", "urllib2", "urllib.error"), -+ MovedAttribute("URLError", "urllib3", "urllib.error"), -+ MovedAttribute("HTTPError", "urllib3", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), - ] - for attr in _urllib_error_moved_attributes: -@@ -359,34 +359,34 @@ - - - _urllib_request_moved_attributes = [ -- MovedAttribute("urlopen", "urllib2", "urllib.request"), -- MovedAttribute("install_opener", "urllib2", "urllib.request"), -- MovedAttribute("build_opener", "urllib2", "urllib.request"), -+ MovedAttribute("urlopen", "urllib3", "urllib.request"), -+ MovedAttribute("install_opener", "urllib3", "urllib.request"), -+ MovedAttribute("build_opener", "urllib3", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", 
"urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), -- MovedAttribute("Request", "urllib2", "urllib.request"), -- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), -- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), -- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), -- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), -- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), -- MovedAttribute("BaseHandler", "urllib2", "urllib.request"), -- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), -- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), -- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), -- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), -- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), -- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), -- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), -- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), -- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), -- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), -- MovedAttribute("FileHandler", "urllib2", "urllib.request"), -- MovedAttribute("FTPHandler", "urllib2", "urllib.request"), -- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), -- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), -- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), -+ MovedAttribute("Request", "urllib3", "urllib.request"), -+ MovedAttribute("OpenerDirector", "urllib3", "urllib.request"), -+ MovedAttribute("HTTPDefaultErrorHandler", "urllib3", "urllib.request"), -+ MovedAttribute("HTTPRedirectHandler", "urllib3", "urllib.request"), -+ MovedAttribute("HTTPCookieProcessor", "urllib3", "urllib.request"), -+ MovedAttribute("ProxyHandler", "urllib3", "urllib.request"), -+ MovedAttribute("BaseHandler", "urllib3", "urllib.request"), -+ MovedAttribute("HTTPPasswordMgr", "urllib3", "urllib.request"), -+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib3", "urllib.request"), -+ MovedAttribute("AbstractBasicAuthHandler", "urllib3", "urllib.request"), -+ MovedAttribute("HTTPBasicAuthHandler", "urllib3", "urllib.request"), -+ MovedAttribute("ProxyBasicAuthHandler", "urllib3", "urllib.request"), -+ MovedAttribute("AbstractDigestAuthHandler", "urllib3", "urllib.request"), -+ MovedAttribute("HTTPDigestAuthHandler", "urllib3", "urllib.request"), -+ MovedAttribute("ProxyDigestAuthHandler", "urllib3", "urllib.request"), -+ MovedAttribute("HTTPHandler", "urllib3", "urllib.request"), -+ MovedAttribute("HTTPSHandler", "urllib3", "urllib.request"), -+ MovedAttribute("FileHandler", "urllib3", "urllib.request"), -+ MovedAttribute("FTPHandler", "urllib3", "urllib.request"), -+ MovedAttribute("CacheFTPHandler", "urllib3", "urllib.request"), -+ MovedAttribute("UnknownHandler", "urllib3", "urllib.request"), -+ MovedAttribute("HTTPErrorProcessor", "urllib3", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), -diff -uNr a/bundled/aliyun/aliyun-cli/setup.py b/bundled/aliyun/aliyun-cli/setup.py ---- a/bundled/aliyun/aliyun-cli/setup.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/setup.py 2019-02-19 13:33:29.069848394 +0100 -@@ 
-24,7 +24,7 @@ - - install_requires = [ - 'colorama>=0.2.5,<=0.3.3', -- 'jmespath>=0.7.0,<=0.7.1', -+ 'jmespath>=0.7.0', - ] - def main(): - setup( diff --git a/SOURCES/bz1691456-gcloud-dont-detect-python2.patch b/SOURCES/bz1691456-gcloud-dont-detect-python2.patch deleted file mode 100644 index ae414a6..0000000 --- a/SOURCES/bz1691456-gcloud-dont-detect-python2.patch +++ /dev/null @@ -1,14 +0,0 @@ ---- a/bundled/gcp/google-cloud-sdk/bin/gcloud 1980-01-01 09:00:00.000000000 +0100 -+++ b/bundled/gcp/google-cloud-sdk/bin/gcloud 2021-10-14 11:30:17.726138166 +0200 -@@ -128,6 +128,11 @@ - fi - } - -+if [ -z "$CLOUDSDK_PYTHON" ]; then -+ CLOUDSDK_PYTHON="/usr/libexec/platform-python" -+ CLOUDSDK_PYTHON_SITEPACKAGES=1 -+fi -+ - setup_cloudsdk_python - - # $PYTHONHOME can interfere with gcloud. Users should use diff --git a/SOURCES/bz1904465-mysql-common-improve-error-message.patch b/SOURCES/bz1904465-mysql-common-improve-error-message.patch deleted file mode 100644 index 4a19fc4..0000000 --- a/SOURCES/bz1904465-mysql-common-improve-error-message.patch +++ /dev/null @@ -1,68 +0,0 @@ -From fcceb714085836de9db4493b527e94d85dd72626 Mon Sep 17 00:00:00 2001 -From: ut002970 -Date: Wed, 6 Sep 2023 15:27:05 +0800 -Subject: [PATCH 1/3] modify error message - ---- - heartbeat/mysql-common.sh | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh -index 8104019b03..a93acc4c60 100755 ---- a/heartbeat/mysql-common.sh -+++ b/heartbeat/mysql-common.sh -@@ -254,7 +254,7 @@ mysql_common_start() - while [ $start_wait = 1 ]; do - if ! ps $pid > /dev/null 2>&1; then - wait $pid -- ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), please check your installation" -+ ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), please check your installation, log message you can check $OCF_RESKEY_log" - return $OCF_ERR_GENERIC - fi - mysql_common_status info - -From 8f9b344cd5b3cb96ea0f94b7ab0306da2234ac00 Mon Sep 17 00:00:00 2001 -From: ut002970 -Date: Wed, 6 Sep 2023 15:56:24 +0800 -Subject: [PATCH 2/3] modify error message - ---- - heartbeat/mysql-common.sh | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh -index a93acc4c60..d5b2286737 100755 ---- a/heartbeat/mysql-common.sh -+++ b/heartbeat/mysql-common.sh -@@ -254,7 +254,7 @@ mysql_common_start() - while [ $start_wait = 1 ]; do - if ! ps $pid > /dev/null 2>&1; then - wait $pid -- ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), please check your installation, log message you can check $OCF_RESKEY_log" -+ ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), Check $OCF_RESKEY_log for details" - return $OCF_ERR_GENERIC - fi - mysql_common_status info - -From a292b3c552bf3f2beea5f73e0d171546c0a1273c Mon Sep 17 00:00:00 2001 -From: ut002970 -Date: Wed, 6 Sep 2023 16:10:48 +0800 -Subject: [PATCH 3/3] modify error message - ---- - heartbeat/mysql-common.sh | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh -index d5b2286737..d6b4e3cdf4 100755 ---- a/heartbeat/mysql-common.sh -+++ b/heartbeat/mysql-common.sh -@@ -254,7 +254,7 @@ mysql_common_start() - while [ $start_wait = 1 ]; do - if ! ps $pid > /dev/null 2>&1; then - wait $pid -- ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), Check $OCF_RESKEY_log for details" -+ ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?). 
Check $OCF_RESKEY_log for details" - return $OCF_ERR_GENERIC - fi - mysql_common_status info diff --git a/SOURCES/bz1905820-LVM-activate-fix-return-codes.patch b/SOURCES/bz1905820-LVM-activate-fix-return-codes.patch deleted file mode 100644 index 4597e3f..0000000 --- a/SOURCES/bz1905820-LVM-activate-fix-return-codes.patch +++ /dev/null @@ -1,195 +0,0 @@ -From 640c2b57f0f3e7256d587ddd5960341cb38b1982 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Sun, 13 Dec 2020 14:58:34 -0800 -Subject: [PATCH] LVM-activate: Fix return codes - -OCF_ERR_ARGS should be used when the configuration isn't valid for the -**local** node, and so the resource should not attempt to start again -locally until the issue is corrected. - -OCF_ERR_CONFIGURED should be used when the configuration isn't valid on -**any** node, and so the resource should not attempt to start again -anywhere until the issue is corrected. - -One remaining gray area: Should lvmlockd/lvmetad/clvmd improperly -running (or improperly not running) be an OCF_ERR_GENERIC or -OCF_ERR_ARGS? The fact that it's a state issue rather than a config -issue suggests OCF_ERR_GENERIC. The fact that it won't be fixed without -user intervention suggests OCF_ERR_ARGS. The approach here is to use -GENERIC for all of these. One can make the case that "improperly -running" should use ARGS, since a process must be manually stopped to -fix the issue, and that "improperly not running" should use GENERIC, -since there's a small chance the process died and will be recovered in -some way. - -More info about return code meanings: - - https://clusterlabs.org/pacemaker/doc/2.1/Pacemaker_Administration/html/agents.html#how-are-ocf-return-codes-interpreted - -Resolves: RHBZ#1905820 - -Signed-off-by: Reid Wahl ---- - heartbeat/LVM-activate | 47 +++++++++++++++++++++--------------------- - 1 file changed, 23 insertions(+), 24 deletions(-) - -diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate -index c86606637..e951a08e9 100755 ---- a/heartbeat/LVM-activate -+++ b/heartbeat/LVM-activate -@@ -333,8 +333,7 @@ config_verify() - real=$(lvmconfig "$name" | cut -d'=' -f2) - if [ "$real" != "$expect" ]; then - ocf_exit_reason "config item $name: expect=$expect but real=$real" -- exit $OCF_ERR_CONFIGURED -- -+ exit $OCF_ERR_ARGS - fi - - return $OCF_SUCCESS -@@ -366,12 +365,12 @@ lvmlockd_check() - fi - - ocf_exit_reason "lvmlockd daemon is not running!" -- exit $OCF_ERR_CONFIGURED -+ exit $OCF_ERR_GENERIC - fi - - if pgrep clvmd >/dev/null 2>&1 ; then - ocf_exit_reason "clvmd daemon is running unexpectedly." -- exit $OCF_ERR_CONFIGURED -+ exit $OCF_ERR_GENERIC - fi - - return $OCF_SUCCESS -@@ -402,17 +401,17 @@ clvmd_check() - # Good: clvmd is running, and lvmlockd is not running - if ! pgrep clvmd >/dev/null 2>&1 ; then - ocf_exit_reason "clvmd daemon is not running!" -- exit $OCF_ERR_CONFIGURED -+ exit $OCF_ERR_GENERIC - fi - - if pgrep lvmetad >/dev/null 2>&1 ; then - ocf_exit_reason "Please stop lvmetad daemon when clvmd is running." -- exit $OCF_ERR_CONFIGURED -+ exit $OCF_ERR_GENERIC - fi - - if pgrep lvmlockd >/dev/null 2>&1 ; then - ocf_exit_reason "lvmlockd daemon is running unexpectedly." -- exit $OCF_ERR_CONFIGURED -+ exit $OCF_ERR_GENERIC - fi - - return $OCF_SUCCESS -@@ -424,12 +423,12 @@ systemid_check() - source=$(lvmconfig 'global/system_id_source' 2>/dev/null | cut -d"=" -f2) - if [ "$source" = "" ] || [ "$source" = "none" ]; then - ocf_exit_reason "system_id_source in lvm.conf is not set correctly!" 
-- exit $OCF_ERR_CONFIGURED -+ exit $OCF_ERR_ARGS - fi - - if [ -z ${SYSTEM_ID} ]; then - ocf_exit_reason "local/system_id is not set!" -- exit $OCF_ERR_CONFIGURED -+ exit $OCF_ERR_ARGS - fi - - return $OCF_SUCCESS -@@ -441,18 +440,18 @@ tagging_check() - # The volume_list must be initialized to something in order to - # guarantee our tag will be filtered on startup - if ! lvm dumpconfig activation/volume_list; then -- ocf_log err "LVM: Improper setup detected" -+ ocf_log err "LVM: Improper setup detected" - ocf_exit_reason "The volume_list filter must be initialized in lvm.conf for exclusive activation without clvmd" -- exit $OCF_ERR_CONFIGURED -+ exit $OCF_ERR_ARGS - fi - - # Our tag must _NOT_ be in the volume_list. This agent - # overrides the volume_list during activation using the - # special tag reserved for cluster activation - if lvm dumpconfig activation/volume_list | grep -e "\"@${OUR_TAG}\"" -e "\"${VG}\""; then -- ocf_log err "LVM: Improper setup detected" -+ ocf_log err "LVM: Improper setup detected" - ocf_exit_reason "The volume_list in lvm.conf must not contain the cluster tag, \"${OUR_TAG}\", or volume group, ${VG}" -- exit $OCF_ERR_CONFIGURED -+ exit $OCF_ERR_ARGS - fi - - return $OCF_SUCCESS -@@ -463,13 +462,13 @@ read_parameters() - if [ -z "$VG" ] - then - ocf_exit_reason "You must identify the volume group name!" -- exit $OCF_ERR_ARGS -+ exit $OCF_ERR_CONFIGURED - fi - - if [ "$LV_activation_mode" != "shared" ] && [ "$LV_activation_mode" != "exclusive" ] - then - ocf_exit_reason "Invalid value for activation_mode: $LV_activation_mode" -- exit $OCF_ERR_ARGS -+ exit $OCF_ERR_CONFIGURED - fi - - # Convert VG_access_mode from string to index -@@ -519,8 +518,10 @@ lvm_validate() { - exit $OCF_NOT_RUNNING - fi - -+ # Could be a transient error (e.g., iSCSI connection -+ # issue) so use OCF_ERR_GENERIC - ocf_exit_reason "Volume group[${VG}] doesn't exist, or not visible on this node!" -- exit $OCF_ERR_CONFIGURED -+ exit $OCF_ERR_GENERIC - fi - - # Inconsistency might be due to missing physical volumes, which doesn't -@@ -549,7 +550,7 @@ lvm_validate() { - mode=$? - if [ $VG_access_mode_num -ne 4 ] && [ $mode -ne $VG_access_mode_num ]; then - ocf_exit_reason "The specified vg_access_mode doesn't match the lock_type on VG metadata!" -- exit $OCF_ERR_ARGS -+ exit $OCF_ERR_CONFIGURED - fi - - # Nothing to do if the VG has no logical volume -@@ -561,11 +562,11 @@ lvm_validate() { - - # Check if the given $LV is in the $VG - if [ -n "$LV" ]; then -- OUT=$(lvs --foreign --noheadings ${VG}/${LV} 2>&1) -+ output=$(lvs --foreign --noheadings ${VG}/${LV} 2>&1) - if [ $? -ne 0 ]; then -- ocf_log err "lvs: ${OUT}" -+ ocf_log err "lvs: ${output}" - ocf_exit_reason "LV ($LV) is not in the given VG ($VG)." -- exit $OCF_ERR_ARGS -+ exit $OCF_ERR_CONFIGURED - fi - fi - -@@ -580,7 +581,6 @@ lvm_validate() { - 3) - systemid_check - ;; -- - 4) - tagging_check - ;; -@@ -808,10 +808,9 @@ lvm_status() { - dd if=${dm_name} of=/dev/null bs=1 count=1 >/dev/null \ - 2>&1 - if [ $? 
-ne 0 ]; then -- return $OCF_NOT_RUNNING -- else -- return $OCF_SUCCESS -+ return $OCF_ERR_GENERIC - fi -+ return $OCF_SUCCESS - ;; - *) - ocf_exit_reason "unsupported monitor level $OCF_CHECK_LEVEL" diff --git a/SOURCES/bz1908146-bz1908147-bz1908148-bz1949114-openstack-agents-set-domain-parameters-default.patch b/SOURCES/bz1908146-bz1908147-bz1908148-bz1949114-openstack-agents-set-domain-parameters-default.patch deleted file mode 100644 index 8ee70e5..0000000 --- a/SOURCES/bz1908146-bz1908147-bz1908148-bz1949114-openstack-agents-set-domain-parameters-default.patch +++ /dev/null @@ -1,55 +0,0 @@ -From bb5cfa172ca58cd8adcedcaca92bde54d0645661 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 14 Jul 2022 10:55:19 +0200 -Subject: [PATCH] openstack-agents: set domain parameter's default to Default - and fix missing parameter name in ocf_exit_reason - ---- - heartbeat/openstack-common.sh | 10 +++++++--- - 1 file changed, 7 insertions(+), 3 deletions(-) - -diff --git a/heartbeat/openstack-common.sh b/heartbeat/openstack-common.sh -index b6eec09c..14d290bd 100644 ---- a/heartbeat/openstack-common.sh -+++ b/heartbeat/openstack-common.sh -@@ -1,6 +1,10 @@ -+OCF_RESKEY_user_domain_name_default="Default" -+OCF_RESKEY_project_domain_name_default="Default" - OCF_RESKEY_openstackcli_default="/usr/bin/openstack" - OCF_RESKEY_insecure_default="false" - -+: ${OCF_RESKEY_user_domain_name=${OCF_RESKEY_user_domain_name_default}} -+: ${OCF_RESKEY_project_domain_name=${OCF_RESKEY_project_domain_name_default}} - : ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} - : ${OCF_RESKEY_insecure=${OCF_RESKEY_insecure_default}} - -@@ -64,7 +68,7 @@ Keystone Project. - Keystone User Domain Name. - - Keystone User Domain Name -- -+ - - - -@@ -72,7 +76,7 @@ Keystone User Domain Name. - Keystone Project Domain Name. - - Keystone Project Domain Name -- -+ - - - -@@ -133,7 +137,7 @@ get_config() { - exit $OCF_ERR_CONFIGURED - fi - if [ -z "$OCF_RESKEY_project_domain_name" ]; then -- ocf_exit_reason " not set" -+ ocf_exit_reason "project_domain_name not set" - exit $OCF_ERR_CONFIGURED - fi - --- -2.36.1 - diff --git a/SOURCES/bz1908146-bz1908147-bz1908148-bz1949114-openstack-agents-warn-when-openstackcli-slow.patch b/SOURCES/bz1908146-bz1908147-bz1908148-bz1949114-openstack-agents-warn-when-openstackcli-slow.patch deleted file mode 100644 index 3f8bf0c..0000000 --- a/SOURCES/bz1908146-bz1908147-bz1908148-bz1949114-openstack-agents-warn-when-openstackcli-slow.patch +++ /dev/null @@ -1,282 +0,0 @@ -From ebea4c3620261c529cad908c0e52064df84b0c61 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Mon, 11 Jul 2022 10:28:11 +0200 -Subject: [PATCH] openstack-agents: warn when openstackcli is slow - ---- - heartbeat/openstack-cinder-volume | 19 +++++++++++-------- - heartbeat/openstack-common.sh | 22 ++++++++++++++++++++++ - heartbeat/openstack-floating-ip | 17 ++++++++++------- - heartbeat/openstack-info.in | 20 ++++++++++---------- - heartbeat/openstack-virtual-ip | 20 ++++++++++---------- - 5 files changed, 63 insertions(+), 35 deletions(-) - -diff --git a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume -index 19bf04faf..116442c41 100755 ---- a/heartbeat/openstack-cinder-volume -+++ b/heartbeat/openstack-cinder-volume -@@ -113,11 +113,14 @@ _get_node_id() { - } - - osvol_validate() { -+ local result -+ - check_binary "$OCF_RESKEY_openstackcli" - - get_config - -- if ! 
$OCF_RESKEY_openstackcli volume list|grep -q $OCF_RESKEY_volume_id ; then -+ result=$(run_openstackcli "volume list") -+ if ! echo "$result" | grep -q $OCF_RESKEY_volume_id; then - ocf_exit_reason "volume-id $OCF_RESKEY_volume_id not found" - return $OCF_ERR_CONFIGURED - fi -@@ -156,17 +159,17 @@ osvol_monitor() { - # Is the volue attached? - # We use the API - # -- result=$($OCF_RESKEY_openstackcli volume show \ -+ result=$(run_openstackcli "volume show \ - --column status \ - --column attachments \ - --format value \ -- $OCF_RESKEY_volume_id) -+ $OCF_RESKEY_volume_id") - -- if echo "$result" | grep -q available ; then -+ if echo "$result" | grep -q available; then - ocf_log warn "$OCF_RESKEY_volume_id is not attached to any instance" - return $OCF_NOT_RUNNING - else -- export attached_server_id=$(echo $result|head -n1| -+ export attached_server_id=$(echo "$result"|head -n1| - grep -P -o "'server_id': '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}'"| - grep -P -o "[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}") - ocf_log info "$OCF_RESKEY_volume_id is attached to instance $attached_server_id" -@@ -199,7 +202,7 @@ osvol_stop() { - # - # Detach the volume - # -- if ! $OCF_RESKEY_openstackcli server remove volume $node_id $OCF_RESKEY_volume_id ; then -+ if ! run_openstackcli "server remove volume $node_id $OCF_RESKEY_volume_id"; then - ocf_log error "Couldn't remove volume $OCF_RESKEY_volume_id from instance $node_id" - return $OCF_ERR_GENERIC - fi -@@ -225,7 +228,7 @@ osvol_start() { - # TODO: make it optional in case multi-attachment is allowed by Cinder - # - if [ ! -z $attached_server_id ] ; then -- if ! $OCF_RESKEY_openstackcli server remove volume $attached_server_id $OCF_RESKEY_volume_id ; then -+ if ! run_openstackcli "server remove volume $attached_server_id $OCF_RESKEY_volume_id"; then - ocf_log error "Couldn't remove volume $OCF_RESKEY_volume_id from instance $attached_server_id" - return $OCF_ERR_GENERIC - fi -@@ -238,7 +241,7 @@ osvol_start() { - # - # Attach the volume - # -- $OCF_RESKEY_openstackcli server add volume $node_id $OCF_RESKEY_volume_id -+ run_openstackcli "server add volume $node_id $OCF_RESKEY_volume_id" - if [ $? != $OCF_SUCCESS ]; then - ocf_log error "Couldn't add volume $OCF_RESKEY_volume_id to instance $node_id" - return $OCF_ERR_GENERIC -diff --git a/heartbeat/openstack-common.sh b/heartbeat/openstack-common.sh -index 4763c90db..b6eec09c2 100644 ---- a/heartbeat/openstack-common.sh -+++ b/heartbeat/openstack-common.sh -@@ -145,3 +145,25 @@ get_config() { - OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-domain-name $OCF_RESKEY_project_domain_name" - fi - } -+ -+run_openstackcli() { -+ local cmd="${OCF_RESKEY_openstackcli} $1" -+ local result -+ local rc -+ local start_time=$(date +%s) -+ local end_time -+ local elapsed_time -+ -+ result=$($cmd) -+ rc=$? -+ end_time=$(date +%s) -+ elapsed_time=$(expr $end_time - $start_time) -+ -+ if [ $elapsed_time -gt 20 ]; then -+ ocf_log warn "$cmd took ${elapsed_time}s to complete" -+ fi -+ -+ echo "$result" -+ -+ return $rc -+} -diff --git a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip -index 6e2895654..7317f19a8 100755 ---- a/heartbeat/openstack-floating-ip -+++ b/heartbeat/openstack-floating-ip -@@ -101,11 +101,14 @@ END - } - - osflip_validate() { -+ local result -+ - check_binary "$OCF_RESKEY_openstackcli" - - get_config - -- if ! $OCF_RESKEY_openstackcli floating ip list|grep -q $OCF_RESKEY_ip_id ; then -+ result=$(run_openstackcli "floating ip list") -+ if ! 
echo "$result" | grep -q $OCF_RESKEY_ip_id; then - ocf_exit_reason "ip-id $OCF_RESKEY_ip_id not found" - return $OCF_ERR_CONFIGURED - fi -@@ -132,14 +135,14 @@ osflip_monitor() { - | awk '{gsub("[^ ]*:", "");print}') - - # Is the IP active and attached? -- result=$($OCF_RESKEY_openstackcli floating ip show \ -+ result=$(run_openstackcli "floating ip show \ - --column port_id --column floating_ip_address \ - --format yaml \ -- $OCF_RESKEY_ip_id) -+ $OCF_RESKEY_ip_id") - - for port in $node_port_ids ; do -- if echo $result | grep -q $port ; then -- floating_ip=$(echo $result | awk '/floating_ip_address/ {print $2}') -+ if echo "$result" | grep -q $port ; then -+ floating_ip=$(echo "$result" | awk '/floating_ip_address/ {print $2}') - ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -S status -n openstack_floating_ip -v $floating_ip - - return $OCF_SUCCESS -@@ -160,7 +163,7 @@ osflip_stop() { - return $OCF_SUCCESS - fi - -- if ! $OCF_RESKEY_openstackcli floating ip unset --port $OCF_RESKEY_ip_id ; then -+ if ! run_openstackcli "floating ip unset --port $OCF_RESKEY_ip_id"; then - return $OCF_ERR_GENERIC - fi - -@@ -194,7 +197,7 @@ osflip_start() { - - ocf_log info "Moving IP address $OCF_RESKEY_ip_id to port ID $node_port_id" - -- $OCF_RESKEY_openstackcli floating ip set --port $node_port_id $OCF_RESKEY_ip_id -+ run_openstackcli "floating ip set --port $node_port_id $OCF_RESKEY_ip_id" - if [ $? != $OCF_SUCCESS ]; then - ocf_log error "$OCF_RESKEY_ip_id Cannot be set to port $node_port_id" - return $OCF_ERR_GENERIC -diff --git a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in -index f3a59fc7a..6502f1df1 100755 ---- a/heartbeat/openstack-info.in -+++ b/heartbeat/openstack-info.in -@@ -119,9 +119,7 @@ END - ####################################################################### - - OSInfoStats() { -- local result - local value -- local node - local node_id - - get_config -@@ -141,31 +139,33 @@ OSInfoStats() { - ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_id -v "$node_id" - - # Nova data: flavor -- value=$($OCF_RESKEY_openstackcli server show \ -+ value=$(run_openstackcli "server show \ - --format value \ - --column flavor \ -- $node_id) -+ $node_id") - - ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_flavor -v "$value" - - # Nova data: availability zone -- value=$($OCF_RESKEY_openstackcli server show \ -+ value=$(run_openstackcli "server show \ - --format value \ - --column OS-EXT-AZ:availability_zone \ -- $node_id) -+ $node_id") - - ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_az -v "$value" - - # Network data: ports - value="" -- for port_id in $($OCF_RESKEY_openstackcli port list \ -+ for port_id in $(run_openstackcli "port list \ - --format value \ - --column id \ -- --server $node_id); do -- subnet_id=$($OCF_RESKEY_openstackcli port show \ -+ --server $node_id"); do -+ subnet_result=$(run_openstackcli "port show \ - --format json \ - --column fixed_ips \ -- ${port_id} | grep -P '\"subnet_id\": \".*\",$' | -+ ${port_id}") -+ subnet_id=$(echo "$subnet_result" | -+ grep -P '\"subnet_id\": \".*\",$' | - grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}') - value="${value}${subnet_id}:${port_id}," - done -diff --git a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip -index c654d980a..361357d55 100755 ---- a/heartbeat/openstack-virtual-ip -+++ b/heartbeat/openstack-virtual-ip -@@ -132,11 +132,11 @@ osvip_monitor() { - - node_port_id=$(osvip_port_id) - -- result=$($OCF_RESKEY_openstackcli port show \ -+ 
result=$(run_openstackcli "port show \ - --format value \ - --column allowed_address_pairs \ -- ${node_port_id}) -- if echo $result | grep -q "$OCF_RESKEY_ip"; then -+ ${node_port_id}") -+ if echo "$result" | grep -q "$OCF_RESKEY_ip"; then - ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -S status -n openstack_virtual_ip -v $OCF_RESKEY_ip - - return $OCF_SUCCESS -@@ -158,20 +158,20 @@ osvip_stop() { - return $OCF_SUCCESS - fi - -- mac_address=$($OCF_RESKEY_openstackcli port show \ -+ mac_address=$(run_openstackcli "port show \ - --format value \ - --column mac_address \ -- $node_port_id) -- echo ${mac_address} | grep -q -P "^([0-9a-f]{2}:){5}[0-9a-f]{2}$" -+ $node_port_id") -+ echo "${mac_address}" | grep -q -P "^([0-9a-f]{2}:){5}[0-9a-f]{2}$" - if [ $? -ne 0 ]; then - ocf_log error "MAC address '${mac_address}' is not valid." - return $OCF_ERR_GENERIC - fi - -- if ! $OCF_RESKEY_openstackcli port unset \ -+ if ! run_openstackcli "port unset \ - --allowed-address \ - ip-address=$OCF_RESKEY_ip,mac-address=${mac_address} \ -- $node_port_id; then -+ $node_port_id"; then - return $OCF_ERR_GENERIC - fi - -@@ -196,9 +196,9 @@ osvip_start() { - - ocf_log info "Moving IP address $OCF_RESKEY_ip to port ID $node_port_id" - -- $OCF_RESKEY_openstackcli port set \ -+ run_openstackcli "port set \ - --allowed-address ip-address=$OCF_RESKEY_ip \ -- $node_port_id -+ $node_port_id" - if [ $? != $OCF_SUCCESS ]; then - ocf_log error "$OCF_RESKEY_ip Cannot be set to port $node_port_id" - return $OCF_ERR_GENERIC diff --git a/SOURCES/bz1908146-bz1908147-bz1908148-bz1949114-update-openstack-agents.patch b/SOURCES/bz1908146-bz1908147-bz1908148-bz1949114-update-openstack-agents.patch deleted file mode 100644 index 7b1a6e8..0000000 --- a/SOURCES/bz1908146-bz1908147-bz1908148-bz1949114-update-openstack-agents.patch +++ /dev/null @@ -1,770 +0,0 @@ -diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am ---- a/heartbeat/Makefile.am 2022-03-15 16:14:29.355209012 +0100 -+++ b/heartbeat/Makefile.am 2022-03-15 16:18:35.917048467 +0100 -@@ -217,6 +217,7 @@ - lvm-clvm.sh \ - lvm-plain.sh \ - lvm-tag.sh \ -+ openstack-common.sh \ - ora-common.sh \ - mysql-common.sh \ - nfsserver-redhat.sh \ -diff --color -uNr a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume ---- a/heartbeat/openstack-cinder-volume 2022-03-15 16:14:29.370209063 +0100 -+++ b/heartbeat/openstack-cinder-volume 2022-03-15 16:17:36.231840008 +0100 -@@ -34,11 +34,11 @@ - : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} - . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs - -+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh -+ - # Defaults --OCF_RESKEY_openstackcli_default="/usr/bin/openstack" - OCF_RESKEY_volume_local_check_default="true" - --: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} - : ${OCF_RESKEY_volume_local_check=${OCF_RESKEY_volume_local_check_default}} - - ####################################################################### -@@ -68,14 +68,11 @@ - Attach a cinder volume - - -- -- --Path to command line tools for openstack. -- --Path to Openstack CLI tool -- -- -+END - -+common_meta_data -+ -+cat < - - This option allows the cluster to monitor the cinder volume presence without -@@ -85,28 +82,19 @@ - - - -- -- --Valid Openstack credentials as openrc file from api_access/openrc. -- --openrc file -- -- -- - - --Cinder volume identifier to use to attach the bloc storage. -+Cinder volume identifier to use to attach the block storage. 
- - Volume ID - - -- - - - - - -- -+ - - - -@@ -127,17 +115,7 @@ - osvol_validate() { - check_binary "$OCF_RESKEY_openstackcli" - -- if [ -z "$OCF_RESKEY_openrc" ]; then -- ocf_exit_reason "openrc parameter not set" -- return $OCF_ERR_CONFIGURED -- fi -- -- if [ ! -f "$OCF_RESKEY_openrc" ] ; then -- ocf_exit_reason "openrc file not found" -- return $OCF_ERR_CONFIGURED -- fi -- -- . $OCF_RESKEY_openrc -+ get_config - - if ! $OCF_RESKEY_openstackcli volume list|grep -q $OCF_RESKEY_volume_id ; then - ocf_exit_reason "volume-id $OCF_RESKEY_volume_id not found" -diff --color -uNr a/heartbeat/openstack-common.sh b/heartbeat/openstack-common.sh ---- a/heartbeat/openstack-common.sh 1970-01-01 01:00:00.000000000 +0100 -+++ b/heartbeat/openstack-common.sh 2022-03-15 16:17:36.232840011 +0100 -@@ -0,0 +1,147 @@ -+OCF_RESKEY_openstackcli_default="/usr/bin/openstack" -+OCF_RESKEY_insecure_default="false" -+ -+: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} -+: ${OCF_RESKEY_insecure=${OCF_RESKEY_insecure_default}} -+ -+if ocf_is_true "${OCF_RESKEY_insecure}"; then -+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --insecure" -+fi -+ -+common_meta_data() { -+ cat < -+ -+Openstack cloud (from ~/.config/openstack/clouds.yaml or /etc/openstack/clouds.yaml). -+ -+Cloud from clouds.yaml -+ -+ -+ -+ -+ -+Openstack credentials as openrc file from api_access/openrc. -+ -+openrc file -+ -+ -+ -+ -+ -+Keystone Auth URL -+ -+Keystone Auth URL -+ -+ -+ -+ -+ -+Username. -+ -+Username -+ -+ -+ -+ -+ -+Password. -+ -+Password -+ -+ -+ -+ -+ -+Keystone Project. -+ -+Keystone Project -+ -+ -+ -+ -+ -+Keystone User Domain Name. -+ -+Keystone User Domain Name -+ -+ -+ -+ -+ -+Keystone Project Domain Name. -+ -+Keystone Project Domain Name -+ -+ -+ -+ -+ -+Path to command line tools for openstack. -+ -+Path to Openstack CLI tool -+ -+ -+ -+ -+ -+Allow insecure connections -+ -+Allow insecure connections -+ -+ -+END -+} -+ -+get_config() { -+ if [ -n "$OCF_RESKEY_cloud" ]; then -+ TILDE=$(echo ~) -+ clouds_yaml="$TILDE/.config/openstack/clouds.yaml" -+ if [ ! -f "$clouds_yaml" ]; then -+ clouds_yaml="/etc/openstack/clouds.yaml" -+ fi -+ if [ ! -f "$clouds_yaml" ]; then -+ ocf_exit_reason "~/.config/openstack/clouds.yaml and /etc/openstack/clouds.yaml does not exist" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-cloud $OCF_RESKEY_cloud" -+ elif [ -n "$OCF_RESKEY_openrc" ]; then -+ if [ ! -f "$OCF_RESKEY_openrc" ]; then -+ ocf_exit_reason "$OCF_RESKEY_openrc does not exist" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ . 
$OCF_RESKEY_openrc -+ else -+ if [ -z "$OCF_RESKEY_auth_url" ]; then -+ ocf_exit_reason "auth_url not set" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ if [ -z "$OCF_RESKEY_username" ]; then -+ ocf_exit_reason "username not set" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ if [ -z "$OCF_RESKEY_password" ]; then -+ ocf_exit_reason "password not set" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ if [ -z "$OCF_RESKEY_project_name" ]; then -+ ocf_exit_reason "project_name not set" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ if [ -z "$OCF_RESKEY_user_domain_name" ]; then -+ ocf_exit_reason "user_domain_name not set" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ if [ -z "$OCF_RESKEY_project_domain_name" ]; then -+ ocf_exit_reason " not set" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ -+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-auth-url $OCF_RESKEY_auth_url" -+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-username $OCF_RESKEY_username" -+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-password $OCF_RESKEY_password" -+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-name $OCF_RESKEY_project_name" -+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-user-domain-name $OCF_RESKEY_user_domain_name" -+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-domain-name $OCF_RESKEY_project_domain_name" -+ fi -+} -diff --color -uNr a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip ---- a/heartbeat/openstack-floating-ip 2022-03-15 16:14:29.370209063 +0100 -+++ b/heartbeat/openstack-floating-ip 2022-03-15 16:17:36.233840014 +0100 -@@ -34,10 +34,9 @@ - : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} - . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs - --# Defaults --OCF_RESKEY_openstackcli_default="/usr/bin/openstack" -+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh - --: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} -+# Defaults - - ####################################################################### - -@@ -67,22 +66,11 @@ - Move a floating IP - - -- -- --Path to command line tools for openstack. -- --Path to Openstack CLI tool -- -- -+END - -- -- --Valid Openstack credentials as openrc file from api_access/openrc. -- --openrc file -- -- -+common_meta_data - -+cat < - - Floating IP Identifier. -@@ -104,7 +92,7 @@ - - - -- -+ - - - -@@ -115,17 +103,7 @@ - osflip_validate() { - check_binary "$OCF_RESKEY_openstackcli" - -- if [ -z "$OCF_RESKEY_openrc" ]; then -- ocf_exit_reason "openrc parameter not set" -- return $OCF_ERR_CONFIGURED -- fi -- -- if [ ! -f "$OCF_RESKEY_openrc" ] ; then -- ocf_exit_reason "openrc file not found" -- return $OCF_ERR_CONFIGURED -- fi -- -- . $OCF_RESKEY_openrc -+ get_config - - if ! $OCF_RESKEY_openstackcli floating ip list|grep -q $OCF_RESKEY_ip_id ; then - ocf_exit_reason "ip-id $OCF_RESKEY_ip_id not found" -diff --color -uNr a/heartbeat/openstack-info b/heartbeat/openstack-info ---- a/heartbeat/openstack-info 1970-01-01 01:00:00.000000000 +0100 -+++ b/heartbeat/openstack-info 2022-03-15 16:17:36.234840018 +0100 -@@ -0,0 +1,270 @@ -+#!/bin/sh -+# -+# -+# OCF resource agent to set attributes from Openstack instance details. -+# It records (in the CIB) various attributes of a node -+# -+# Copyright (c) 2018 Mathieu Grzybek -+# All Rights Reserved. -+# -+# This program is free software; you can redistribute it and/or modify -+# it under the terms of version 2 of the GNU General Public License as -+# published by the Free Software Foundation. 
-+# -+# This program is distributed in the hope that it would be useful, but -+# WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+# -+# Further, this software is distributed without any warranty that it is -+# free of the rightful claim of any third person regarding infringement -+# or the like. Any license provided herein, whether implied or -+# otherwise, applies only to this software file. Patent licenses, if -+# any, provided herein do not apply to combinations of this program with -+# other software, or any other product whatsoever. -+# -+# You should have received a copy of the GNU General Public License -+# along with this program; if not, write the Free Software Foundation, -+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. -+# -+####################################################################### -+# Initialization: -+ -+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} -+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs -+ -+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh -+ -+# Defaults -+OCF_RESKEY_pidfile_default="$HA_RSCTMP/OSInfo-${OCF_RESOURCE_HOSTNAME}" -+OCF_RESKEY_delay_default="0" -+OCF_RESKEY_clone_default="0" -+OCF_RESKEY_curlcli_default="/usr/bin/curl" -+OCF_RESKEY_pythoncli_default="/usr/bin/python" -+ -+: ${OCF_RESKEY_curlcli=${OCF_RESKEY_curlcli_default}} -+: ${OCF_RESKEY_pythoncli=${OCF_RESKEY_pythoncli_default}} -+: ${OCF_RESKEY_pidfile=${OCF_RESKEY_pidfile_default}} -+: ${OCF_RESKEY_delay=${OCF_RESKEY_delay_default}} -+: ${OCF_RESKEY_clone=${OCF_RESKEY_clone_default}} -+ -+####################################################################### -+ -+meta_data() { -+ cat < -+ -+ -+1.0 -+ -+ -+OCF resource agent to set attributes from Openstack instance details. -+It records (in the CIB) various attributes of a node. -+Sample output: -+ openstack_az : nova -+ openstack_flavor : c1.small -+ openstack_id : 60ac4343-5828-49b1-8aac-7c69b1417f31 -+ openstack_ports : 7960d889-9750-4160-bf41-c69a41ad72d9:96530d18-57a3-4718-af32-30f2a74c22a2,b0e55a06-bd75-468d-8baa-22cfeb65799f:a55ae917-8016-4b1e-8ffa-04311b9dc7d6 -+ -+The layout of openstack_ports is a comma-separated list of tuples "subnet_id:port_id". -+ -+Records various node attributes in the CIB -+ -+ -+END -+ -+common_meta_data -+ -+ cat < -+PID file -+PID file -+ -+ -+ -+ -+Interval to allow values to stabilize -+Dampening Delay -+ -+ -+ -+ -+ -+Path to command line cURL binary. -+ -+Path to cURL binary -+ -+ -+ -+ -+ -+Path to command line Python interpreter. -+ -+Path to Python interpreter -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+END -+} -+ -+####################################################################### -+ -+OSInfoStats() { -+ local result -+ local value -+ local node -+ local node_id -+ -+ get_config -+ -+ # Nova data: server ID -+ node_id=$($OCF_RESKEY_curlcli \ -+ -s http://169.254.169.254/openstack/latest/meta_data.json | -+ $OCF_RESKEY_pythoncli -m json.tool | -+ grep -P '\"uuid\": \".*\",$' | -+ grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}') -+ -+ if [ $? 
-ne 0 ] ; then -+ ocf_exit_reason "Cannot find server ID" -+ exit $OCF_ERR_GENERIC -+ fi -+ -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_id -v "$node_id" -+ -+ # Nova data: flavor -+ value=$($OCF_RESKEY_openstackcli server show \ -+ --format value \ -+ --column flavor \ -+ $node_id) -+ -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_flavor -v "$value" -+ -+ # Nova data: availability zone -+ value=$($OCF_RESKEY_openstackcli server show \ -+ --format value \ -+ --column OS-EXT-AZ:availability_zone \ -+ $node_id) -+ -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_az -v "$value" -+ -+ # Network data: ports -+ value="" -+ for port_id in $($OCF_RESKEY_openstackcli port list \ -+ --format value \ -+ --column id \ -+ --server $node_id); do -+ subnet_id=$($OCF_RESKEY_openstackcli port show \ -+ --format json \ -+ --column fixed_ips \ -+ ${port_id} | grep -P '\"subnet_id\": \".*\",$' | -+ grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}') -+ value+="${subnet_id}:${port_id}," -+ done -+ value=$(echo ${value} | sed -e 's/,$//g') -+ -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_ports -v "$value" -+ -+ if [ ! -z "$OS_REGION_NAME" ] ; then -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_region -v "$OS_REGION_NAME" -+ fi -+ -+ if [ ! -z "$OS_TENANT_ID" ] ; then -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_tenant_id -v "$OS_TENANT_ID" -+ -+ if [ ! -z "$OS_TENANT_NAME" ] ; then -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_tenant_name -v "$OS_TENANT_NAME" -+ fi -+ else -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_project_id -v "$OS_PROJECT_ID" -+ -+ if [ ! -z "$OS_PROJECT_NAME" ] ; then -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_project_name -v "$OS_PROJECT_NAME" -+ fi -+ fi -+ -+} -+ -+OSInfo_usage() { -+ cat < $OCF_RESKEY_pidfile -+ OSInfoStats -+ exit $OCF_SUCCESS -+} -+ -+OSInfo_stop() { -+ rm -f $OCF_RESKEY_pidfile -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_id -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_flavor -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_az -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_ports -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_region -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_tenant_id -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_tenant_name -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_project_id -+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_project_name -+ exit $OCF_SUCCESS -+} -+ -+OSInfo_monitor() { -+ if [ -f "$OCF_RESKEY_pidfile" ] ; then -+ OSInfoStats -+ exit $OCF_RUNNING -+ fi -+ exit $OCF_NOT_RUNNING -+} -+ -+OSInfo_validate() { -+ check_binary "$OCF_RESKEY_curlcli" -+ check_binary "$OCF_RESKEY_openstackcli" -+ check_binary "$OCF_RESKEY_pythoncli" -+ -+ return $OCF_SUCCESS -+} -+ -+if [ $# -ne 1 ]; then -+ OSInfo_usage -+ exit $OCF_ERR_ARGS -+fi -+ -+if [ x != x${OCF_RESKEY_delay} ]; then -+ OCF_RESKEY_delay="-d ${OCF_RESKEY_delay}" -+fi -+ -+case $__OCF_ACTION in -+meta-data) meta_data -+ exit $OCF_SUCCESS -+ ;; -+start) OSInfo_validate || exit $? 
-+ OSInfo_start -+ ;; -+stop) OSInfo_stop -+ ;; -+monitor) OSInfo_monitor -+ ;; -+validate-all) OSInfo_validate -+ ;; -+usage|help) OSInfo_usage -+ exit $OCF_SUCCESS -+ ;; -+*) OSInfo_usage -+ exit $OCF_ERR_UNIMPLEMENTED -+ ;; -+esac -+ -+exit $? -diff --color -uNr a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in ---- a/heartbeat/openstack-info.in 2022-03-15 16:14:29.370209063 +0100 -+++ b/heartbeat/openstack-info.in 2022-03-15 16:17:36.234840018 +0100 -@@ -32,16 +32,16 @@ - : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} - . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs - -+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh -+ - # Defaults - OCF_RESKEY_pidfile_default="$HA_RSCTMP/OSInfo-${OCF_RESOURCE_HOSTNAME}" - OCF_RESKEY_delay_default="0" - OCF_RESKEY_clone_default="0" - OCF_RESKEY_curlcli_default="/usr/bin/curl" --OCF_RESKEY_openstackcli_default="/usr/bin/openstack" - OCF_RESKEY_pythoncli_default="@PYTHON@" - - : ${OCF_RESKEY_curlcli=${OCF_RESKEY_curlcli_default}} --: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} - : ${OCF_RESKEY_pythoncli=${OCF_RESKEY_pythoncli_default}} - : ${OCF_RESKEY_pidfile=${OCF_RESKEY_pidfile_default}} - : ${OCF_RESKEY_delay=${OCF_RESKEY_delay_default}} -@@ -70,25 +70,23 @@ - Records various node attributes in the CIB - - -+END -+ -+common_meta_data -+ -+ cat < - PID file - PID file - - -+ - - Interval to allow values to stabilize - Dampening Delay - - - -- -- --Valid Openstack credentials as openrc file from api_access/openrc. -- --openrc file -- -- -- - - - Path to command line cURL binary. -@@ -97,14 +95,6 @@ - - - -- -- --Path to command line tools for openstack. -- --Path to Openstack CLI tool -- -- -- - - - Path to command line Python interpreter. -@@ -116,9 +106,9 @@ - - - -- -- -- -+ -+ -+ - - - -@@ -134,7 +124,7 @@ - local node - local node_id - -- . $OCF_RESKEY_openrc -+ get_config - - # Nova data: server ID - node_id=$($OCF_RESKEY_curlcli \ -@@ -244,16 +234,6 @@ - check_binary "$OCF_RESKEY_openstackcli" - check_binary "$OCF_RESKEY_pythoncli" - -- if [ -z "$OCF_RESKEY_openrc" ]; then -- ocf_exit_reason "openrc parameter not set" -- return $OCF_ERR_CONFIGURED -- fi -- -- if [ ! -f "$OCF_RESKEY_openrc" ] ; then -- ocf_exit_reason "openrc file not found" -- return $OCF_ERR_CONFIGURED -- fi -- - return $OCF_SUCCESS - } - -diff --color -uNr a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip ---- a/heartbeat/openstack-virtual-ip 2022-03-15 16:14:29.370209063 +0100 -+++ b/heartbeat/openstack-virtual-ip 2022-03-15 16:17:36.235840021 +0100 -@@ -34,10 +34,9 @@ - : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} - . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs - --# Defaults --OCF_RESKEY_openstackcli_default="/usr/bin/openstack" -+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh - --: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}} -+# Defaults - - ####################################################################### - -@@ -68,22 +67,11 @@ - Move a virtual IP - - -- -- --Path to command line tools for openstack. -- --Path to Openstack CLI tool -- -- -+END - -- -- --Valid Openstack credentials as openrc file from api_access/openrc. -- --openrc file -- -- -+common_meta_data - -+cat < - - Virtual IP Address. -@@ -105,7 +93,7 @@ - - - -- -+ - - - -@@ -128,17 +116,7 @@ - osvip_validate() { - check_binary "$OCF_RESKEY_openstackcli" - -- if [ -z "$OCF_RESKEY_openrc" ]; then -- ocf_exit_reason "openrc parameter not set" -- return $OCF_ERR_CONFIGURED -- fi -- -- if [ ! 
-f "$OCF_RESKEY_openrc" ] ; then -- ocf_exit_reason "openrc file not found" -- return $OCF_ERR_CONFIGURED -- fi -- -- . $OCF_RESKEY_openrc -+ get_config - - ${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) > /dev/null 2>&1 - if [ $? -ne 0 ] ; then diff --git a/SOURCES/bz1908146-bz1908147-bz1949114-openstack-agents-fixes.patch b/SOURCES/bz1908146-bz1908147-bz1949114-openstack-agents-fixes.patch deleted file mode 100644 index 451fba7..0000000 --- a/SOURCES/bz1908146-bz1908147-bz1949114-openstack-agents-fixes.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 64f434014bc198055478a139532c7cc133967c5d Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Fri, 8 Jul 2022 15:41:34 +0200 -Subject: [PATCH] openstack-agents: fixes - -- openstack-cinder-volume: dont do volume_local_check during start/stop-action -- openstack-floating-ip/openstack-virtual-ip: dont fail in validate() - during probe-calls -- openstack-floating-ip: fix awk only catching last id for node_port_ids ---- - heartbeat/openstack-cinder-volume | 2 +- - heartbeat/openstack-floating-ip | 4 ++-- - heartbeat/openstack-virtual-ip | 4 ++-- - 3 files changed, 5 insertions(+), 5 deletions(-) - -diff --git a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume -index cc12e58ae..19bf04faf 100755 ---- a/heartbeat/openstack-cinder-volume -+++ b/heartbeat/openstack-cinder-volume -@@ -138,7 +138,7 @@ osvol_monitor() { - - node_id=$(_get_node_id) - -- if ocf_is_true $OCF_RESKEY_volume_local_check ; then -+ if [ "$__OCF_ACTION" = "monitor" ] && ocf_is_true $OCF_RESKEY_volume_local_check ; then - # - # Is the volue attached? - # We check the local devices -diff --git a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip -index 8c135cc24..6e2895654 100755 ---- a/heartbeat/openstack-floating-ip -+++ b/heartbeat/openstack-floating-ip -@@ -111,7 +111,7 @@ osflip_validate() { - fi - - ${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) > /dev/null 2>&1 -- if [ $? -ne 0 ] ; then -+ if [ $? -ne 0 ] && ! ocf_is_probe; then - ocf_log warn "attr_updater failed to get openstack_ports attribute of node $OCF_RESOURCE_INSTANCE" - return $OCF_ERR_GENERIC - fi -@@ -129,7 +129,7 @@ osflip_monitor() { - node_port_ids=$(${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) \ - | awk -F= '{gsub("\"","");print $NF}' \ - | tr ',' ' ' \ -- | awk -F: '{print $NF}') -+ | awk '{gsub("[^ ]*:", "");print}') - - # Is the IP active and attached? - result=$($OCF_RESKEY_openstackcli floating ip show \ -diff --git a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip -index a1084c420..c654d980a 100755 ---- a/heartbeat/openstack-virtual-ip -+++ b/heartbeat/openstack-virtual-ip -@@ -119,7 +119,7 @@ osvip_validate() { - get_config - - ${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) > /dev/null 2>&1 -- if [ $? -ne 0 ] ; then -+ if [ $? -ne 0 ] && ! 
ocf_is_probe; then - ocf_log warn "attr_updater failed to get openstack_ports attribute of node $OCF_RESOURCE_INSTANCE" - return $OCF_ERR_GENERIC - fi -@@ -136,7 +136,7 @@ osvip_monitor() { - --format value \ - --column allowed_address_pairs \ - ${node_port_id}) -- if echo $result | grep -q $OCF_RESKEY_ip ; then -+ if echo $result | grep -q "$OCF_RESKEY_ip"; then - ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -S status -n openstack_virtual_ip -v $OCF_RESKEY_ip - - return $OCF_SUCCESS diff --git a/SOURCES/bz1908148-openstack-info-fix-bashism.patch b/SOURCES/bz1908148-openstack-info-fix-bashism.patch deleted file mode 100644 index 4f78d54..0000000 --- a/SOURCES/bz1908148-openstack-info-fix-bashism.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 8b1d3257e5176a2f50a843a21888c4b4f51f370b Mon Sep 17 00:00:00 2001 -From: Valentin Vidic -Date: Sun, 3 Apr 2022 20:31:50 +0200 -Subject: [PATCH] openstack-info: fix bashism - -Also simplify striping of trailing comma. ---- - heartbeat/openstack-info.in | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in -index f6dc1ee4d..f3a59fc7a 100755 ---- a/heartbeat/openstack-info.in -+++ b/heartbeat/openstack-info.in -@@ -167,9 +167,9 @@ OSInfoStats() { - --column fixed_ips \ - ${port_id} | grep -P '\"subnet_id\": \".*\",$' | - grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}') -- value+="${subnet_id}:${port_id}," -+ value="${value}${subnet_id}:${port_id}," - done -- value=$(echo ${value} | sed -e 's/,$//g') -+ value=${value%,} - - ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_ports -v "$value" - diff --git a/SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch b/SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch deleted file mode 100644 index b9ed544..0000000 --- a/SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch +++ /dev/null @@ -1,52 +0,0 @@ -From f91804ff4772e3ab41f46e28d370f57898700333 Mon Sep 17 00:00:00 2001 -From: Georg Brandl -Date: Thu, 10 Dec 2020 08:19:21 +0100 -Subject: [PATCH] fixes #1625: infinite loop in SML lexer - -Reason was a lookahead-only pattern which was included in the state -where the lookahead was transitioning to. 
---- - pygments/lexers/ml.py | 12 ++++++------ - 2 files changed, 14 insertions(+), 6 deletions(-) - -diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py -index 8ca8ce3eb..f2ac367c5 100644 ---- a/pygments/lexers/ml.py -+++ b/pygments/lexers/ml.py -@@ -142,7 +142,7 @@ def id_callback(self, match): - (r'#\s+(%s)' % symbolicid_re, Name.Label), - # Some reserved words trigger a special, local lexer state change - (r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'), -- (r'(?=\b(exception)\b(?!\'))', Text, ('ename')), -+ (r'\b(exception)\b(?!\')', Keyword.Reserved, 'ename'), - (r'\b(functor|include|open|signature|structure)\b(?!\')', - Keyword.Reserved, 'sname'), - (r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'), -@@ -315,15 +315,14 @@ def id_callback(self, match): - 'ename': [ - include('whitespace'), - -- (r'(exception|and)\b(\s+)(%s)' % alphanumid_re, -+ (r'(and\b)(\s+)(%s)' % alphanumid_re, - bygroups(Keyword.Reserved, Text, Name.Class)), -- (r'(exception|and)\b(\s*)(%s)' % symbolicid_re, -+ (r'(and\b)(\s*)(%s)' % symbolicid_re, - bygroups(Keyword.Reserved, Text, Name.Class)), - (r'\b(of)\b(?!\')', Keyword.Reserved), -+ (r'(%s)|(%s)' % (alphanumid_re, symbolicid_re), Name.Class), - -- include('breakout'), -- include('core'), -- (r'\S+', Error), -+ default('#pop'), - ], - - 'datcon': [ -@@ -445,6 +444,7 @@ class OcamlLexer(RegexLexer): - ], - } - -+ - class OpaLexer(RegexLexer): - """ - Lexer for the Opa language (http://opalang.org). diff --git a/SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch b/SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch deleted file mode 100644 index d28028c..0000000 --- a/SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch +++ /dev/null @@ -1,138 +0,0 @@ -From 2e7e8c4a7b318f4032493773732754e418279a14 Mon Sep 17 00:00:00 2001 -From: Georg Brandl -Date: Mon, 11 Jan 2021 09:46:34 +0100 -Subject: [PATCH] Fix several exponential/cubic complexity regexes found by Ben - Caller/Doyensec - ---- - pygments/lexers/archetype.py | 2 +- - pygments/lexers/factor.py | 4 ++-- - pygments/lexers/jvm.py | 1 - - pygments/lexers/matlab.py | 6 +++--- - pygments/lexers/objective.py | 4 ++-- - pygments/lexers/templates.py | 2 +- - pygments/lexers/varnish.py | 2 +- - 8 files changed, 14 insertions(+), 12 deletions(-) - -diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py -index 65046613d..26f5ea8c9 100644 ---- a/pygments/lexers/archetype.py -+++ b/pygments/lexers/archetype.py -@@ -58,7 +58,7 @@ class AtomsLexer(RegexLexer): - (r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|' - r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date), - (r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), -- (r'[+-]?(\d+)*\.\d+%?', Number.Float), -+ (r'[+-]?\d*\.\d+%?', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[+-]?\d+%?', Number.Integer), - ], -diff --git a/pygments/lexers/factor.py b/pygments/lexers/factor.py -index be7b30dff..9200547f9 100644 ---- a/pygments/lexers/factor.py -+++ b/pygments/lexers/factor.py -@@ -265,7 +265,7 @@ class FactorLexer(RegexLexer): - (r'(?:)\s', Keyword.Namespace), - - # strings -- (r'"""\s+(?:.|\n)*?\s+"""', String), -+ (r'"""\s(?:.|\n)*?\s"""', String), - (r'"(?:\\\\|\\"|[^"])*"', String), - (r'\S+"\s+(?:\\\\|\\"|[^"])*"', String), - (r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char), -@@ -322,7 +322,7 @@ class FactorLexer(RegexLexer): - 'slots': [ - (r'\s+', Text), - (r';\s', Keyword, '#pop'), -- (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)', -+ (r'(\{\s+)(\S+)(\s[^}]+\s\}\s)', - 
bygroups(Text, Name.Variable, Text)), - (r'\S+', Name.Variable), - ], -diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py -index 62dfd45e5..9a9397c2d 100644 ---- a/pygments/lexers/jvm.py -+++ b/pygments/lexers/jvm.py -@@ -981,7 +981,6 @@ class CeylonLexer(RegexLexer): - (r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), - (r'"(\\\\|\\[^\\]|[^"\\])*"', String), - (r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char), -- (r'".*``.*``.*"', String.Interpol), - (r'(\.)([a-z_]\w*)', - bygroups(Operator, Name.Attribute)), - (r'[a-zA-Z_]\w*:', Name.Label), -diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py -index 4823c6a7e..578848623 100644 ---- a/pygments/lexers/matlab.py -+++ b/pygments/lexers/matlab.py -@@ -137,7 +137,7 @@ class MatlabLexer(RegexLexer): - (r'.', Comment.Multiline), - ], - 'deffunc': [ -- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', -+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', - bygroups(Whitespace, Text, Whitespace, Punctuation, - Whitespace, Name.Function, Punctuation, Text, - Punctuation, Whitespace), '#pop'), -@@ -638,7 +638,7 @@ class OctaveLexer(RegexLexer): - (r"[^']*'", String, '#pop'), - ], - 'deffunc': [ -- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', -+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', - bygroups(Whitespace, Text, Whitespace, Punctuation, - Whitespace, Name.Function, Punctuation, Text, - Punctuation, Whitespace), '#pop'), -@@ -710,7 +710,7 @@ class ScilabLexer(RegexLexer): - (r'.', String, '#pop'), - ], - 'deffunc': [ -- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', -+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', - bygroups(Whitespace, Text, Whitespace, Punctuation, - Whitespace, Name.Function, Punctuation, Text, - Punctuation, Whitespace), '#pop'), -diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py -index 34e4062f6..38ac9bb05 100644 ---- a/pygments/lexers/objective.py -+++ b/pygments/lexers/objective.py -@@ -261,11 +261,11 @@ class LogosLexer(ObjectiveCppLexer): - 'logos_classname'), - (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', - bygroups(Keyword, Text, Name.Class)), -- (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)', -+ (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)', - bygroups(Keyword, Text, Name.Variable, Text, String, Text)), - (r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation), - 'function'), -- (r'(%new)(\s*)(\()(\s*.*?\s*)(\))', -+ (r'(%new)(\s*)(\()(.*?)(\))', - bygroups(Keyword, Text, Keyword, String, Keyword)), - (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)), - inherit, -diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py -index 33c06c4c4..5c3346b4c 100644 ---- a/pygments/lexers/templates.py -+++ b/pygments/lexers/templates.py -@@ -1405,7 +1405,7 @@ class EvoqueLexer(RegexLexer): - # see doc for handling first name arg: /directives/evoque/ - # + minor inconsistency: the "name" in e.g. $overlay{name=site_base} - # should be using(PythonLexer), not passed out as String -- (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?' -+ (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?' 
- r'(.*?)((?(4)%)\})', - bygroups(Punctuation, Name.Builtin, Punctuation, None, - String, using(PythonLexer), Punctuation)), -diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py -index 23653f7a1..9d358bd7c 100644 ---- a/pygments/lexers/varnish.py -+++ b/pygments/lexers/varnish.py -@@ -61,7 +61,7 @@ def analyse_text(text): - bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)), - (r'(\.probe)(\s*=\s*)(\{)', - bygroups(Name.Attribute, Operator, Punctuation), 'probe'), -- (r'(\.\w+\b)(\s*=\s*)([^;]*)(\s*;)', -+ (r'(\.\w+\b)(\s*=\s*)([^;\s]*)(\s*;)', - bygroups(Name.Attribute, Operator, using(this), Punctuation)), - (r'\{', Punctuation, '#push'), - (r'\}', Punctuation, '#pop'), diff --git a/SOURCES/bz1977012-azure-events-az-new-ra.patch b/SOURCES/bz1977012-azure-events-az-new-ra.patch deleted file mode 100644 index 88c7781..0000000 --- a/SOURCES/bz1977012-azure-events-az-new-ra.patch +++ /dev/null @@ -1,903 +0,0 @@ -From 5dcd5153f0318e4766f7f4d3e61dfdb4b352c39c Mon Sep 17 00:00:00 2001 -From: MSSedusch -Date: Mon, 30 May 2022 15:08:10 +0200 -Subject: [PATCH 1/2] add new Azure Events AZ resource agent - ---- - .gitignore | 1 + - configure.ac | 8 + - doc/man/Makefile.am | 4 + - heartbeat/Makefile.am | 4 + - heartbeat/azure-events-az.in | 782 +++++++++++++++++++++++++++++++++++ - 5 files changed, 799 insertions(+) - create mode 100644 heartbeat/azure-events-az.in - -diff --git a/.gitignore b/.gitignore -index 0c259b5cf..e2b7c039c 100644 ---- a/.gitignore -+++ b/.gitignore -@@ -54,6 +54,7 @@ heartbeat/Squid - heartbeat/SysInfo - heartbeat/aws-vpc-route53 - heartbeat/azure-events -+heartbeat/azure-events-az - heartbeat/clvm - heartbeat/conntrackd - heartbeat/dnsupdate -diff --git a/configure.ac b/configure.ac -index eeecfad0e..5716a2be2 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -523,6 +523,13 @@ if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then - fi - AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1) - -+BUILD_AZURE_EVENTS_AZ=1 -+if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then -+ BUILD_AZURE_EVENTS_AZ=0 -+ AC_MSG_WARN("Not building azure-events-az") -+fi -+AM_CONDITIONAL(BUILD_AZURE_EVENTS_AZ, test $BUILD_AZURE_EVENTS_AZ -eq 1) -+ - BUILD_GCP_PD_MOVE=1 - if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then - BUILD_GCP_PD_MOVE=0 -@@ -976,6 +983,7 @@ rgmanager/Makefile \ - - dnl Files we output that need to be executable - AC_CONFIG_FILES([heartbeat/azure-events], [chmod +x heartbeat/azure-events]) -+AC_CONFIG_FILES([heartbeat/azure-events-az], [chmod +x heartbeat/azure-events-az]) - AC_CONFIG_FILES([heartbeat/AoEtarget], [chmod +x heartbeat/AoEtarget]) - AC_CONFIG_FILES([heartbeat/ManageRAID], [chmod +x heartbeat/ManageRAID]) - AC_CONFIG_FILES([heartbeat/ManageVE], [chmod +x heartbeat/ManageVE]) -diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am -index cd8fd16bf..658c700ac 100644 ---- a/doc/man/Makefile.am -+++ b/doc/man/Makefile.am -@@ -219,6 +219,10 @@ if BUILD_AZURE_EVENTS - man_MANS += ocf_heartbeat_azure-events.7 - endif - -+if BUILD_AZURE_EVENTS_AZ -+man_MANS += ocf_heartbeat_azure-events-az.7 -+endif -+ - if BUILD_GCP_PD_MOVE - man_MANS += ocf_heartbeat_gcp-pd-move.7 - endif -diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am -index 20d41e36a..1133dc13e 100644 ---- a/heartbeat/Makefile.am -+++ b/heartbeat/Makefile.am -@@ -188,6 +188,10 @@ if BUILD_AZURE_EVENTS - ocf_SCRIPTS += azure-events - endif - -+if BUILD_AZURE_EVENTS_AZ -+ocf_SCRIPTS += azure-events-az 
-+endif -+ - if BUILD_GCP_PD_MOVE - ocf_SCRIPTS += gcp-pd-move - endif -diff --git a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in -new file mode 100644 -index 000000000..616fc8d9e ---- /dev/null -+++ b/heartbeat/azure-events-az.in -@@ -0,0 +1,782 @@ -+#!@PYTHON@ -tt -+# -+# Resource agent for monitoring Azure Scheduled Events -+# -+# License: GNU General Public License (GPL) -+# (c) 2018 Tobias Niekamp, Microsoft Corp. -+# and Linux-HA contributors -+ -+import os -+import sys -+import time -+import subprocess -+import json -+try: -+ import urllib2 -+ from urllib2 import URLError -+except ImportError: -+ import urllib.request as urllib2 -+ from urllib.error import URLError -+import socket -+from collections import defaultdict -+ -+OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os.environ.get("OCF_ROOT")) -+sys.path.append(OCF_FUNCTIONS_DIR) -+import ocf -+ -+############################################################################## -+ -+ -+VERSION = "0.10" -+USER_AGENT = "Pacemaker-ResourceAgent/%s %s" % (VERSION, ocf.distro()) -+ -+attr_globalPullState = "azure-events-az_globalPullState" -+attr_lastDocVersion = "azure-events-az_lastDocVersion" -+attr_curNodeState = "azure-events-az_curNodeState" -+attr_pendingEventIDs = "azure-events-az_pendingEventIDs" -+attr_healthstate = "#health-azure" -+ -+default_loglevel = ocf.logging.INFO -+default_relevantEventTypes = set(["Reboot", "Redeploy"]) -+ -+global_pullMaxAttempts = 3 -+global_pullDelaySecs = 1 -+ -+############################################################################## -+ -+class attrDict(defaultdict): -+ """ -+ A wrapper for accessing dict keys like an attribute -+ """ -+ def __init__(self, data): -+ super(attrDict, self).__init__(attrDict) -+ for d in data.keys(): -+ self.__setattr__(d, data[d]) -+ -+ def __getattr__(self, key): -+ try: -+ return self[key] -+ except KeyError: -+ raise AttributeError(key) -+ -+ def __setattr__(self, key, value): -+ self[key] = value -+ -+############################################################################## -+ -+class azHelper: -+ """ -+ Helper class for Azure's metadata API (including Scheduled Events) -+ """ -+ metadata_host = "http://169.254.169.254/metadata" -+ instance_api = "instance" -+ events_api = "scheduledevents" -+ api_version = "2019-08-01" -+ -+ @staticmethod -+ def _sendMetadataRequest(endpoint, postData=None): -+ """ -+ Send a request to Azure's Azure Metadata Service API -+ """ -+ url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version) -+ data = "" -+ ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData)) -+ ocf.logger.debug("_sendMetadataRequest: url = %s" % url) -+ -+ if postData and type(postData) != bytes: -+ postData = postData.encode() -+ -+ req = urllib2.Request(url, postData) -+ req.add_header("Metadata", "true") -+ req.add_header("User-Agent", USER_AGENT) -+ try: -+ resp = urllib2.urlopen(req) -+ except URLError as e: -+ if hasattr(e, 'reason'): -+ ocf.logger.warning("Failed to reach the server: %s" % e.reason) -+ clusterHelper.setAttr(attr_globalPullState, "IDLE") -+ elif hasattr(e, 'code'): -+ ocf.logger.warning("The server couldn\'t fulfill the request. 
Error code: %s" % e.code) -+ clusterHelper.setAttr(attr_globalPullState, "IDLE") -+ else: -+ data = resp.read() -+ ocf.logger.debug("_sendMetadataRequest: response = %s" % data) -+ -+ if data: -+ data = json.loads(data) -+ -+ ocf.logger.debug("_sendMetadataRequest: finished") -+ return data -+ -+ @staticmethod -+ def getInstanceInfo(): -+ """ -+ Fetch details about the current VM from Azure's Azure Metadata Service API -+ """ -+ ocf.logger.debug("getInstanceInfo: begin") -+ -+ jsondata = azHelper._sendMetadataRequest(azHelper.instance_api) -+ ocf.logger.debug("getInstanceInfo: json = %s" % jsondata) -+ -+ if jsondata: -+ ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) -+ return attrDict(jsondata["compute"]) -+ else: -+ ocf.ocf_exit_reason("getInstanceInfo: Unable to get instance info") -+ sys.exit(ocf.OCF_ERR_GENERIC) -+ -+ @staticmethod -+ def pullScheduledEvents(): -+ """ -+ Retrieve all currently scheduled events via Azure Metadata Service API -+ """ -+ ocf.logger.debug("pullScheduledEvents: begin") -+ -+ jsondata = azHelper._sendMetadataRequest(azHelper.events_api) -+ ocf.logger.debug("pullScheduledEvents: json = %s" % jsondata) -+ -+ ocf.logger.debug("pullScheduledEvents: finished") -+ return attrDict(jsondata) -+ -+ @staticmethod -+ def forceEvents(eventIDs): -+ """ -+ Force a set of events to start immediately -+ """ -+ ocf.logger.debug("forceEvents: begin") -+ -+ events = [] -+ for e in eventIDs: -+ events.append({ -+ "EventId": e, -+ }) -+ postData = { -+ "StartRequests" : events -+ } -+ ocf.logger.info("forceEvents: postData = %s" % postData) -+ resp = azHelper._sendMetadataRequest(azHelper.events_api, postData=json.dumps(postData)) -+ -+ ocf.logger.debug("forceEvents: finished") -+ return -+ -+############################################################################## -+ -+class clusterHelper: -+ """ -+ Helper functions for Pacemaker control via crm -+ """ -+ @staticmethod -+ def _getLocation(node): -+ """ -+ Helper function to retrieve local/global attributes -+ """ -+ if node: -+ return ["--node", node] -+ else: -+ return ["--type", "crm_config"] -+ -+ @staticmethod -+ def _exec(command, *args): -+ """ -+ Helper function to execute a UNIX command -+ """ -+ args = list(args) -+ ocf.logger.debug("_exec: begin; command = %s, args = %s" % (command, str(args))) -+ -+ def flatten(*n): -+ return (str(e) for a in n -+ for e in (flatten(*a) if isinstance(a, (tuple, list)) else (str(a),))) -+ command = list(flatten([command] + args)) -+ ocf.logger.debug("_exec: cmd = %s" % " ".join(command)) -+ try: -+ ret = subprocess.check_output(command) -+ if type(ret) != str: -+ ret = ret.decode() -+ ocf.logger.debug("_exec: return = %s" % ret) -+ return ret.rstrip() -+ except Exception as err: -+ ocf.logger.exception(err) -+ return None -+ -+ @staticmethod -+ def setAttr(key, value, node=None): -+ """ -+ Set the value of a specific global/local attribute in the Pacemaker cluster -+ """ -+ ocf.logger.debug("setAttr: begin; key = %s, value = %s, node = %s" % (key, value, node)) -+ -+ if value: -+ ret = clusterHelper._exec("crm_attribute", -+ "--name", key, -+ "--update", value, -+ clusterHelper._getLocation(node)) -+ else: -+ ret = clusterHelper._exec("crm_attribute", -+ "--name", key, -+ "--delete", -+ clusterHelper._getLocation(node)) -+ -+ ocf.logger.debug("setAttr: finished") -+ return len(ret) == 0 -+ -+ @staticmethod -+ def getAttr(key, node=None): -+ """ -+ Retrieve a global/local attribute from the Pacemaker cluster -+ """ -+ 
ocf.logger.debug("getAttr: begin; key = %s, node = %s" % (key, node)) -+ -+ val = clusterHelper._exec("crm_attribute", -+ "--name", key, -+ "--query", "--quiet", -+ "--default", "", -+ clusterHelper._getLocation(node)) -+ ocf.logger.debug("getAttr: finished") -+ if not val: -+ return None -+ return val if not val.isdigit() else int(val) -+ -+ @staticmethod -+ def getAllNodes(): -+ """ -+ Get a list of hostnames for all nodes in the Pacemaker cluster -+ """ -+ ocf.logger.debug("getAllNodes: begin") -+ -+ nodes = [] -+ nodeList = clusterHelper._exec("crm_node", "--list") -+ for n in nodeList.split("\n"): -+ nodes.append(n.split()[1]) -+ ocf.logger.debug("getAllNodes: finished; return %s" % str(nodes)) -+ -+ return nodes -+ -+ @staticmethod -+ def getHostNameFromAzName(azName): -+ """ -+ Helper function to get the actual host name from an Azure node name -+ """ -+ return clusterHelper.getAttr("hostName_%s" % azName) -+ -+ @staticmethod -+ def removeHoldFromNodes(): -+ """ -+ Remove the ON_HOLD state from all nodes in the Pacemaker cluster -+ """ -+ ocf.logger.debug("removeHoldFromNodes: begin") -+ -+ for n in clusterHelper.getAllNodes(): -+ if clusterHelper.getAttr(attr_curNodeState, node=n) == "ON_HOLD": -+ clusterHelper.setAttr(attr_curNodeState, "AVAILABLE", node=n) -+ ocf.logger.info("removeHoldFromNodes: removed ON_HOLD from node %s" % n) -+ -+ ocf.logger.debug("removeHoldFromNodes: finished") -+ return False -+ -+ @staticmethod -+ def otherNodesAvailable(exceptNode): -+ """ -+ Check if there are any nodes (except a given node) in the Pacemaker cluster that have state AVAILABLE -+ """ -+ ocf.logger.debug("otherNodesAvailable: begin; exceptNode = %s" % exceptNode) -+ -+ for n in clusterHelper.getAllNodes(): -+ state = clusterHelper.getAttr(attr_curNodeState, node=n) -+ state = stringToNodeState(state) if state else AVAILABLE -+ if state == AVAILABLE and n != exceptNode.hostName: -+ ocf.logger.info("otherNodesAvailable: at least %s is available" % n) -+ ocf.logger.debug("otherNodesAvailable: finished") -+ return True -+ ocf.logger.info("otherNodesAvailable: no other nodes are available") -+ ocf.logger.debug("otherNodesAvailable: finished") -+ -+ return False -+ -+ @staticmethod -+ def transitionSummary(): -+ """ -+ Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby) -+ """ -+ # Is a global crm_simulate "too much"? Or would it be sufficient it there are no planned transitions for a particular node? 
-+ # # crm_simulate -Ls -+ # Transition Summary: -+ # * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1) -+ # * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0) -+ # * Move rsc_ip_HN1_HDB03 (Started hsr3-db0 -> hsr3-db1) -+ # * Start rsc_nc_HN1_HDB03 (hsr3-db1) -+ # # Excepted result when there are no pending actions: -+ # Transition Summary: -+ ocf.logger.debug("transitionSummary: begin") -+ -+ summary = clusterHelper._exec("crm_simulate", "-Ls") -+ if not summary: -+ ocf.logger.warning("transitionSummary: could not load transition summary") -+ return False -+ if summary.find("Transition Summary:") < 0: -+ ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary) -+ return False -+ summary = summary.split("Transition Summary:")[1] -+ ret = summary.split("\n").pop(0) -+ -+ ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret)) -+ return ret -+ -+ @staticmethod -+ def listOperationsOnNode(node): -+ """ -+ Get a list of all current operations for a given node (used to check if any resources are pending) -+ """ -+ # hsr3-db1:/home/tniek # crm_resource --list-operations -N hsr3-db0 -+ # rsc_azure-events-az (ocf::heartbeat:azure-events-az): Started: rsc_azure-events-az_start_0 (node=hsr3-db0, call=91, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=115ms): complete -+ # rsc_azure-events-az (ocf::heartbeat:azure-events-az): Started: rsc_azure-events-az_monitor_10000 (node=hsr3-db0, call=93, rc=0, last-rc-change=Fri Jun 8 22:37:47 2018, exec=197ms): complete -+ # rsc_SAPHana_HN1_HDB03 (ocf::suse:SAPHana): Master: rsc_SAPHana_HN1_HDB03_start_0 (node=hsr3-db0, call=-1, rc=193, last-rc-change=Fri Jun 8 22:37:46 2018, exec=0ms): pending -+ # rsc_SAPHanaTopology_HN1_HDB03 (ocf::suse:SAPHanaTopology): Started: rsc_SAPHanaTopology_HN1_HDB03_start_0 (node=hsr3-db0, call=90, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=3214ms): complete -+ ocf.logger.debug("listOperationsOnNode: begin; node = %s" % node) -+ -+ resources = clusterHelper._exec("crm_resource", "--list-operations", "-N", node) -+ if len(resources) == 0: -+ ret = [] -+ else: -+ ret = resources.split("\n") -+ -+ ocf.logger.debug("listOperationsOnNode: finished; return = %s" % str(ret)) -+ return ret -+ -+ @staticmethod -+ def noPendingResourcesOnNode(node): -+ """ -+ Check that there are no pending resources on a given node -+ """ -+ ocf.logger.debug("noPendingResourcesOnNode: begin; node = %s" % node) -+ -+ for r in clusterHelper.listOperationsOnNode(node): -+ ocf.logger.debug("noPendingResourcesOnNode: * %s" % r) -+ resource = r.split()[-1] -+ if resource == "pending": -+ ocf.logger.info("noPendingResourcesOnNode: found resource %s that is still pending" % resource) -+ ocf.logger.debug("noPendingResourcesOnNode: finished; return = False") -+ return False -+ ocf.logger.info("noPendingResourcesOnNode: no pending resources on node %s" % node) -+ ocf.logger.debug("noPendingResourcesOnNode: finished; return = True") -+ -+ return True -+ -+ @staticmethod -+ def allResourcesStoppedOnNode(node): -+ """ -+ Check that all resources on a given node are stopped -+ """ -+ ocf.logger.debug("allResourcesStoppedOnNode: begin; node = %s" % node) -+ -+ if clusterHelper.noPendingResourcesOnNode(node): -+ if len(clusterHelper.transitionSummary()) == 0: -+ ocf.logger.info("allResourcesStoppedOnNode: no pending resources on node %s and empty transition summary" % node) -+ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = True") -+ return True -+ ocf.logger.info("allResourcesStoppedOnNode: 
transition summary is not empty") -+ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False") -+ return False -+ -+ ocf.logger.info("allResourcesStoppedOnNode: still pending resources on node %s" % node) -+ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False") -+ return False -+ -+############################################################################## -+ -+AVAILABLE = 0 # Node is online and ready to handle events -+STOPPING = 1 # Standby has been triggered, but some resources are still running -+IN_EVENT = 2 # All resources are stopped, and event has been initiated via Azure Metadata Service -+ON_HOLD = 3 # Node has a pending event that cannot be started there are no other nodes available -+ -+def stringToNodeState(name): -+ if type(name) == int: return name -+ if name == "STOPPING": return STOPPING -+ if name == "IN_EVENT": return IN_EVENT -+ if name == "ON_HOLD": return ON_HOLD -+ return AVAILABLE -+ -+def nodeStateToString(state): -+ if state == STOPPING: return "STOPPING" -+ if state == IN_EVENT: return "IN_EVENT" -+ if state == ON_HOLD: return "ON_HOLD" -+ return "AVAILABLE" -+ -+############################################################################## -+ -+class Node: -+ """ -+ Core class implementing logic for a cluster node -+ """ -+ def __init__(self, ra): -+ self.raOwner = ra -+ self.azInfo = azHelper.getInstanceInfo() -+ self.azName = self.azInfo.name -+ self.hostName = socket.gethostname() -+ self.setAttr("azName", self.azName) -+ clusterHelper.setAttr("hostName_%s" % self.azName, self.hostName) -+ -+ def getAttr(self, key): -+ """ -+ Get a local attribute -+ """ -+ return clusterHelper.getAttr(key, node=self.hostName) -+ -+ def setAttr(self, key, value): -+ """ -+ Set a local attribute -+ """ -+ return clusterHelper.setAttr(key, value, node=self.hostName) -+ -+ def selfOrOtherNode(self, node): -+ """ -+ Helper function to distinguish self/other node -+ """ -+ return node if node else self.hostName -+ -+ def setState(self, state, node=None): -+ """ -+ Set the state for a given node (or self) -+ """ -+ node = self.selfOrOtherNode(node) -+ ocf.logger.debug("setState: begin; node = %s, state = %s" % (node, nodeStateToString(state))) -+ -+ clusterHelper.setAttr(attr_curNodeState, nodeStateToString(state), node=node) -+ -+ ocf.logger.debug("setState: finished") -+ -+ def getState(self, node=None): -+ """ -+ Get the state for a given node (or self) -+ """ -+ node = self.selfOrOtherNode(node) -+ ocf.logger.debug("getState: begin; node = %s" % node) -+ -+ state = clusterHelper.getAttr(attr_curNodeState, node=node) -+ ocf.logger.debug("getState: state = %s" % state) -+ ocf.logger.debug("getState: finished") -+ if not state: -+ return AVAILABLE -+ return stringToNodeState(state) -+ -+ def setEventIDs(self, eventIDs, node=None): -+ """ -+ Set pending EventIDs for a given node (or self) -+ """ -+ node = self.selfOrOtherNode(node) -+ ocf.logger.debug("setEventIDs: begin; node = %s, eventIDs = %s" % (node, str(eventIDs))) -+ -+ if eventIDs: -+ eventIDStr = ",".join(eventIDs) -+ else: -+ eventIDStr = None -+ clusterHelper.setAttr(attr_pendingEventIDs, eventIDStr, node=node) -+ -+ ocf.logger.debug("setEventIDs: finished") -+ return -+ -+ def getEventIDs(self, node=None): -+ """ -+ Get pending EventIDs for a given node (or self) -+ """ -+ node = self.selfOrOtherNode(node) -+ ocf.logger.debug("getEventIDs: begin; node = %s" % node) -+ -+ eventIDStr = clusterHelper.getAttr(attr_pendingEventIDs, node=node) -+ if eventIDStr: -+ eventIDs = 
eventIDStr.split(",") -+ else: -+ eventIDs = None -+ -+ ocf.logger.debug("getEventIDs: finished; eventIDs = %s" % str(eventIDs)) -+ return eventIDs -+ -+ def updateNodeStateAndEvents(self, state, eventIDs, node=None): -+ """ -+ Set the state and pending EventIDs for a given node (or self) -+ """ -+ ocf.logger.debug("updateNodeStateAndEvents: begin; node = %s, state = %s, eventIDs = %s" % (node, nodeStateToString(state), str(eventIDs))) -+ -+ self.setState(state, node=node) -+ self.setEventIDs(eventIDs, node=node) -+ -+ ocf.logger.debug("updateNodeStateAndEvents: finished") -+ return state -+ -+ def putNodeStandby(self, node=None): -+ """ -+ Put self to standby -+ """ -+ node = self.selfOrOtherNode(node) -+ ocf.logger.debug("putNodeStandby: begin; node = %s" % node) -+ -+ clusterHelper._exec("crm_attribute", -+ "--node", node, -+ "--name", attr_healthstate, -+ "--update", "-1000000", -+ "--lifetime=forever") -+ -+ ocf.logger.debug("putNodeStandby: finished") -+ -+ def isNodeInStandby(self, node=None): -+ """ -+ check if node is in standby -+ """ -+ node = self.selfOrOtherNode(node) -+ ocf.logger.debug("isNodeInStandby: begin; node = %s" % node) -+ isInStandy = False -+ -+ healthAttributeStr = clusterHelper.getAttr(attr_healthstate, node) -+ if healthAttributeStr is not None: -+ try: -+ healthAttribute = int(healthAttributeStr) -+ isInStandy = healthAttribute < 0 -+ except ValueError: -+ # Handle the exception -+ ocf.logger.warn("Health attribute %s on node %s cannot be converted to an integer value" % (healthAttributeStr, node)) -+ -+ ocf.logger.debug("isNodeInStandby: finished - result %s" % isInStandy) -+ return isInStandy -+ -+ def putNodeOnline(self, node=None): -+ """ -+ Put self back online -+ """ -+ node = self.selfOrOtherNode(node) -+ ocf.logger.debug("putNodeOnline: begin; node = %s" % node) -+ -+ clusterHelper._exec("crm_attribute", -+ "--node", node, -+ "--name", "#health-azure", -+ "--update", "0", -+ "--lifetime=forever") -+ -+ ocf.logger.debug("putNodeOnline: finished") -+ -+ def separateEvents(self, events): -+ """ -+ Split own/other nodes' events -+ """ -+ ocf.logger.debug("separateEvents: begin; events = %s" % str(events)) -+ -+ localEvents = [] -+ remoteEvents = [] -+ for e in events: -+ e = attrDict(e) -+ if e.EventType not in self.raOwner.relevantEventTypes: -+ continue -+ if self.azName in e.Resources: -+ localEvents.append(e) -+ else: -+ remoteEvents.append(e) -+ ocf.logger.debug("separateEvents: finished; localEvents = %s, remoteEvents = %s" % (str(localEvents), str(remoteEvents))) -+ return (localEvents, remoteEvents) -+ -+############################################################################## -+ -+class raAzEvents: -+ """ -+ Main class for resource agent -+ """ -+ def __init__(self, relevantEventTypes): -+ self.node = Node(self) -+ self.relevantEventTypes = relevantEventTypes -+ -+ def monitor(self): -+ ocf.logger.debug("monitor: begin") -+ -+ events = azHelper.pullScheduledEvents() -+ -+ # get current document version -+ curDocVersion = events.DocumentIncarnation -+ lastDocVersion = self.node.getAttr(attr_lastDocVersion) -+ ocf.logger.debug("monitor: lastDocVersion = %s; curDocVersion = %s" % (lastDocVersion, curDocVersion)) -+ -+ # split events local/remote -+ (localEvents, remoteEvents) = self.node.separateEvents(events.Events) -+ -+ # ensure local events are only executing once -+ if curDocVersion == lastDocVersion: -+ ocf.logger.info("monitor: already handled curDocVersion, skip") -+ return ocf.OCF_SUCCESS -+ -+ localAzEventIDs = set() -+ for e in 
localEvents: -+ localAzEventIDs.add(e.EventId) -+ -+ curState = self.node.getState() -+ clusterEventIDs = self.node.getEventIDs() -+ -+ ocf.logger.debug("monitor: curDocVersion has not been handled yet") -+ -+ if clusterEventIDs: -+ # there are pending events set, so our state must be STOPPING or IN_EVENT -+ i = 0; touchedEventIDs = False -+ while i < len(clusterEventIDs): -+ # clean up pending events that are already finished according to AZ -+ if clusterEventIDs[i] not in localAzEventIDs: -+ ocf.logger.info("monitor: remove finished local clusterEvent %s" % (clusterEventIDs[i])) -+ clusterEventIDs.pop(i) -+ touchedEventIDs = True -+ else: -+ i += 1 -+ if len(clusterEventIDs) > 0: -+ # there are still pending events (either because we're still stopping, or because the event is still in place) -+ # either way, we need to wait -+ if touchedEventIDs: -+ ocf.logger.info("monitor: added new local clusterEvent %s" % str(clusterEventIDs)) -+ self.node.setEventIDs(clusterEventIDs) -+ else: -+ ocf.logger.info("monitor: no local clusterEvents were updated") -+ else: -+ # there are no more pending events left after cleanup -+ if clusterHelper.noPendingResourcesOnNode(self.node.hostName): -+ # and no pending resources on the node -> set it back online -+ ocf.logger.info("monitor: all local events finished -> clean up, put node online and AVAILABLE") -+ curState = self.node.updateNodeStateAndEvents(AVAILABLE, None) -+ self.node.putNodeOnline() -+ clusterHelper.removeHoldFromNodes() -+ # If Azure Scheduled Events are not used for 24 hours (e.g. because the cluster was asleep), it will be disabled for a VM. -+ # When the cluster wakes up and starts using it again, the DocumentIncarnation is reset. -+ # We need to remove it during cleanup, otherwise azure-events-az will not process the event after wakeup -+ self.node.setAttr(attr_lastDocVersion, None) -+ else: -+ ocf.logger.info("monitor: all local events finished, but some resources have not completed startup yet -> wait") -+ else: -+ if curState == AVAILABLE: -+ if len(localAzEventIDs) > 0: -+ if clusterHelper.otherNodesAvailable(self.node): -+ ocf.logger.info("monitor: can handle local events %s -> set state STOPPING" % (str(localAzEventIDs))) -+ curState = self.node.updateNodeStateAndEvents(STOPPING, localAzEventIDs) -+ else: -+ ocf.logger.info("monitor: cannot handle azEvents %s (only node available) -> set state ON_HOLD" % str(localAzEventIDs)) -+ self.node.setState(ON_HOLD) -+ else: -+ ocf.logger.debug("monitor: no local azEvents to handle") -+ -+ if curState == STOPPING: -+ eventIDsForNode = {} -+ if clusterHelper.noPendingResourcesOnNode(self.node.hostName): -+ if not self.node.isNodeInStandby(): -+ ocf.logger.info("monitor: all local resources are started properly -> put node standby and exit") -+ self.node.putNodeStandby() -+ return ocf.OCF_SUCCESS -+ -+ for e in localEvents: -+ ocf.logger.info("monitor: handling remote event %s (%s; nodes = %s)" % (e.EventId, e.EventType, str(e.Resources))) -+ # before we can force an event to start, we need to ensure all nodes involved have stopped their resources -+ if e.EventStatus == "Scheduled": -+ allNodesStopped = True -+ for azName in e.Resources: -+ hostName = clusterHelper.getHostNameFromAzName(azName) -+ state = self.node.getState(node=hostName) -+ if state == STOPPING: -+ # the only way we can continue is when node state is STOPPING, but all resources have been stopped -+ if not clusterHelper.allResourcesStoppedOnNode(hostName): -+ ocf.logger.info("monitor: (at least) node %s has still resources 
running -> wait" % hostName) -+ allNodesStopped = False -+ break -+ elif state in (AVAILABLE, IN_EVENT, ON_HOLD): -+ ocf.logger.info("monitor: node %s is still %s -> remote event needs to be picked up locally" % (hostName, nodeStateToString(state))) -+ allNodesStopped = False -+ break -+ if allNodesStopped: -+ ocf.logger.info("monitor: nodes %s are stopped -> add remote event %s to force list" % (str(e.Resources), e.EventId)) -+ for n in e.Resources: -+ hostName = clusterHelper.getHostNameFromAzName(n) -+ if hostName in eventIDsForNode: -+ eventIDsForNode[hostName].append(e.EventId) -+ else: -+ eventIDsForNode[hostName] = [e.EventId] -+ elif e.EventStatus == "Started": -+ ocf.logger.info("monitor: remote event already started") -+ -+ # force the start of all events whose nodes are ready (i.e. have no more resources running) -+ if len(eventIDsForNode.keys()) > 0: -+ eventIDsToForce = set([item for sublist in eventIDsForNode.values() for item in sublist]) -+ ocf.logger.info("monitor: set nodes %s to IN_EVENT; force remote events %s" % (str(eventIDsForNode.keys()), str(eventIDsToForce))) -+ for node, eventId in eventIDsForNode.items(): -+ self.node.updateNodeStateAndEvents(IN_EVENT, eventId, node=node) -+ azHelper.forceEvents(eventIDsToForce) -+ self.node.setAttr(attr_lastDocVersion, curDocVersion) -+ else: -+ ocf.logger.info("monitor: some local resources are not clean yet -> wait") -+ -+ ocf.logger.debug("monitor: finished") -+ return ocf.OCF_SUCCESS -+ -+############################################################################## -+ -+def setLoglevel(verbose): -+ # set up writing into syslog -+ loglevel = default_loglevel -+ if verbose: -+ opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1)) -+ urllib2.install_opener(opener) -+ loglevel = ocf.logging.DEBUG -+ ocf.log.setLevel(loglevel) -+ -+description = ( -+ "Microsoft Azure Scheduled Events monitoring agent", -+ """This resource agent implements a monitor for scheduled -+(maintenance) events for a Microsoft Azure VM. -+ -+If any relevant events are found, it moves all Pacemaker resources -+away from the affected node to allow for a graceful shutdown. -+ -+ Usage: -+ [OCF_RESKEY_eventTypes=VAL] [OCF_RESKEY_verbose=VAL] azure-events-az ACTION -+ -+ action (required): Supported values: monitor, help, meta-data -+ eventTypes (optional): List of event types to be considered -+ relevant by the resource agent (comma-separated). -+ Supported values: Freeze,Reboot,Redeploy -+ Default = Reboot,Redeploy -+/ verbose (optional): If set to true, displays debug info. 
-+ Default = false -+ -+ Deployment: -+ crm configure primitive rsc_azure-events-az ocf:heartbeat:azure-events-az \ -+ op monitor interval=10s -+ crm configure clone cln_azure-events-az rsc_azure-events-az -+ -+For further information on Microsoft Azure Scheduled Events, please -+refer to the following documentation: -+https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events -+""") -+ -+def monitor_action(eventTypes): -+ relevantEventTypes = set(eventTypes.split(",") if eventTypes else []) -+ ra = raAzEvents(relevantEventTypes) -+ return ra.monitor() -+ -+def validate_action(eventTypes): -+ if eventTypes: -+ for event in eventTypes.split(","): -+ if event not in ("Freeze", "Reboot", "Redeploy"): -+ ocf.ocf_exit_reason("Event type not one of Freeze, Reboot, Redeploy: " + eventTypes) -+ return ocf.OCF_ERR_CONFIGURED -+ return ocf.OCF_SUCCESS -+ -+def main(): -+ agent = ocf.Agent("azure-events-az", shortdesc=description[0], longdesc=description[1]) -+ agent.add_parameter( -+ "eventTypes", -+ shortdesc="List of resources to be considered", -+ longdesc="A comma-separated list of event types that will be handled by this resource agent. (Possible values: Freeze,Reboot,Redeploy)", -+ content_type="string", -+ default="Reboot,Redeploy") -+ agent.add_parameter( -+ "verbose", -+ shortdesc="Enable verbose agent logging", -+ longdesc="Set to true to enable verbose logging", -+ content_type="boolean", -+ default="false") -+ agent.add_action("start", timeout=10, handler=lambda: ocf.OCF_SUCCESS) -+ agent.add_action("stop", timeout=10, handler=lambda: ocf.OCF_SUCCESS) -+ agent.add_action("validate-all", timeout=20, handler=validate_action) -+ agent.add_action("monitor", timeout=240, interval=10, handler=monitor_action) -+ setLoglevel(ocf.is_true(ocf.get_parameter("verbose", "false"))) -+ agent.run() -+ -+if __name__ == '__main__': -+ main() -\ No newline at end of file - -From a95337d882c7cc69d604b050159ad50b679f18be Mon Sep 17 00:00:00 2001 -From: MSSedusch -Date: Thu, 2 Jun 2022 14:10:33 +0200 -Subject: [PATCH 2/2] Remove developer documentation - ---- - heartbeat/azure-events-az.in | 11 ----------- - 1 file changed, 11 deletions(-) - -diff --git a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in -index 616fc8d9e..59d095306 100644 ---- a/heartbeat/azure-events-az.in -+++ b/heartbeat/azure-events-az.in -@@ -723,17 +723,6 @@ description = ( - If any relevant events are found, it moves all Pacemaker resources - away from the affected node to allow for a graceful shutdown. - -- Usage: -- [OCF_RESKEY_eventTypes=VAL] [OCF_RESKEY_verbose=VAL] azure-events-az ACTION -- -- action (required): Supported values: monitor, help, meta-data -- eventTypes (optional): List of event types to be considered -- relevant by the resource agent (comma-separated). -- Supported values: Freeze,Reboot,Redeploy -- Default = Reboot,Redeploy --/ verbose (optional): If set to true, displays debug info. 
-- Default = false -- - Deployment: - crm configure primitive rsc_azure-events-az ocf:heartbeat:azure-events-az \ - op monitor interval=10s diff --git a/SOURCES/bz1992661-mysql-use-ssl-mode.patch b/SOURCES/bz1992661-mysql-use-ssl-mode.patch deleted file mode 100644 index c930c4e..0000000 --- a/SOURCES/bz1992661-mysql-use-ssl-mode.patch +++ /dev/null @@ -1,24 +0,0 @@ -From ed5bc606a4db5108995df9297698cf9dc14cccb2 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 18 Jan 2022 11:32:05 +0100 -Subject: [PATCH] mysql-common: fix local SSL connection by using - --ssl-mode=REQUIRED which is available on 5.7+ (--ssl is not available in - 8.0) - ---- - heartbeat/mysql-common.sh | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh -index 459948b10..de8763544 100755 ---- a/heartbeat/mysql-common.sh -+++ b/heartbeat/mysql-common.sh -@@ -97,7 +97,7 @@ MYSQL_BINDIR=`dirname ${OCF_RESKEY_binary}` - - MYSQL=$OCF_RESKEY_client_binary - if ocf_is_true "$OCF_RESKEY_replication_require_ssl"; then -- MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="--ssl" -+ MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="--ssl-mode=REQUIRED" - else - MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="" - fi diff --git a/SOURCES/bz1995178-storage-mon-fix-typo.patch b/SOURCES/bz1995178-storage-mon-fix-typo.patch deleted file mode 100644 index 0a8269a..0000000 --- a/SOURCES/bz1995178-storage-mon-fix-typo.patch +++ /dev/null @@ -1,23 +0,0 @@ -From 09cde6531a87fd6a04568eaae94d5c489f36a8b6 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Mon, 6 Sep 2021 15:07:41 +0200 -Subject: [PATCH] storage-mon: update metadata to suggest usage in combination - with HealthSMART agent - ---- - heartbeat/storage-mon.in | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in -index 5b289fe55..875095670 100644 ---- a/heartbeat/storage-mon.in -+++ b/heartbeat/storage-mon.in -@@ -75,7 +75,7 @@ meta_data() { - - System health agent that checks the storage I/O status of the given drives and - updates the #health-storage attribute. Usage is highly recommended in combination --with storage-mon monitoring agent. The agent currently support a maximum of 25 -+with the HealthSMART monitoring agent. The agent currently support a maximum of 25 - devices per instance. 
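The wording fix above points users at the HealthSMART agent; a hypothetical sketch of that combination, with both agents cloned so every node checks its own disks (resource names and the drives parameter are illustrative assumptions, not taken from any patch here):

    crm configure primitive health-storage ocf:heartbeat:storage-mon \
        params drives="/dev/sda" op monitor interval=60s
    crm configure primitive health-smart ocf:heartbeat:HealthSMART \
        params drives="/dev/sda" op monitor interval=60s
    crm configure clone cln_health-storage health-storage
    crm configure clone cln_health-smart health-smart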
- - storage I/O health status diff --git a/SOURCES/bz2003117-all-agents-set-correct-agent-ocf-version.patch b/SOURCES/bz2003117-all-agents-set-correct-agent-ocf-version.patch deleted file mode 100644 index 770f8a5..0000000 --- a/SOURCES/bz2003117-all-agents-set-correct-agent-ocf-version.patch +++ /dev/null @@ -1,2016 +0,0 @@ -From d6b954890b496fcdd8a76d7c2dd44a36fa0ad42c Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 7 Oct 2021 11:10:16 +0200 -Subject: [PATCH 1/3] all agents: specify agent and OCF versions corrently in - metadata - ---- - heartbeat/AoEtarget.in | 2 +- - heartbeat/AudibleAlarm | 2 +- - heartbeat/CTDB.in | 2 +- - heartbeat/ClusterMon | 2 +- - heartbeat/Delay | 2 +- - heartbeat/Dummy | 2 +- - heartbeat/EvmsSCC | 2 +- - heartbeat/Evmsd | 2 +- - heartbeat/Filesystem | 4 ++-- - heartbeat/ICP | 2 +- - heartbeat/IPaddr | 2 +- - heartbeat/IPaddr2 | 2 +- - heartbeat/IPsrcaddr | 2 +- - heartbeat/IPv6addr.c | 2 +- - heartbeat/LVM | 2 +- - heartbeat/LVM-activate | 2 +- - heartbeat/LinuxSCSI | 2 +- - heartbeat/MailTo | 2 +- - heartbeat/ManageRAID.in | 4 ++-- - heartbeat/ManageVE.in | 4 ++-- - heartbeat/NodeUtilization | 2 +- - heartbeat/Pure-FTPd | 2 +- - heartbeat/Raid1 | 2 +- - heartbeat/Route | 2 +- - heartbeat/SAPDatabase | 4 ++-- - heartbeat/SAPInstance | 4 ++-- - heartbeat/SendArp | 2 +- - heartbeat/ServeRAID | 2 +- - heartbeat/SphinxSearchDaemon | 2 +- - heartbeat/Squid.in | 2 +- - heartbeat/Stateful | 2 +- - heartbeat/SysInfo.in | 2 +- - heartbeat/VIPArip | 2 +- - heartbeat/VirtualDomain | 4 ++-- - heartbeat/WAS | 2 +- - heartbeat/WAS6 | 2 +- - heartbeat/WinPopup | 2 +- - heartbeat/Xen | 2 +- - heartbeat/Xinetd | 2 +- - heartbeat/ZFS | 2 +- - heartbeat/aliyun-vpc-move-ip | 4 ++-- - heartbeat/anything | 2 +- - heartbeat/apache | 2 +- - heartbeat/asterisk | 2 +- - heartbeat/aws-vpc-move-ip | 4 ++-- - heartbeat/aws-vpc-route53.in | 2 +- - heartbeat/awseip | 2 +- - heartbeat/awsvip | 2 +- - heartbeat/azure-lb | 2 +- - heartbeat/clvm.in | 2 +- - heartbeat/conntrackd.in | 4 ++-- - heartbeat/crypt | 2 +- - heartbeat/db2 | 2 +- - heartbeat/dhcpd | 4 ++-- - heartbeat/dnsupdate.in | 2 +- - heartbeat/docker | 2 +- - heartbeat/docker-compose | 4 ++-- - heartbeat/dovecot | 4 ++-- - heartbeat/eDir88.in | 2 +- - heartbeat/ethmonitor | 4 ++-- - heartbeat/exportfs | 2 +- - heartbeat/fio.in | 2 +- - heartbeat/galera.in | 2 +- - heartbeat/garbd | 2 +- - heartbeat/gcp-ilb | 2 +- - heartbeat/gcp-pd-move.in | 2 +- - heartbeat/gcp-vpc-move-ip.in | 2 +- - heartbeat/gcp-vpc-move-route.in | 2 +- - heartbeat/gcp-vpc-move-vip.in | 2 +- - heartbeat/iSCSILogicalUnit.in | 4 ++-- - heartbeat/iSCSITarget.in | 4 ++-- - heartbeat/ids | 2 +- - heartbeat/iface-bridge | 2 +- - heartbeat/iface-vlan | 2 +- - heartbeat/ipsec | 2 +- - heartbeat/iscsi | 2 +- - heartbeat/jboss | 2 +- - heartbeat/jira.in | 4 ++-- - heartbeat/kamailio.in | 2 +- - heartbeat/lvmlockd | 2 +- - heartbeat/lxc.in | 4 ++-- - heartbeat/lxd-info.in | 2 +- - heartbeat/machine-info.in | 2 +- - heartbeat/mariadb.in | 2 +- - heartbeat/mdraid | 2 +- - heartbeat/minio | 2 +- - heartbeat/mpathpersist.in | 4 ++-- - heartbeat/mysql | 2 +- - heartbeat/mysql-proxy | 4 ++-- - heartbeat/nagios | 4 ++-- - heartbeat/named | 2 +- - heartbeat/nfsnotify.in | 2 +- - heartbeat/nfsserver | 2 +- - heartbeat/nginx | 2 +- - heartbeat/nvmet-namespace | 4 ++-- - heartbeat/nvmet-port | 4 ++-- - heartbeat/nvmet-subsystem | 4 ++-- - heartbeat/openstack-cinder-volume | 4 ++-- - heartbeat/openstack-floating-ip | 4 ++-- - heartbeat/openstack-info.in | 2 +- - 
heartbeat/openstack-virtual-ip | 4 ++-- - heartbeat/oraasm | 4 ++-- - heartbeat/oracle | 2 +- - heartbeat/oralsnr | 2 +- - heartbeat/ovsmonitor | 4 ++-- - heartbeat/pgagent | 2 +- - heartbeat/pgsql | 2 +- - heartbeat/pingd | 2 +- - heartbeat/podman | 2 +- - heartbeat/portblock | 2 +- - heartbeat/postfix | 4 ++-- - heartbeat/pound | 2 +- - heartbeat/proftpd | 2 +- - heartbeat/rabbitmq-cluster.in | 2 +- - heartbeat/redis.in | 2 +- - heartbeat/rkt | 2 +- - heartbeat/rsyncd | 2 +- - heartbeat/rsyslog.in | 2 +- - heartbeat/scsi2reservation | 2 +- - heartbeat/sfex | 4 ++-- - heartbeat/sg_persist.in | 4 ++-- - heartbeat/slapd.in | 4 ++-- - heartbeat/smb-share.in | 2 +- - heartbeat/storage-mon.in | 2 +- - heartbeat/sybaseASE.in | 2 +- - heartbeat/symlink | 4 ++-- - heartbeat/syslog-ng.in | 2 +- - heartbeat/tomcat | 2 +- - heartbeat/varnish | 2 +- - heartbeat/vdo-vol | 4 ++-- - heartbeat/vmware | 4 ++-- - heartbeat/vsftpd.in | 2 +- - heartbeat/zabbixserver | 4 ++-- - 133 files changed, 169 insertions(+), 169 deletions(-) - -diff --git a/heartbeat/AoEtarget.in b/heartbeat/AoEtarget.in -index 5e3f01bccd..5a14c1ee7c 100644 ---- a/heartbeat/AoEtarget.in -+++ b/heartbeat/AoEtarget.in -@@ -48,7 +48,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - This resource agent manages an ATA-over-Ethernet (AoE) target using vblade. -diff --git a/heartbeat/AudibleAlarm b/heartbeat/AudibleAlarm -index 19cb73e610..44a30884e0 100755 ---- a/heartbeat/AudibleAlarm -+++ b/heartbeat/AudibleAlarm -@@ -37,7 +37,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in -index a2a83be676..d25d026cab 100755 ---- a/heartbeat/CTDB.in -+++ b/heartbeat/CTDB.in -@@ -153,7 +153,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/ClusterMon b/heartbeat/ClusterMon -index 2bbf87da4b..161e309638 100755 ---- a/heartbeat/ClusterMon -+++ b/heartbeat/ClusterMon -@@ -60,7 +60,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/Delay b/heartbeat/Delay -index 56f4e4f0a7..7ba6623f24 100755 ---- a/heartbeat/Delay -+++ b/heartbeat/Delay -@@ -50,7 +50,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/Dummy b/heartbeat/Dummy -index c6f911ba2b..81a675d63a 100755 ---- a/heartbeat/Dummy -+++ b/heartbeat/Dummy -@@ -48,7 +48,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/EvmsSCC b/heartbeat/EvmsSCC -index 53aba4579c..a69113848f 100755 ---- a/heartbeat/EvmsSCC -+++ b/heartbeat/EvmsSCC -@@ -65,7 +65,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/Evmsd b/heartbeat/Evmsd -index 1f2413432f..6e30eae61e 100755 ---- a/heartbeat/Evmsd -+++ b/heartbeat/Evmsd -@@ -43,7 +43,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index 4554e12d7f..54d24eef7b 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -131,8 +131,8 @@ meta_data() { - cat < - -- --1.1 -+ -+1.0 - - - Resource script for Filesystem. 
It manages a Filesystem on a -diff --git a/heartbeat/ICP b/heartbeat/ICP -index 90cfa3f740..0bf37dec42 100755 ---- a/heartbeat/ICP -+++ b/heartbeat/ICP -@@ -66,7 +66,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/IPaddr b/heartbeat/IPaddr -index fb0deab3bd..9b0ea8174f 100755 ---- a/heartbeat/IPaddr -+++ b/heartbeat/IPaddr -@@ -89,7 +89,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - This script manages IP alias IP addresses -diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 -index db0b0e547d..735dd77795 100755 ---- a/heartbeat/IPaddr2 -+++ b/heartbeat/IPaddr2 -@@ -131,7 +131,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index 7cdc3a9fe6..ec868409fe 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -93,7 +93,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/IPv6addr.c b/heartbeat/IPv6addr.c -index c354c9dde8..7d1345cd11 100644 ---- a/heartbeat/IPv6addr.c -+++ b/heartbeat/IPv6addr.c -@@ -835,7 +835,7 @@ meta_data_addr6(void) - const char* meta_data= - "\n" - "\n" -- "\n" -+ "\n" - " 1.0\n" - " \n" - " This script manages IPv6 alias IPv6 addresses,It can add an IP6\n" -diff --git a/heartbeat/LVM b/heartbeat/LVM -index 287856e54b..b587bd8e38 100755 ---- a/heartbeat/LVM -+++ b/heartbeat/LVM -@@ -67,7 +67,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate -index 53223367e9..fadefcb7ab 100755 ---- a/heartbeat/LVM-activate -+++ b/heartbeat/LVM-activate -@@ -92,7 +92,7 @@ meta_data() { - - - -- -+ - 1.0 - - -diff --git a/heartbeat/LinuxSCSI b/heartbeat/LinuxSCSI -index 015251eac4..306e7ee7ba 100755 ---- a/heartbeat/LinuxSCSI -+++ b/heartbeat/LinuxSCSI -@@ -113,7 +113,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/MailTo b/heartbeat/MailTo -index a180f21ffd..56940bafaa 100755 ---- a/heartbeat/MailTo -+++ b/heartbeat/MailTo -@@ -52,7 +52,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/ManageRAID.in b/heartbeat/ManageRAID.in -index 97301d6e0b..bf5c745f6e 100644 ---- a/heartbeat/ManageRAID.in -+++ b/heartbeat/ManageRAID.in -@@ -96,8 +96,8 @@ meta_data() - cat < - -- -- 1.00.2 -+ -+ 1.0 - - - Manages starting, stopping and monitoring of RAID devices which -diff --git a/heartbeat/ManageVE.in b/heartbeat/ManageVE.in -index b8d251d391..f07ca5bdc6 100644 ---- a/heartbeat/ManageVE.in -+++ b/heartbeat/ManageVE.in -@@ -73,8 +73,8 @@ meta_data() - cat < - -- -- 1.00.4 -+ -+ 1.0 - - - This OCF compliant resource agent manages OpenVZ VEs and thus requires -diff --git a/heartbeat/NodeUtilization b/heartbeat/NodeUtilization -index 64b812581b..f98ab13e9c 100755 ---- a/heartbeat/NodeUtilization -+++ b/heartbeat/NodeUtilization -@@ -58,7 +58,7 @@ NodeUtilization_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/Pure-FTPd b/heartbeat/Pure-FTPd -index 6638c9db2b..1499dddc98 100755 ---- a/heartbeat/Pure-FTPd -+++ b/heartbeat/Pure-FTPd -@@ -54,7 +54,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - This script manages Pure-FTPd in an Active-Passive setup -diff --git a/heartbeat/Raid1 b/heartbeat/Raid1 -index d719df9574..924d94c305 100755 ---- a/heartbeat/Raid1 -+++ b/heartbeat/Raid1 -@@ -67,7 +67,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/Route b/heartbeat/Route -index 3faf6d52ee..8b390615af 100755 ---- a/heartbeat/Route -+++ b/heartbeat/Route -@@ -50,7 +50,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/SAPDatabase b/heartbeat/SAPDatabase -index 
3486303f10..563a6f3469 100755 ---- a/heartbeat/SAPDatabase -+++ b/heartbeat/SAPDatabase -@@ -109,8 +109,8 @@ meta_data() { - cat < - -- --2.14 -+ -+1.0 - - - Resource script for SAP databases. It manages a SAP database of any type as an HA resource. -diff --git a/heartbeat/SAPInstance b/heartbeat/SAPInstance -index f8bcbc05b8..016f59aff2 100755 ---- a/heartbeat/SAPInstance -+++ b/heartbeat/SAPInstance -@@ -110,8 +110,8 @@ sapinstance_meta_data() { - cat < - -- --2.14 -+ -+1.0 - - - Usually a SAP system consists of one database and at least one or more SAP instances (sometimes called application servers). One SAP Instance is defined by having exactly one instance profile. The instance profiles can usually be found in the directory /sapmnt/SID/profile. Each instance must be configured as it's own resource in the cluster configuration. -diff --git a/heartbeat/SendArp b/heartbeat/SendArp -index 9e4cbb1649..5af7bec3be 100755 ---- a/heartbeat/SendArp -+++ b/heartbeat/SendArp -@@ -82,7 +82,7 @@ sendarp_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/ServeRAID b/heartbeat/ServeRAID -index 5c5b90451d..6d6596f00a 100755 ---- a/heartbeat/ServeRAID -+++ b/heartbeat/ServeRAID -@@ -117,7 +117,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/SphinxSearchDaemon b/heartbeat/SphinxSearchDaemon -index cad03794d6..d4e9e855f6 100755 ---- a/heartbeat/SphinxSearchDaemon -+++ b/heartbeat/SphinxSearchDaemon -@@ -51,7 +51,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/Squid.in b/heartbeat/Squid.in -index b186f9ab12..12e2489364 100644 ---- a/heartbeat/Squid.in -+++ b/heartbeat/Squid.in -@@ -87,7 +87,7 @@ metadata_squid() - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/Stateful b/heartbeat/Stateful -index cc461405a2..aa2284467d 100755 ---- a/heartbeat/Stateful -+++ b/heartbeat/Stateful -@@ -45,7 +45,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/SysInfo.in b/heartbeat/SysInfo.in -index df4bf6dc00..c57b7b6614 100644 ---- a/heartbeat/SysInfo.in -+++ b/heartbeat/SysInfo.in -@@ -48,7 +48,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/VIPArip b/heartbeat/VIPArip -index 5fc7c94c91..688237d9d2 100755 ---- a/heartbeat/VIPArip -+++ b/heartbeat/VIPArip -@@ -49,7 +49,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain -index 140e297361..8db947a516 100755 ---- a/heartbeat/VirtualDomain -+++ b/heartbeat/VirtualDomain -@@ -77,8 +77,8 @@ VirtualDomain_meta_data() { - cat < - -- --1.1 -+ -+1.0 - - - Resource agent for a virtual domain (a.k.a. 
domU, virtual machine, -diff --git a/heartbeat/WAS b/heartbeat/WAS -index 3d9831dab8..15b56e99e0 100755 ---- a/heartbeat/WAS -+++ b/heartbeat/WAS -@@ -105,7 +105,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/WAS6 b/heartbeat/WAS6 -index 8f5f287e06..9223a7ee5b 100755 ---- a/heartbeat/WAS6 -+++ b/heartbeat/WAS6 -@@ -87,7 +87,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/WinPopup b/heartbeat/WinPopup -index ee3f68346e..b48f3b9435 100755 ---- a/heartbeat/WinPopup -+++ b/heartbeat/WinPopup -@@ -42,7 +42,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/Xen b/heartbeat/Xen -index d28e96db68..1ef20d7b79 100755 ---- a/heartbeat/Xen -+++ b/heartbeat/Xen -@@ -70,7 +70,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/Xinetd b/heartbeat/Xinetd -index e4cef5a1fa..778e18c1a9 100755 ---- a/heartbeat/Xinetd -+++ b/heartbeat/Xinetd -@@ -38,7 +38,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/ZFS b/heartbeat/ZFS -index 3e1c0e934e..5574d328f0 100755 ---- a/heartbeat/ZFS -+++ b/heartbeat/ZFS -@@ -38,7 +38,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - This script manages ZFS pools -diff --git a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip -index feb575b60c..1a3a1a014c 100755 ---- a/heartbeat/aliyun-vpc-move-ip -+++ b/heartbeat/aliyun-vpc-move-ip -@@ -175,8 +175,8 @@ ecs_ip_metadata() { - cat < - -- --2.0 -+ -+1.0 - - Resource Agent to move IP addresses within a VPC of the Aliyun Webservices ECS - by changing an entry in an specific routing table -diff --git a/heartbeat/anything b/heartbeat/anything -index 766895a5d4..137a612634 100755 ---- a/heartbeat/anything -+++ b/heartbeat/anything -@@ -236,7 +236,7 @@ anything_meta() { - cat < - -- -+ - 1.0 - - This is a generic OCF RA to manage almost anything. 
-diff --git a/heartbeat/apache b/heartbeat/apache -index b249a46c75..448225ed77 100755 ---- a/heartbeat/apache -+++ b/heartbeat/apache -@@ -474,7 +474,7 @@ apache_meta_data(){ - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/asterisk b/heartbeat/asterisk -index ed34d7a10f..272a350f91 100755 ---- a/heartbeat/asterisk -+++ b/heartbeat/asterisk -@@ -86,7 +86,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip -index 3ca3d6bd63..5d52040808 100755 ---- a/heartbeat/aws-vpc-move-ip -+++ b/heartbeat/aws-vpc-move-ip -@@ -76,8 +76,8 @@ metadata() { - cat < - -- --2.0 -+ -+1.0 - - Resource Agent to move IP addresses within a VPC of the Amazon Webservices EC2 - by changing an entry in an specific routing table -diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in -index f7c2dc2427..22cbb35833 100644 ---- a/heartbeat/aws-vpc-route53.in -+++ b/heartbeat/aws-vpc-route53.in -@@ -75,7 +75,7 @@ metadata() { - cat < - -- -+ - 1.0 - - Update Route53 record of Amazon Webservices EC2 by updating an entry in a -diff --git a/heartbeat/awseip b/heartbeat/awseip -index 12ffffaa3c..dc48460c85 100755 ---- a/heartbeat/awseip -+++ b/heartbeat/awseip -@@ -55,7 +55,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/awsvip b/heartbeat/awsvip -index 044d049c6b..037278e296 100755 ---- a/heartbeat/awsvip -+++ b/heartbeat/awsvip -@@ -54,7 +54,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb -index ade1b45772..7f585bf77c 100755 ---- a/heartbeat/azure-lb -+++ b/heartbeat/azure-lb -@@ -47,7 +47,7 @@ lb_metadata() { - cat < - -- -+ - 1.0 - - Resource agent to answer Azure Load Balancer health probe requests -diff --git a/heartbeat/clvm.in b/heartbeat/clvm.in -index fd9afe24dd..a5db7a5134 100644 ---- a/heartbeat/clvm.in -+++ b/heartbeat/clvm.in -@@ -48,7 +48,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/conntrackd.in b/heartbeat/conntrackd.in -index aa6d3b8c94..f115250d6c 100644 ---- a/heartbeat/conntrackd.in -+++ b/heartbeat/conntrackd.in -@@ -46,8 +46,8 @@ meta_data() { - cat < - -- --1.2 -+ -+1.0 - - - Master/Slave OCF Resource Agent for conntrackd -diff --git a/heartbeat/crypt b/heartbeat/crypt -index 05bded7c56..56db379666 100755 ---- a/heartbeat/crypt -+++ b/heartbeat/crypt -@@ -50,7 +50,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/db2 b/heartbeat/db2 -index 459136cbd7..03146a9570 100755 ---- a/heartbeat/db2 -+++ b/heartbeat/db2 -@@ -58,7 +58,7 @@ db2_meta_data() { - cat < - -- -+ - 1.0 - - Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles in master/slave configuration. Multiple partitions are supported. -diff --git a/heartbeat/dhcpd b/heartbeat/dhcpd -index 46027b39b9..4df4923778 100755 ---- a/heartbeat/dhcpd -+++ b/heartbeat/dhcpd -@@ -83,8 +83,8 @@ dhcpd_meta_data() { - cat < - -- -- 0.1 -+ -+ 1.0 - - Manage an ISC DHCP server service in a chroot environment. 
- -diff --git a/heartbeat/dnsupdate.in b/heartbeat/dnsupdate.in -index 34a6c56f30..4fcd2a8ba2 100644 ---- a/heartbeat/dnsupdate.in -+++ b/heartbeat/dnsupdate.in -@@ -47,7 +47,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/docker b/heartbeat/docker -index 41ac421293..11b46a85a6 100755 ---- a/heartbeat/docker -+++ b/heartbeat/docker -@@ -46,7 +46,7 @@ meta_data() - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/docker-compose b/heartbeat/docker-compose -index 79043ef332..3bc6774186 100755 ---- a/heartbeat/docker-compose -+++ b/heartbeat/docker-compose -@@ -54,8 +54,8 @@ meta_data() - cat < - -- --1.0.3 -+ -+1.0 - - Manages docker services using docker-compose as an OCF resource in an High Availability setup. - It relies on a well-tested docker compose YAML file which distributed on an identical location on all cluster nodes. -diff --git a/heartbeat/dovecot b/heartbeat/dovecot -index edd3d1ba96..5775241c94 100755 ---- a/heartbeat/dovecot -+++ b/heartbeat/dovecot -@@ -49,8 +49,8 @@ meta_data() { - cat < - -- --0.1 -+ -+1.0 - - This script manages Dovecot as an OCF resource in a high-availability setup. - -diff --git a/heartbeat/eDir88.in b/heartbeat/eDir88.in -index 2ef8bbd7cd..cd945d2c31 100644 ---- a/heartbeat/eDir88.in -+++ b/heartbeat/eDir88.in -@@ -81,7 +81,7 @@ eDir_meta_data() { - cat <<-EOFB - - -- -+ - 1.0 - - -diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor -index cf0321ab40..e2c3efa285 100755 ---- a/heartbeat/ethmonitor -+++ b/heartbeat/ethmonitor -@@ -79,8 +79,8 @@ meta_data() { - cat < - -- --1.2 -+ -+1.0 - - - Monitor the vitality of a local network interface. -diff --git a/heartbeat/exportfs b/heartbeat/exportfs -index 15f30bcbd3..2ad0e9892e 100755 ---- a/heartbeat/exportfs -+++ b/heartbeat/exportfs -@@ -27,7 +27,7 @@ exportfs_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/fio.in b/heartbeat/fio.in -index 0347c1ca60..4a312517b4 100644 ---- a/heartbeat/fio.in -+++ b/heartbeat/fio.in -@@ -43,7 +43,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/galera.in b/heartbeat/galera.in -index 7f5f2f1eb2..c363eb2546 100755 ---- a/heartbeat/galera.in -+++ b/heartbeat/galera.in -@@ -125,7 +125,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/garbd b/heartbeat/garbd -index f2a7266dc7..24a6e69bfc 100755 ---- a/heartbeat/garbd -+++ b/heartbeat/garbd -@@ -101,7 +101,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb -index 0bac316c4c..28484b2416 100755 ---- a/heartbeat/gcp-ilb -+++ b/heartbeat/gcp-ilb -@@ -70,7 +70,7 @@ ilb_metadata() { - cat < - -- -+ - 1.0 - - Resource Agent that wraps /usr/bin/nc or /usr/bin/socat to reply to health checks in Google Cloud. 
-diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in -index cbe703c3c5..1e811196e3 100644 ---- a/heartbeat/gcp-pd-move.in -+++ b/heartbeat/gcp-pd-move.in -@@ -68,7 +68,7 @@ METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1/' - METADATA_HEADERS = {'Metadata-Flavor': 'Google'} - METADATA = ''' - -- -+ - 1.0 - - Resource Agent that can attach or detach a regional/zonal disk on current GCP -diff --git a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in -index c9393481e6..2e63b2bf48 100755 ---- a/heartbeat/gcp-vpc-move-ip.in -+++ b/heartbeat/gcp-vpc-move-ip.in -@@ -68,7 +68,7 @@ metadata() { - cat < - -- -+ - 1.0 - - Resource Agent that can move a floating IP addresse within a GCP VPC by changing an -diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in -index 6b240c04d0..3f543feeaa 100644 ---- a/heartbeat/gcp-vpc-move-route.in -+++ b/heartbeat/gcp-vpc-move-route.in -@@ -70,7 +70,7 @@ METADATA_HEADERS = {'Metadata-Flavor': 'Google'} - METADATA = \ - ''' - -- -+ - 1.0 - - Resource Agent that can move a floating IP addresse within a GCP VPC by changing an -diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in -index 7e9d61f551..331226be5e 100755 ---- a/heartbeat/gcp-vpc-move-vip.in -+++ b/heartbeat/gcp-vpc-move-vip.in -@@ -61,7 +61,7 @@ METADATA_HEADERS = {'Metadata-Flavor': 'Google'} - METADATA = \ - ''' - -- -+ - 1.0 - Floating IP Address or Range on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP range to a running instance - Floating IP Address or Range on Google Cloud Platform -diff --git a/heartbeat/iSCSILogicalUnit.in b/heartbeat/iSCSILogicalUnit.in -index 767c739666..86ab1b095d 100644 ---- a/heartbeat/iSCSILogicalUnit.in -+++ b/heartbeat/iSCSILogicalUnit.in -@@ -91,8 +91,8 @@ meta_data() { - cat < - -- --0.9 -+ -+1.0 - - - Manages iSCSI Logical Unit. An iSCSI Logical unit is a subdivision of -diff --git a/heartbeat/iSCSITarget.in b/heartbeat/iSCSITarget.in -index 2bfc718b8f..221d484773 100644 ---- a/heartbeat/iSCSITarget.in -+++ b/heartbeat/iSCSITarget.in -@@ -62,8 +62,8 @@ meta_data() { - cat < - -- --0.9 -+ -+1.0 - - - Manages iSCSI targets. An iSCSI target is a collection of SCSI Logical -diff --git a/heartbeat/ids b/heartbeat/ids -index 8300f69a5c..0d9e1e169e 100755 ---- a/heartbeat/ids -+++ b/heartbeat/ids -@@ -129,7 +129,7 @@ ids_meta_data() { - cat <<-! 
- - -- -+ - 1.0 - - -diff --git a/heartbeat/iface-bridge b/heartbeat/iface-bridge -index e1dfecbff4..75d5371ddd 100755 ---- a/heartbeat/iface-bridge -+++ b/heartbeat/iface-bridge -@@ -98,7 +98,7 @@ bridge_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/iface-vlan b/heartbeat/iface-vlan -index d0481373c0..019c2e158c 100755 ---- a/heartbeat/iface-vlan -+++ b/heartbeat/iface-vlan -@@ -76,7 +76,7 @@ vlan_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/ipsec b/heartbeat/ipsec -index c35c2b337f..c842955bf1 100755 ---- a/heartbeat/ipsec -+++ b/heartbeat/ipsec -@@ -50,7 +50,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/iscsi b/heartbeat/iscsi -index 602a658014..d25aec234a 100755 ---- a/heartbeat/iscsi -+++ b/heartbeat/iscsi -@@ -71,7 +71,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/jboss b/heartbeat/jboss -index 76460dabde..948355a083 100755 ---- a/heartbeat/jboss -+++ b/heartbeat/jboss -@@ -329,7 +329,7 @@ metadata_jboss() - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/jira.in b/heartbeat/jira.in -index 6a4d9c5705..66a8e53f0d 100644 ---- a/heartbeat/jira.in -+++ b/heartbeat/jira.in -@@ -198,8 +198,8 @@ jira_meta_data(){ - cat < - -- -- 0.1 -+ -+ 1.0 - - OCF Resource Agent to manage JIRA software - -diff --git a/heartbeat/kamailio.in b/heartbeat/kamailio.in -index ec194a9135..4f6af3dbd0 100644 ---- a/heartbeat/kamailio.in -+++ b/heartbeat/kamailio.in -@@ -89,7 +89,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/lvmlockd b/heartbeat/lvmlockd -index 685eda36e0..05bb0a2e55 100755 ---- a/heartbeat/lvmlockd -+++ b/heartbeat/lvmlockd -@@ -50,7 +50,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/lxc.in b/heartbeat/lxc.in -index b6c076b32d..1ffbc46ad3 100644 ---- a/heartbeat/lxc.in -+++ b/heartbeat/lxc.in -@@ -58,8 +58,8 @@ meta_data() { - cat < - -- --0.1 -+ -+1.0 - Allows LXC containers to be managed by the cluster. - Notes for lxc Versions before 1.0.0, where the Container is stopped using kill -PWR instead of lxc-stop: - It is 'assumed' that the 'init' system will do an orderly shudown if presented with a 'kill -PWR' signal. 
-diff --git a/heartbeat/lxd-info.in b/heartbeat/lxd-info.in -index 5fc928aff3..f9fb44ac49 100644 ---- a/heartbeat/lxd-info.in -+++ b/heartbeat/lxd-info.in -@@ -48,7 +48,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/machine-info.in b/heartbeat/machine-info.in -index aa9bbd4c4c..bfa7ce5fc0 100644 ---- a/heartbeat/machine-info.in -+++ b/heartbeat/machine-info.in -@@ -48,7 +48,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/mariadb.in b/heartbeat/mariadb.in -index c1969d70e0..3b6feb6f88 100644 ---- a/heartbeat/mariadb.in -+++ b/heartbeat/mariadb.in -@@ -72,7 +72,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/mdraid b/heartbeat/mdraid -index 6f5f563f77..1e6a5d0276 100755 ---- a/heartbeat/mdraid -+++ b/heartbeat/mdraid -@@ -51,7 +51,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/minio b/heartbeat/minio -index 8b47561a49..16ceeed422 100755 ---- a/heartbeat/minio -+++ b/heartbeat/minio -@@ -53,7 +53,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - This script manages Minio in an Active-Passive setup -diff --git a/heartbeat/mpathpersist.in b/heartbeat/mpathpersist.in -index 813a1b1fdc..fcf1b3a4bd 100644 ---- a/heartbeat/mpathpersist.in -+++ b/heartbeat/mpathpersist.in -@@ -74,8 +74,8 @@ meta_data() { - cat < - -- --1.1 -+ -+1.0 - - - This resource agent manages SCSI persistent reservations on multipath devices. -diff --git a/heartbeat/mysql b/heartbeat/mysql -index a1a223cf10..720de8c1a6 100755 ---- a/heartbeat/mysql -+++ b/heartbeat/mysql -@@ -75,7 +75,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/mysql-proxy b/heartbeat/mysql-proxy -index b79f311259..e34396d9a5 100755 ---- a/heartbeat/mysql-proxy -+++ b/heartbeat/mysql-proxy -@@ -109,8 +109,8 @@ meta_data() { - cat < - -- --0.1 -+ -+1.0 - - This script manages MySQL Proxy as an OCF resource in a high-availability setup. - -diff --git a/heartbeat/nagios b/heartbeat/nagios -index 3d07b141c8..a45d5b8be8 100755 ---- a/heartbeat/nagios -+++ b/heartbeat/nagios -@@ -52,8 +52,8 @@ nagios_meta_data() { - cat < - -- --0.75 -+ -+1.0 - - OCF Resource script for Nagios 3.x or 4.x. It manages a Nagios instance as a HA resource. - Nagios resource agent -diff --git a/heartbeat/named b/heartbeat/named -index 535410df57..f3a17e9ecd 100755 ---- a/heartbeat/named -+++ b/heartbeat/named -@@ -67,7 +67,7 @@ named_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/nfsnotify.in b/heartbeat/nfsnotify.in -index 8e80f77e97..6e49535577 100644 ---- a/heartbeat/nfsnotify.in -+++ b/heartbeat/nfsnotify.in -@@ -58,7 +58,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver -index 96b19abe36..9624ad3abd 100755 ---- a/heartbeat/nfsserver -+++ b/heartbeat/nfsserver -@@ -39,7 +39,7 @@ nfsserver_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/nginx b/heartbeat/nginx -index 9a778c7f58..07da7681b3 100755 ---- a/heartbeat/nginx -+++ b/heartbeat/nginx -@@ -636,7 +636,7 @@ metadata_nginx(){ - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/nvmet-namespace b/heartbeat/nvmet-namespace -index 0ea31f121b..fa9a9f8e9c 100755 ---- a/heartbeat/nvmet-namespace -+++ b/heartbeat/nvmet-namespace -@@ -41,8 +41,8 @@ meta_data() { - cat < - -- --0.9 -+ -+1.0 - - - Manages NVMe-oF namespaces. An NVMe-oF namespace is part of an NVMe-oF target. 
-diff --git a/heartbeat/nvmet-port b/heartbeat/nvmet-port -index 8728857d97..cbf1109754 100755 ---- a/heartbeat/nvmet-port -+++ b/heartbeat/nvmet-port -@@ -46,8 +46,8 @@ meta_data() { - cat < - -- --0.9 -+ -+1.0 - - - Manages NVMe-oF ports. An NVMe-oF port is part of an NVMe-oF target. -diff --git a/heartbeat/nvmet-subsystem b/heartbeat/nvmet-subsystem -index d469fc17b2..e004ec142d 100755 ---- a/heartbeat/nvmet-subsystem -+++ b/heartbeat/nvmet-subsystem -@@ -41,8 +41,8 @@ meta_data() { - cat < - -- --0.9 -+ -+1.0 - - - Manages NVMe-oF subsystems. An NVMe-oF subsystem is part of an NVMe-oF target. -diff --git a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume -index f18fb5c5e4..4cea25f037 100755 ---- a/heartbeat/openstack-cinder-volume -+++ b/heartbeat/openstack-cinder-volume -@@ -59,8 +59,8 @@ metadata() { - cat < - -- --2.0 -+ -+1.0 - - Resource Agent to attach a cinder volume to an instance. - It relies on attributes given by openstack-info resource agent (openstack_id attribute). -diff --git a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip -index 0e4a96099f..6a0c5bd7b2 100755 ---- a/heartbeat/openstack-floating-ip -+++ b/heartbeat/openstack-floating-ip -@@ -57,8 +57,8 @@ metadata() { - cat < - -- --2.0 -+ -+1.0 - - Resource Agent to move a floating IP address from an instance to another one. - It relies on attributes given by openstack-info resource agent (openstack_ports, openstack_id attributes). -diff --git a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in -index 95c95d28ff..53215ed2ef 100755 ---- a/heartbeat/openstack-info.in -+++ b/heartbeat/openstack-info.in -@@ -53,7 +53,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip -index 45aa87d82e..d96705fdfc 100755 ---- a/heartbeat/openstack-virtual-ip -+++ b/heartbeat/openstack-virtual-ip -@@ -57,8 +57,8 @@ metadata() { - cat < - -- --2.0 -+ -+1.0 - - Resource Agent to move a virtual IP address from an instance to another one - by adding an allowed-address pair associated with an instance port. -diff --git a/heartbeat/oraasm b/heartbeat/oraasm -index 3cc3342125..34c8df0b11 100755 ---- a/heartbeat/oraasm -+++ b/heartbeat/oraasm -@@ -42,8 +42,8 @@ oraasm_meta_data() { - cat < - -- --0.75 -+ -+1.0 - - OCF Resource script for Oracle ASM. It uses the ohasd init-script to manage a Oracle ASM Disk Group as a HA resource. - Oracle ASM resource agent -diff --git a/heartbeat/oracle b/heartbeat/oracle -index 124060834e..caee18a8ac 100755 ---- a/heartbeat/oracle -+++ b/heartbeat/oracle -@@ -74,7 +74,7 @@ oracle_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/oralsnr b/heartbeat/oralsnr -index 1ecf8fca88..dd0df1c614 100755 ---- a/heartbeat/oralsnr -+++ b/heartbeat/oralsnr -@@ -70,7 +70,7 @@ oralsnr_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/ovsmonitor b/heartbeat/ovsmonitor -index 8a80a8c923..d03902ea88 100755 ---- a/heartbeat/ovsmonitor -+++ b/heartbeat/ovsmonitor -@@ -72,8 +72,8 @@ meta_data() { - cat < - -- --0.1 -+ -+1.0 - - - Monitor the vitality of a local ovs bond. -diff --git a/heartbeat/pgagent b/heartbeat/pgagent -index eea80fe4ac..b1e61b35d0 100755 ---- a/heartbeat/pgagent -+++ b/heartbeat/pgagent -@@ -79,7 +79,7 @@ meta_data() { - cat < - -- -+ - 1.0 - This is a pgagent Resource Agent. 
- Controls pgagent -diff --git a/heartbeat/pgsql b/heartbeat/pgsql -index a93400c0e5..e3a39038fb 100755 ---- a/heartbeat/pgsql -+++ b/heartbeat/pgsql -@@ -132,7 +132,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/pingd b/heartbeat/pingd -index 57fa0dba37..e2d5c31a66 100755 ---- a/heartbeat/pingd -+++ b/heartbeat/pingd -@@ -61,7 +61,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/podman b/heartbeat/podman -index 034dfff76e..fd901c968e 100755 ---- a/heartbeat/podman -+++ b/heartbeat/podman -@@ -47,7 +47,7 @@ meta_data() - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/portblock b/heartbeat/portblock -index 2e84a101f9..f1a7cc8c1b 100755 ---- a/heartbeat/portblock -+++ b/heartbeat/portblock -@@ -131,7 +131,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/postfix b/heartbeat/postfix -index 300fbc3688..7f88a91fbe 100755 ---- a/heartbeat/postfix -+++ b/heartbeat/postfix -@@ -52,8 +52,8 @@ meta_data() { - cat < - -- --0.1 -+ -+1.0 - - This script manages Postfix as an OCF resource in a high-availability setup. - -diff --git a/heartbeat/pound b/heartbeat/pound -index 7a0ad547b4..48aa221839 100755 ---- a/heartbeat/pound -+++ b/heartbeat/pound -@@ -50,7 +50,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/proftpd b/heartbeat/proftpd -index e5cf36eece..a9fc5ffd76 100755 ---- a/heartbeat/proftpd -+++ b/heartbeat/proftpd -@@ -61,7 +61,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - This script manages Proftpd in an Active-Passive setup -diff --git a/heartbeat/rabbitmq-cluster.in b/heartbeat/rabbitmq-cluster.in -index abd0662f26..0b8916c287 100755 ---- a/heartbeat/rabbitmq-cluster.in -+++ b/heartbeat/rabbitmq-cluster.in -@@ -67,7 +67,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/redis.in b/heartbeat/redis.in -index f53d469641..7f886c7ea9 100755 ---- a/heartbeat/redis.in -+++ b/heartbeat/redis.in -@@ -91,7 +91,7 @@ redis_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/rkt b/heartbeat/rkt -index 2d3ef0e24c..724986f6c8 100755 ---- a/heartbeat/rkt -+++ b/heartbeat/rkt -@@ -40,7 +40,7 @@ meta_data() - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/rsyncd b/heartbeat/rsyncd -index cd4ab91c5a..d15aaf6a8a 100755 ---- a/heartbeat/rsyncd -+++ b/heartbeat/rsyncd -@@ -52,7 +52,7 @@ meta_data() - cat < - -- -+ - 1.0 - - This script manages rsync daemon -diff --git a/heartbeat/rsyslog.in b/heartbeat/rsyslog.in -index 9cb9a0ad68..80d5c17563 100644 ---- a/heartbeat/rsyslog.in -+++ b/heartbeat/rsyslog.in -@@ -74,7 +74,7 @@ metadata_rsyslog() - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/scsi2reservation b/heartbeat/scsi2reservation -index 3e4ff9584f..9b29ec4608 100755 ---- a/heartbeat/scsi2reservation -+++ b/heartbeat/scsi2reservation -@@ -23,7 +23,7 @@ scsi2reserve_meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/sfex b/heartbeat/sfex -index ac82191938..b079ca0aac 100755 ---- a/heartbeat/sfex -+++ b/heartbeat/sfex -@@ -68,8 +68,8 @@ meta_data() { - cat < - -- --1.3 -+ -+1.0 - - - Resource script for SF-EX. It manages a shared storage medium exclusively . -diff --git a/heartbeat/sg_persist.in b/heartbeat/sg_persist.in -index 7af38034e7..678762f408 100644 ---- a/heartbeat/sg_persist.in -+++ b/heartbeat/sg_persist.in -@@ -75,8 +75,8 @@ meta_data() { - cat < - -- --1.1 -+ -+1.0 - - - This resource agent manages SCSI PERSISTENT RESERVATIONS. 
-diff --git a/heartbeat/slapd.in b/heartbeat/slapd.in -index bd3995bdc1..ffccd1d357 100644 ---- a/heartbeat/slapd.in -+++ b/heartbeat/slapd.in -@@ -89,8 +89,8 @@ meta_data() - cat < - -- --0.1 -+ -+1.0 - - - Resource script for Stand-alone LDAP Daemon (slapd). It manages a slapd instance as an OCF resource. -diff --git a/heartbeat/smb-share.in b/heartbeat/smb-share.in -index a904febb1a..8a1a0a8604 100755 ---- a/heartbeat/smb-share.in -+++ b/heartbeat/smb-share.in -@@ -138,7 +138,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - This RA is used to control samba shares on the fly. -diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in -index 875095670e..4cd861c1b6 100644 ---- a/heartbeat/storage-mon.in -+++ b/heartbeat/storage-mon.in -@@ -69,7 +69,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in -index fef76474e6..8b315029b1 100755 ---- a/heartbeat/sybaseASE.in -+++ b/heartbeat/sybaseASE.in -@@ -139,7 +139,7 @@ meta_data() - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/symlink b/heartbeat/symlink -index 1266061ddf..decd9f74e5 100755 ---- a/heartbeat/symlink -+++ b/heartbeat/symlink -@@ -37,8 +37,8 @@ meta_data() { - cat < - -- --1.1 -+ -+1.0 - - - This resource agent that manages a symbolic link (symlink). -diff --git a/heartbeat/syslog-ng.in b/heartbeat/syslog-ng.in -index 47a23f1887..246db28b76 100644 ---- a/heartbeat/syslog-ng.in -+++ b/heartbeat/syslog-ng.in -@@ -88,7 +88,7 @@ metadata_syslog_ng() - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/tomcat b/heartbeat/tomcat -index 4e6c04c376..fa2715140b 100755 ---- a/heartbeat/tomcat -+++ b/heartbeat/tomcat -@@ -339,7 +339,7 @@ metadata_tomcat() - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/varnish b/heartbeat/varnish -index 07a4f6449f..5fbf35c61a 100755 ---- a/heartbeat/varnish -+++ b/heartbeat/varnish -@@ -85,7 +85,7 @@ meta_data() { - cat < - -- -+ - 1.0 - - -diff --git a/heartbeat/vdo-vol b/heartbeat/vdo-vol -index 3c79df58a2..94822cb82c 100755 ---- a/heartbeat/vdo-vol -+++ b/heartbeat/vdo-vol -@@ -38,8 +38,8 @@ vdo_meta_data() { - cat < - -- --0.75 -+ -+1.0 - - - OCF Resource script for VDO (Virtual Data Optimizer) volume(s). It manages VDO volume(s) as a HA resource. -diff --git a/heartbeat/vmware b/heartbeat/vmware -index 7cd61dd5fd..f784fb10ad 100755 ---- a/heartbeat/vmware -+++ b/heartbeat/vmware -@@ -311,8 +311,8 @@ meta_data() { - cat < - -- --0.2 -+ -+1.0 - - OCF compliant script to control vmware server 2.0 virtual machines. 
- -diff --git a/heartbeat/vsftpd.in b/heartbeat/vsftpd.in -index 57f6669b7a..3831c4fa82 100644 ---- a/heartbeat/vsftpd.in -+++ b/heartbeat/vsftpd.in -@@ -50,7 +50,7 @@ meta_data() - cat < - -- -+ - 1.0 - - This script manages vsftpd -diff --git a/heartbeat/zabbixserver b/heartbeat/zabbixserver -index 08f7e798b7..b4b5b7c2be 100755 ---- a/heartbeat/zabbixserver -+++ b/heartbeat/zabbixserver -@@ -55,8 +55,8 @@ zabbixserver_meta_data() { - cat < - -- --0.0.1 -+ -+1.0 - - - This is a Zabbix server Resource Agent for zabbix_server monitoring - -From d03aedd47b392437d11a24be1bcc5696caa7fbc5 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 7 Oct 2021 11:10:51 +0200 -Subject: [PATCH 2/3] ocf.py: add agent and OCF version parameters (both - defaults to 1.0) - ---- - heartbeat/ocf.py | 10 +++++++--- - 1 file changed, 7 insertions(+), 3 deletions(-) - -diff --git a/heartbeat/ocf.py b/heartbeat/ocf.py -index 6dbc10d6b7..dda2fed4bb 100644 ---- a/heartbeat/ocf.py -+++ b/heartbeat/ocf.py -@@ -305,10 +305,12 @@ class Agent(object): - of how to use it. - """ - -- def __init__(self, name, shortdesc, longdesc): -+ def __init__(self, name, shortdesc, longdesc, version=1.0, ocf_version=1.0): - self.name = name - self.shortdesc = shortdesc - self.longdesc = longdesc -+ self.version = version -+ self.ocf_version = ocf_version - self.parameters = [] - self.actions = [] - self._handlers = {} -@@ -342,8 +344,8 @@ def __str__(self): - def to_xml(self): - return """ - -- --1.0 -+ -+{ocf_version} - - {longdesc} - -@@ -359,6 +361,8 @@ def to_xml(self): - - - """.format(name=self.name, -+ version = self.version, -+ ocf_version = self.ocf_version, - longdesc=self.longdesc, - shortdesc=self.shortdesc, - parameters="".join(p.to_xml() for p in self.parameters), - -From 22dd98f5c325d0b48ade6ad7406d3c45b7dfec2c Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 7 Oct 2021 15:29:25 +0200 -Subject: [PATCH 3/3] metadata.rng: update to support resource agent version - according to the OCF standard - ---- - heartbeat/metadata.rng | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/heartbeat/metadata.rng b/heartbeat/metadata.rng -index ac534db82d..3dd7355472 100644 ---- a/heartbeat/metadata.rng -+++ b/heartbeat/metadata.rng -@@ -2,6 +2,8 @@ - - - -+ -+ - - - diff --git a/SOURCES/bz2008333-gcp-pd-move-gcp-vpc-move-route-dont-fail-due-to-incorrect-rc.patch b/SOURCES/bz2008333-gcp-pd-move-gcp-vpc-move-route-dont-fail-due-to-incorrect-rc.patch deleted file mode 100644 index 00a04b4..0000000 --- a/SOURCES/bz2008333-gcp-pd-move-gcp-vpc-move-route-dont-fail-due-to-incorrect-rc.patch +++ /dev/null @@ -1,64 +0,0 @@ -From fcd2565602146c0b9317d159cecb8935e304c7ce Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 30 Sep 2021 10:23:17 +0200 -Subject: [PATCH] gcp-pd-move/gcp-vpc-move-route: dont fail failed resources - instantly (caused by OCF_ERR_CONFIGURED) - ---- - heartbeat/gcp-pd-move.in | 4 ++-- - heartbeat/gcp-vpc-move-route.in | 6 +++--- - 2 files changed, 5 insertions(+), 5 deletions(-) - -diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in -index e99cc71f88..cbe703c3c5 100644 ---- a/heartbeat/gcp-pd-move.in -+++ b/heartbeat/gcp-pd-move.in -@@ -157,7 +157,7 @@ def populate_vars(): - CONN = googleapiclient.discovery.build('compute', 'v1') - except Exception as e: - logger.error('Couldn\'t connect with google api: ' + str(e)) -- sys.exit(ocf.OCF_ERR_CONFIGURED) -+ sys.exit(ocf.OCF_ERR_GENERIC) - - for param in PARAMETERS: - value = os.environ.get('OCF_RESKEY_%s' % param, 
PARAMETERS[param]) -@@ -172,7 +172,7 @@ def populate_vars(): - except Exception as e: - logger.error( - 'Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) -- sys.exit(ocf.OCF_ERR_CONFIGURED) -+ sys.exit(ocf.OCF_ERR_GENERIC) - - PROJECT = get_metadata('project/project-id') - if PARAMETERS['disk_scope'] in ['detect', 'regional']: -diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in -index dac6e4ea8c..6b240c04d0 100644 ---- a/heartbeat/gcp-vpc-move-route.in -+++ b/heartbeat/gcp-vpc-move-route.in -@@ -243,7 +243,7 @@ def validate(ctx): - ctx.conn = googleapiclient.discovery.build('compute', 'v1', credentials=credentials, cache_discovery=False) - except Exception as e: - logger.error('Couldn\'t connect with google api: ' + str(e)) -- sys.exit(OCF_ERR_CONFIGURED) -+ sys.exit(OCF_ERR_GENERIC) - - ctx.ip = os.environ.get('OCF_RESKEY_ip') - if not ctx.ip: -@@ -258,7 +258,7 @@ def validate(ctx): - except Exception as e: - logger.error( - 'Instance information not found. Is this a GCE instance ?: %s', str(e)) -- sys.exit(OCF_ERR_CONFIGURED) -+ sys.exit(OCF_ERR_GENERIC) - - ctx.instance_url = '%s/projects/%s/zones/%s/instances/%s' % ( - GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance) -@@ -273,7 +273,7 @@ def validate(ctx): - idxs = ctx.iproute.link_lookup(ifname=ctx.interface) - if not idxs: - logger.error('Network interface not found') -- sys.exit(OCF_ERR_CONFIGURED) -+ sys.exit(OCF_ERR_GENERIC) - ctx.iface_idx = idxs[0] - - diff --git a/SOURCES/bz2012057-Route-return-OCF_NOT_RUNNING-missing-route.patch b/SOURCES/bz2012057-Route-return-OCF_NOT_RUNNING-missing-route.patch deleted file mode 100644 index 53d3299..0000000 --- a/SOURCES/bz2012057-Route-return-OCF_NOT_RUNNING-missing-route.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 7c54e4ecda33c90a1046c0688774f5b847ab10fe Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 7 Dec 2021 10:37:24 +0100 -Subject: [PATCH] Route: return OCF_NOT_RUNNING for probe action when interface - or route doesnt exist - ---- - heartbeat/Route | 15 +++++---------- - 1 file changed, 5 insertions(+), 10 deletions(-) - -diff --git a/heartbeat/Route b/heartbeat/Route -index 8b390615a..7db41d0ae 100755 ---- a/heartbeat/Route -+++ b/heartbeat/Route -@@ -227,15 +227,6 @@ route_stop() { - } - - route_status() { -- if [ -n "${OCF_RESKEY_device}" ]; then -- # Must check if device exists or is gone. -- # If device is gone, route is also unconfigured. -- ip link show dev ${OCF_RESKEY_device} >/dev/null 2>&1 -- if [ $? -ne 0 ]; then -- # Assume device does not exist, and short-circuit here. -- return $OCF_NOT_RUNNING -- fi -- fi - show_output="$(ip $addr_family route show $(create_route_spec) 2>/dev/null)" - if [ $? -eq 0 ]; then - if [ -n "$show_output" ]; then -@@ -251,7 +242,11 @@ route_status() { - else - # "ip route show" returned an error code. Assume something - # went wrong. 
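The return-code swaps above, and the Route change in the hunk that follows, rest on the same Pacemaker convention: OCF_ERR_CONFIGURED is treated as a fatal misconfiguration and stops further recovery attempts, OCF_ERR_GENERIC is a soft failure the cluster may retry or move elsewhere, and a probe that finds nothing to manage should simply answer OCF_NOT_RUNNING. A minimal sketch of that idiom in a shell agent (resource_is_active is a placeholder, not code from any patch above):

    my_agent_monitor() {
        resource_is_active && return $OCF_SUCCESS
        if ocf_is_probe; then
            # Initial probe: "not running on this node" is a valid, non-error answer.
            return $OCF_NOT_RUNNING
        fi
        # Ordinary monitor failure: recoverable, unlike the fatal OCF_ERR_CONFIGURED.
        return $OCF_ERR_GENERIC
    }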
-- return $OCF_ERR_GENERIC -+ if ocf_is_probe; then -+ return $OCF_NOT_RUNNING -+ else -+ return $OCF_ERR_GENERIC -+ fi - fi - } - diff --git a/SOURCES/bz2014415-nfsserver-add-nfs_server_scope-parameter.patch b/SOURCES/bz2014415-nfsserver-add-nfs_server_scope-parameter.patch deleted file mode 100644 index 5b27e3e..0000000 --- a/SOURCES/bz2014415-nfsserver-add-nfs_server_scope-parameter.patch +++ /dev/null @@ -1,366 +0,0 @@ -From 764dacb6195f8940f13b9c322b1bc8189c5619fc Mon Sep 17 00:00:00 2001 -From: Lars Ellenberg -Date: Mon, 6 Sep 2021 12:13:42 +0200 -Subject: [PATCH 1/6] Fix NFSv4 lock failover: set NFS Server Scope - -Problem: https://github.com/ClusterLabs/resource-agents/issues/1644 -RFC8881, 8.4.2.1 State Reclaim: - -| If the server scope is different, the client should not attempt to -| reclaim locks. In this situation, no lock reclaim is possible. -| Any attempt to re-obtain the locks with non-reclaim operations is -| problematic since there is no guarantee that the existing -| filehandles will be recognized by the new server, or that if -| recognized, they denote the same objects. It is best to treat the -| locks as having been revoked by the reconfiguration event. - -That's why for lock reclaim to even be attempted, we have to define and set -the same server scope for NFSD on all cluster nodes in the NFS failover -cluster. And in linux, that is done by setting the uts nodename for the -command that starts the nfsd kernel threads. - -For "init scripts", just set it directly using unshare --uts. -For systemd units, add NFS_SERVER_SCOPE to some environment files -and inject the "unshare --uts" into the ExecStart command lines -using override drop-in files. ---- - heartbeat/nfsserver | 120 +++++++++++++++++++++++++++++++++++++++++++- - 1 file changed, 119 insertions(+), 1 deletion(-) - -diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver -index 96b19abe36..0888378645 100755 ---- a/heartbeat/nfsserver -+++ b/heartbeat/nfsserver -@@ -5,6 +5,18 @@ - # by hxinwei@gmail.com - # License: GNU General Public License v2 (GPLv2) and later - -+ -+# I don't know for certain whether all services actuall _need_ this, -+# I know that at least nfs-server needs it. -+# The rgmanager resource agent in rgmanager/src/resources/nfsserver.sh.in -+# did the unshare for gssd and idmapd as well, even though it seems unclear why. -+# Let's start with just the nfs-server, and add others if/when we have clear -+# indication they need it. -+#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpcbind.service" -+NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-server.service" -+SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE=/run/sysconfig/nfs-server-scope -+SYSTEMD_UNSHARE_UTS_DROPIN=51-resource-agents-unshare-uts.conf -+ - if [ -n "$OCF_DEBUG_LIBRARY" ]; then - . $OCF_DEBUG_LIBRARY - else -@@ -99,6 +111,31 @@ Specifies the length of sm-notify retry time (minutes). - - - -+ -+ -+RFC8881, 8.4.2.1 State Reclaim: -+ -+If the server scope is different, the client should not attempt to -+reclaim locks. In this situation, no lock reclaim is possible. -+Any attempt to re-obtain the locks with non-reclaim operations is -+problematic since there is no guarantee that the existing -+filehandles will be recognized by the new server, or that if -+recognized, they denote the same objects. It is best to treat the -+locks as having been revoked by the reconfiguration event. 
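The scope only works if every cluster node uses the same value, and the description that follows suggests the failover "service IP" for it; a hypothetical crm snippet in the same deployment style used elsewhere in these patches (resource name and address are made up):

    crm configure primitive rsc_nfsserver ocf:heartbeat:nfsserver \
        params nfs_server_scope=10.0.0.10 \
        op monitor interval=30s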
-+ -+For lock reclaim to even be attempted, we have to define and set the same -+server scope for NFSD on all cluster nodes in the NFS failover cluster. -+ -+This agent won't "guess" a suitable server scope name for you, you need to -+explicitly specify this. But without it, NFSv4 lock reclaim after failover -+won't work properly. Suggested value: the failover "service IP". -+ -+ -+RFC8881 NFS server scope for (lock) state reclaim after failover. -+ -+ -+ -+ - - - Comma separated list of floating IP addresses used to access the nfs service -@@ -269,7 +306,11 @@ nfs_exec() - set_exec_mode - - case $EXEC_MODE in -- 1) ${OCF_RESKEY_nfs_init_script} $cmd;; -+ 1) if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then -+ ${OCF_RESKEY_nfs_init_script} $cmd -+ else -+ unshare -u /bin/sh -c "hostname ${OCF_RESKEY_nfs_server_scope}; exec ${OCF_RESKEY_nfs_init_script} $cmd" -+ fi ;; - 2) if ! echo $svc | grep -q "\."; then - svc="${svc}.service" - fi -@@ -623,6 +664,74 @@ notify_locks() - fi - } - -+# Problem: https://github.com/ClusterLabs/resource-agents/issues/1644 -+# RFC8881, 8.4.2.1 State Reclaim: -+# -+# | If the server scope is different, the client should not attempt to -+# | reclaim locks. In this situation, no lock reclaim is possible. -+# | Any attempt to re-obtain the locks with non-reclaim operations is -+# | problematic since there is no guarantee that the existing -+# | filehandles will be recognized by the new server, or that if -+# | recognized, they denote the same objects. It is best to treat the -+# | locks as having been revoked by the reconfiguration event. -+# -+# That's why for lock reclaim to even be attempted, we have to define and set -+# the same server scope for NFSD on all cluster nodes in the NFS failover -+# cluster. And in linux, that is done by setting the uts nodename for the -+# command that starts the nfsd kernel threads. 
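As a reference point, the drop-in that inject_unshare_uts_name_into_systemd_units() (defined next) writes to /run/systemd/system/nfs-server.service.d/51-resource-agents-unshare-uts.conf comes out roughly as follows; the trailing /usr/sbin/rpc.nfsd stands in for whatever ExecStart the distribution's nfs-server.service really carries, and the scope value itself is read from the environment file named on the EnvironmentFile= line:

    [Service]
    EnvironmentFile=/run/sysconfig/nfs-server-scope
    # reset list of exec start, then re-populate with unshared uts namespace
    ExecStart=
    ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname ${NFS_SERVER_SCOPE}; exec "$@"' -- /usr/sbin/rpc.nfsd
    # END OF DROP-IN FOR NFS SERVER SCOPE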
-+# -+inject_unshare_uts_name_into_systemd_units () -+{ -+ local END_TAG="# END OF DROP-IN FOR NFS SERVER SCOPE" -+ local services -+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1) -+ -+ local svc dir dropin edited_exec_start do_reload=false -+ for svc in $services ; do -+ dir=/run/systemd/system/$svc.d -+ dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN -+ grep -sqF "$END_TAG" "$dropin" && continue -+ -+ test -d "$dir" || mkdir -p "$dir" -+ test -e "$dropin" && rm -f "$dropin" -+ -+ edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\(.*\\)#ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\1#p") -+ cat > "$dropin" <<___ -+[Service] -+EnvironmentFile=$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE -+# reset list of exec start, then re-populate with unshared uts namespace -+ExecStart= -+$edited_exec_start -+$END_TAG -+___ -+ do_reload=true -+ ocf_log debug "injected unshare --uts into $dropin" -+ done -+ -+ mkdir -p "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE%/*}" -+ echo "NFS_SERVER_SCOPE=$OCF_RESKEY_nfs_server_scope" > "$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE" -+ -+ $do_reload && systemctl daemon-reload -+} -+ -+remove_unshare_uts_dropins () -+{ -+ local services -+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE) -+ -+ local svc dir dropin do_reload=false -+ for svc in $services ; do -+ dir=/run/systemd/system/$svc.d -+ dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN -+ test -e "$dropin" || continue -+ rm -f "$dropin" -+ do_reload=true -+ ocf_log debug "removed unshare --uts from $svc" -+ done -+ rm -f "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE}" -+ $do_reload && systemctl daemon-reload -+} -+ - nfsserver_start () - { - local rc; -@@ -636,6 +745,13 @@ nfsserver_start () - is_redhat_based && set_env_args - bind_tree - prepare_directory -+ case $EXEC_MODE in [23]) -+ if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then -+ remove_unshare_uts_dropins -+ else -+ inject_unshare_uts_name_into_systemd_units -+ fi ;; -+ esac - - if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then - mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir -@@ -854,6 +970,8 @@ nfsserver_stop () - ocf_log info "NFS server stopped" - fi - -+ case $EXEC_MODE in [23]) remove_unshare_uts_dropins;; esac -+ - return $rc - } - - -From 515697b53c1614d05d39491c9af83e8d8b844b17 Mon Sep 17 00:00:00 2001 -From: Lars Ellenberg -Date: Fri, 8 Oct 2021 12:01:41 +0200 -Subject: [PATCH 2/6] Fix NFSv4 lock failover: set NFS Server Scope, regardless - of EXEC_MODE - -Debian (and other systems) may provide "init scripts", -which will only redirect back to systemd. - -If we just unshare --uts the init script invocation, -the uts namespace is useless in that case. - -If systemd is running, mangle the nfs-server.service unit, -independent of the "EXEC_MODE". ---- - heartbeat/nfsserver | 18 ++++++++++++++---- - 1 file changed, 14 insertions(+), 4 deletions(-) - -diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver -index 0888378645..054aabbaf6 100755 ---- a/heartbeat/nfsserver -+++ b/heartbeat/nfsserver -@@ -745,13 +745,20 @@ nfsserver_start () - is_redhat_based && set_env_args - bind_tree - prepare_directory -- case $EXEC_MODE in [23]) -+ -+ # Debian (and other systems) may provide "init scripts", -+ # which will only redirect back to systemd. -+ # If we just unshare --uts the init script invocation, -+ # the uts namespace is useless in that case. 
-+ # If systemd is running, mangle the nfs-server.service unit, -+ # independent of the "EXEC_MODE" we detected. -+ if $systemd_is_running ; then - if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then - remove_unshare_uts_dropins - else - inject_unshare_uts_name_into_systemd_units -- fi ;; -- esac -+ fi -+ fi - - if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then - mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir -@@ -970,7 +977,9 @@ nfsserver_stop () - ocf_log info "NFS server stopped" - fi - -- case $EXEC_MODE in [23]) remove_unshare_uts_dropins;; esac -+ if $systemd_is_running; then -+ remove_unshare_uts_dropins -+ fi - - return $rc - } -@@ -1008,6 +1017,7 @@ nfsserver_validate () - } - - nfsserver_validate -+systemd_is_running && systemd_is_running=true || systemd_is_running=false - - case $__OCF_ACTION in - start) nfsserver_start - -From e83c20d88f404f9f9d829c654883d60eb6cc9ff3 Mon Sep 17 00:00:00 2001 -From: Lars Ellenberg -Date: Fri, 8 Oct 2021 17:06:18 +0200 -Subject: [PATCH 3/6] Fix NFSv4 lock failover: add missing "|cut -f1" in - remove_unshare_uts_dropins - ---- - heartbeat/nfsserver | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver -index 054aabbaf6..d3db89a537 100755 ---- a/heartbeat/nfsserver -+++ b/heartbeat/nfsserver -@@ -717,7 +717,7 @@ ___ - remove_unshare_uts_dropins () - { - local services -- services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE) -+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1) - - local svc dir dropin do_reload=false - for svc in $services ; do - -From b5b0e4a0b60d285af576b2d8ecfbe95e5a177a87 Mon Sep 17 00:00:00 2001 -From: Lars Ellenberg -Date: Fri, 8 Oct 2021 17:07:13 +0200 -Subject: [PATCH 4/6] Fix NFSv4 lock failover: get rid of "world-inaccessible" - warning - -by temporarily changing the umask before generating the dropins ---- - heartbeat/nfsserver | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver -index d3db89a537..447e0302b2 100755 ---- a/heartbeat/nfsserver -+++ b/heartbeat/nfsserver -@@ -687,6 +687,8 @@ inject_unshare_uts_name_into_systemd_units () - services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1) - - local svc dir dropin edited_exec_start do_reload=false -+ local old_umask=$(umask) -+ umask 0022 - for svc in $services ; do - dir=/run/systemd/system/$svc.d - dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN -@@ -710,6 +712,7 @@ ___ - - mkdir -p "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE%/*}" - echo "NFS_SERVER_SCOPE=$OCF_RESKEY_nfs_server_scope" > "$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE" -+ umask $old_umask - - $do_reload && systemctl daemon-reload - } - -From 3c6c91ce5a00eeef9cd766389d73a0b42580a1e6 Mon Sep 17 00:00:00 2001 -From: Lars Ellenberg -Date: Fri, 8 Oct 2021 17:08:09 +0200 -Subject: [PATCH 5/6] Fix NFSv4 lock failover: deal with "special executable - prefix" chars in ExecStart - ---- - heartbeat/nfsserver | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver -index 447e0302b2..5326bd2c6e 100755 ---- a/heartbeat/nfsserver -+++ b/heartbeat/nfsserver -@@ -697,7 +697,7 @@ inject_unshare_uts_name_into_systemd_units () - test -d "$dir" || mkdir -p "$dir" - test -e "$dropin" && rm -f "$dropin" - -- edited_exec_start=$(systemctl cat $svc | sed -ne 
"s#^ExecStart=\\(.*\\)#ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\1#p") -+ edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\([-+:!@]*\\)\\(.*\\)#ExecStart=\\1/usr/bin/unshare --uts /bin/sh -c 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\2#p") - cat > "$dropin" <<___ - [Service] - EnvironmentFile=$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE - -From 512fbaf61e6d24a1236ef50e323ea17a62485c36 Mon Sep 17 00:00:00 2001 -From: Lars Ellenberg -Date: Fri, 8 Oct 2021 17:08:59 +0200 -Subject: [PATCH 6/6] Fix NFSv4 lock failover: add rpc-statd-notify to the - comment list of potentially interesting services - ---- - heartbeat/nfsserver | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver -index 5326bd2c6e..240dd1a76c 100755 ---- a/heartbeat/nfsserver -+++ b/heartbeat/nfsserver -@@ -12,7 +12,7 @@ - # did the unshare for gssd and idmapd as well, even though it seems unclear why. - # Let's start with just the nfs-server, and add others if/when we have clear - # indication they need it. --#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpcbind.service" -+#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpc-statd-notify.service rpcbind.service" - NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-server.service" - SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE=/run/sysconfig/nfs-server-scope - SYSTEMD_UNSHARE_UTS_DROPIN=51-resource-agents-unshare-uts.conf diff --git a/SOURCES/bz2015789-gcp-ilb-1-fix-log_enable.patch b/SOURCES/bz2015789-gcp-ilb-1-fix-log_enable.patch deleted file mode 100644 index b72f3e3..0000000 --- a/SOURCES/bz2015789-gcp-ilb-1-fix-log_enable.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 9a7b47f1838e9d6e3c807e9db5312097adb5c499 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Fri, 5 Nov 2021 10:30:49 +0100 -Subject: [PATCH] gcp-ilb/Squid: fix issues detected by CI - ---- - heartbeat/Squid.in | 2 +- - heartbeat/gcp-ilb | 4 ++-- - 2 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb -index 28484b241..48dc3ac4e 100755 ---- a/heartbeat/gcp-ilb -+++ b/heartbeat/gcp-ilb -@@ -53,12 +53,12 @@ pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid" - - - #Validate command for logging --if $OCF_RESKEY_log_enable = "true"; then -+if [ $OCF_RESKEY_log_enable = "true" ]; then - if type $OCF_RESKEY_log_cmd > /dev/null 2>&1; then - logging_cmd="$OCF_RESKEY_log_cmd $OCF_RESKEY_log_params" - ocf_log debug "Logging command is: \'$logging_cmd\' " - else -- $OCF_RESKEY_log_enable = "false" -+ OCF_RESKEY_log_enable="false" - ocf_log err "\'$logging_cmd\' is invalid. External logging disabled." 
- - fi; diff --git a/SOURCES/bz2015789-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch b/SOURCES/bz2015789-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch deleted file mode 100644 index 8a7df42..0000000 --- a/SOURCES/bz2015789-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 14576f7ca02fb0abff188238ac019e88ab06e878 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 9 Nov 2021 11:49:36 +0100 -Subject: [PATCH] gcp-ilb: only check if log_cmd binary is available if - log_enable is true - ---- - heartbeat/gcp-ilb | 9 +++++---- - 1 file changed, 5 insertions(+), 4 deletions(-) - -diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb -index 48dc3ac4e..f84f373b7 100755 ---- a/heartbeat/gcp-ilb -+++ b/heartbeat/gcp-ilb -@@ -37,7 +37,7 @@ if type "socat" > /dev/null 2>&1; then - OCF_RESKEY_cat_default="socat" - else - OCF_RESKEY_cat_default="nc" --fi; -+fi - - - : ${OCF_RESKEY_cat=${OCF_RESKEY_cat_default}} -@@ -53,7 +53,7 @@ pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid" - - - #Validate command for logging --if [ $OCF_RESKEY_log_enable = "true" ]; then -+if ocf_is_true "$OCF_RESKEY_log_enable"; then - if type $OCF_RESKEY_log_cmd > /dev/null 2>&1; then - logging_cmd="$OCF_RESKEY_log_cmd $OCF_RESKEY_log_params" - ocf_log debug "Logging command is: \'$logging_cmd\' " -@@ -61,7 +61,7 @@ if [ $OCF_RESKEY_log_enable = "true" ]; then - OCF_RESKEY_log_enable="false" - ocf_log err "\'$logging_cmd\' is invalid. External logging disabled." - -- fi; -+ fi - fi - - -@@ -285,7 +285,8 @@ ilb_stop() { - - ilb_validate() { - check_binary "$OCF_RESKEY_cat" -- check_binary "$OCF_RESKEY_log_cmd" -+ -+ ocf_is_true "$OCF_RESKEY_log_enable" && check_binary "$OCF_RESKEY_log_cmd" - - if ! ocf_is_decimal "$OCF_RESKEY_port"; then - ocf_exit_reason "$OCF_RESKEY_port is not a valid port" diff --git a/SOURCES/bz2015789-gcp-ilb-3-use-bundled-gcloud.patch b/SOURCES/bz2015789-gcp-ilb-3-use-bundled-gcloud.patch deleted file mode 100644 index 7b8108d..0000000 --- a/SOURCES/bz2015789-gcp-ilb-3-use-bundled-gcloud.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/heartbeat/gcp-ilb 2021-11-09 14:13:20.311243373 +0100 -+++ b/heartbeat/gcp-ilb 2021-11-09 14:13:50.269329165 +0100 -@@ -28,7 +28,7 @@ - OCF_RESKEY_cat_default="socat" - OCF_RESKEY_port_default="60000" - OCF_RESKEY_log_enable_default="false" --OCF_RESKEY_log_cmd_default="gcloud" -+OCF_RESKEY_log_cmd_default="gcloud-ra" - OCF_RESKEY_log_params_default="logging write GCPILB" - OCF_RESKEY_log_end_params_default="" - diff --git a/SOURCES/bz2027591-nfsnotify-fix-notify_args-default.patch b/SOURCES/bz2027591-nfsnotify-fix-notify_args-default.patch deleted file mode 100644 index 75ca836..0000000 --- a/SOURCES/bz2027591-nfsnotify-fix-notify_args-default.patch +++ /dev/null @@ -1,22 +0,0 @@ -From 1c037b3ac0288509fb2b74fb4a661a504155da15 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 26 Aug 2021 12:27:50 +0200 -Subject: [PATCH] nfsnotify: fix default value for "notify_args" - ---- - heartbeat/nfsnotify.in | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/nfsnotify.in b/heartbeat/nfsnotify.in -index 851f6ad6b4..fe6d2793ba 100644 ---- a/heartbeat/nfsnotify.in -+++ b/heartbeat/nfsnotify.in -@@ -33,7 +33,7 @@ - # Parameter defaults - - OCF_RESKEY_source_host_default="" --OCF_RESKEY_notify_args_default="false" -+OCF_RESKEY_notify_args_default="" - - : ${OCF_RESKEY_source_host=${OCF_RESKEY_source_host_default}} - : ${OCF_RESKEY_notify_args=${OCF_RESKEY_notify_args_default}} diff --git 
a/SOURCES/bz2029706-1-db2-crm_attribute-use-forever.patch b/SOURCES/bz2029706-1-db2-crm_attribute-use-forever.patch deleted file mode 100644 index 17f26c8..0000000 --- a/SOURCES/bz2029706-1-db2-crm_attribute-use-forever.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 925180da2f41feddc5aac3c249563eb179b34029 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Mon, 22 Nov 2021 16:44:48 +0100 -Subject: [PATCH] db2: use -l forever instead of -t nodes -l reboot, as they - conflict with eachother - ---- - heartbeat/db2 | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/heartbeat/db2 b/heartbeat/db2 -index 03146a957..fa2a45a5d 100755 ---- a/heartbeat/db2 -+++ b/heartbeat/db2 -@@ -274,7 +274,7 @@ db2_fal_attrib() { - while read id node member - do - [ "$member" = member -a "$node" != "$me" ] || continue -- crm_attribute -t nodes -l reboot --node=$node -n $attr -v "$3" -+ crm_attribute -l forever --node=$node -n $attr -v "$3" - rc=$? - ocf_log info "DB2 instance $instance($db2node/$db: setting attrib for FAL to $FIRST_ACTIVE_LOG @ $node" - [ $rc != 0 ] && break -@@ -282,7 +282,7 @@ db2_fal_attrib() { - ;; - - get) -- crm_attribute -t nodes -l reboot -n $attr -G --quiet 2>&1 -+ crm_attribute -l forever -n $attr -G --quiet 2>&1 - rc=$? - if [ $rc != 0 ] - then diff --git a/SOURCES/bz2029706-2-db2-fixes.patch b/SOURCES/bz2029706-2-db2-fixes.patch deleted file mode 100644 index c9fe8c3..0000000 --- a/SOURCES/bz2029706-2-db2-fixes.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 75eaf06eea8957aa3941823955d1c8fa7933ab1d Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 23 Feb 2022 16:32:21 +0100 -Subject: [PATCH] db2: only warn when notify isnt set, and use - ocf_local_nodename() to get node name - ---- - heartbeat/db2 | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/heartbeat/db2 b/heartbeat/db2 -index fa2a45a5d..ea24d33fc 100755 ---- a/heartbeat/db2 -+++ b/heartbeat/db2 -@@ -267,7 +267,7 @@ db2_fal_attrib() { - - case "$2" in - set) -- me=$(uname -n) -+ me=$(ocf_local_nodename) - - # loop over all member nodes and set attribute - crm_node -l | -@@ -284,7 +284,7 @@ db2_fal_attrib() { - get) - crm_attribute -l forever -n $attr -G --quiet 2>&1 - rc=$? -- if [ $rc != 0 ] -+ if ! ocf_is_true "$OCF_RESKEY_CRM_meta_notify" && [ $rc != 0 ] - then - ocf_log warn "DB2 instance $instance($db2node/$db: can't retrieve attribute $attr, are you sure notifications are enabled ?" - fi diff --git a/SOURCES/bz2039692-mysql-1-replication-fixes.patch b/SOURCES/bz2039692-mysql-1-replication-fixes.patch deleted file mode 100644 index e086e07..0000000 --- a/SOURCES/bz2039692-mysql-1-replication-fixes.patch +++ /dev/null @@ -1,70 +0,0 @@ -From 706b48fd93a75a582c538013aea1418b6ed69dd0 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 9 Mar 2023 15:57:59 +0100 -Subject: [PATCH] mysql: promotable fixes to avoid nodes getting bounced around - by setting -v 1/-v 2, and added OCF_CHECK_LEVEL=10 for promotable resources - to be able to distinguish between promoted and not - ---- - heartbeat/mysql | 19 +++++++++++++------ - 1 file changed, 13 insertions(+), 6 deletions(-) - -diff --git a/heartbeat/mysql b/heartbeat/mysql -index 9ab49ab20e..29ed427319 100755 ---- a/heartbeat/mysql -+++ b/heartbeat/mysql -@@ -757,6 +757,10 @@ mysql_monitor() { - status_loglevel="info" - fi - -+ if ocf_is_ms; then -+ OCF_CHECK_LEVEL=10 -+ fi -+ - mysql_common_status $status_loglevel - rc=$? 
- -@@ -777,7 +781,13 @@ mysql_monitor() { - return $rc - fi - -- if [ $OCF_CHECK_LEVEL -gt 0 -a -n "$OCF_RESKEY_test_table" ]; then -+ if [ $OCF_CHECK_LEVEL -eq 10 ]; then -+ if [ -z "$OCF_RESKEY_test_table" ]; then -+ ocf_exit_reason "test_table not set" -+ return $OCF_ERR_CONFIGURED -+ -+ fi -+ - # Check if this instance is configured as a slave, and if so - # check slave status - if is_slave; then -@@ -795,18 +805,16 @@ mysql_monitor() { - ocf_exit_reason "Failed to select from $test_table"; - return $OCF_ERR_GENERIC; - fi -- else -- # In case no exnteded tests are enabled and we are in master/slave mode _always_ set the master score to 1 if we reached this point -- ocf_is_ms && $CRM_MASTER -v 1 - fi - - if ocf_is_ms && ! get_read_only; then - ocf_log debug "MySQL monitor succeeded (master)"; - # Always set master score for the master -- $CRM_MASTER -v 2 -+ $CRM_MASTER -v $((${OCF_RESKEY_max_slave_lag}+1)) - return $OCF_RUNNING_MASTER - else - ocf_log debug "MySQL monitor succeeded"; -+ ocf_is_ms && $CRM_MASTER -v 1 - return $OCF_SUCCESS - fi - } -@@ -873,7 +881,6 @@ mysql_start() { - # preference set by the administrator. We choose a low - # greater-than-zero preference. - $CRM_MASTER -v 1 -- - fi - - # Initial monitor action diff --git a/SOURCES/bz2039692-mysql-2-fix-demoted-score-bounce.patch b/SOURCES/bz2039692-mysql-2-fix-demoted-score-bounce.patch deleted file mode 100644 index 30815b4..0000000 --- a/SOURCES/bz2039692-mysql-2-fix-demoted-score-bounce.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 34483f8029ea9ab25220cfee71d53adaf5aacaa0 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 14 Jun 2023 14:37:01 +0200 -Subject: [PATCH] mysql: fix promotion_score bouncing between ~3600 and 1 on - demoted nodes - ---- - heartbeat/mysql | 11 ----------- - 1 file changed, 11 deletions(-) - -diff --git a/heartbeat/mysql b/heartbeat/mysql -index 29ed42731..1df2fc0f2 100755 ---- a/heartbeat/mysql -+++ b/heartbeat/mysql -@@ -517,17 +517,6 @@ check_slave() { - - exit $OCF_ERR_INSTALLED - fi -- elif ocf_is_ms; then -- # Even if we're not set to evict lagging slaves, we can -- # still use the seconds behind master value to set our -- # master preference. 
-- local master_pref -- master_pref=$((${OCF_RESKEY_max_slave_lag}-${secs_behind})) -- if [ $master_pref -lt 0 ]; then -- # Sanitize a below-zero preference to just zero -- master_pref=0 -- fi -- $CRM_MASTER -v $master_pref - fi - - # is the slave ok to have a VIP on it diff --git a/SOURCES/bz2040110-IPaddr2-IPsrcaddr-1-support-policy-based-routing.patch b/SOURCES/bz2040110-IPaddr2-IPsrcaddr-1-support-policy-based-routing.patch deleted file mode 100644 index 0607ddb..0000000 --- a/SOURCES/bz2040110-IPaddr2-IPsrcaddr-1-support-policy-based-routing.patch +++ /dev/null @@ -1,84 +0,0 @@ -From 4d87bcfe5df8a1e40ee945e095ac9e7cca147ec4 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 29 Jun 2022 10:26:25 +0200 -Subject: [PATCH] IPaddr2/IPsrcaddr: add/modify table parameter to be able to - find interface while using policy based routing - ---- - heartbeat/IPaddr2 | 12 ++++++++++++ - heartbeat/IPsrcaddr | 5 ++++- - heartbeat/findif.sh | 2 +- - 3 files changed, 17 insertions(+), 2 deletions(-) - -diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2 -index 97a7431a2..e8384c586 100755 ---- a/heartbeat/IPaddr2 -+++ b/heartbeat/IPaddr2 -@@ -73,6 +73,7 @@ OCF_RESKEY_ip_default="" - OCF_RESKEY_cidr_netmask_default="" - OCF_RESKEY_broadcast_default="" - OCF_RESKEY_iflabel_default="" -+OCF_RESKEY_table_default="" - OCF_RESKEY_cidr_netmask_default="" - OCF_RESKEY_lvs_support_default=false - OCF_RESKEY_lvs_ipv6_addrlabel_default=false -@@ -97,6 +98,7 @@ OCF_RESKEY_network_namespace_default="" - : ${OCF_RESKEY_cidr_netmask=${OCF_RESKEY_cidr_netmask_default}} - : ${OCF_RESKEY_broadcast=${OCF_RESKEY_broadcast_default}} - : ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}} -+: ${OCF_RESKEY_table=${OCF_RESKEY_table_default}} - : ${OCF_RESKEY_lvs_support=${OCF_RESKEY_lvs_support_default}} - : ${OCF_RESKEY_lvs_ipv6_addrlabel=${OCF_RESKEY_lvs_ipv6_addrlabel_default}} - : ${OCF_RESKEY_lvs_ipv6_addrlabel_value=${OCF_RESKEY_lvs_ipv6_addrlabel_value_default}} -@@ -239,6 +241,16 @@ If a label is specified in nic name, this parameter has no effect. - - - -+ -+ -+Table to use to lookup which interface to use for the IP. -+ -+This can be used for policy based routing. See man ip-rule(8). -+ -+Table -+ -+ -+ - - - Enable support for LVS Direct Routing configurations. In case a IP -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index 1bd41a930..cf106cc34 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -155,13 +155,16 @@ Metric. Only needed if incorrect metric value is used. - - - --Table to modify. E.g. "local". -+Table to modify and use for interface lookup. E.g. "local". - - The table has to have a route matching the "destination" parameter. -+ -+This can be used for policy based routing. See man ip-rule(8). 
- - Table - - -+ - - - -diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh -index 66bc6d56a..1a40cc9a4 100644 ---- a/heartbeat/findif.sh -+++ b/heartbeat/findif.sh -@@ -32,7 +32,7 @@ prefixcheck() { - getnetworkinfo() - { - local line netinfo -- ip -o -f inet route list match $OCF_RESKEY_ip table local scope host | (while read line; -+ ip -o -f inet route list match $OCF_RESKEY_ip table "${OCF_RESKEY_table=local}" scope host | (while read line; - do - netinfo=`echo $line | awk '{print $2}'` - case $netinfo in diff --git a/SOURCES/bz2040110-IPaddr2-IPsrcaddr-2-fix-table-parameter.patch b/SOURCES/bz2040110-IPaddr2-IPsrcaddr-2-fix-table-parameter.patch deleted file mode 100644 index 9cfc808..0000000 --- a/SOURCES/bz2040110-IPaddr2-IPsrcaddr-2-fix-table-parameter.patch +++ /dev/null @@ -1,35 +0,0 @@ -From da9e8e691f39494e14f8f11173b6ab6433384396 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 20 Jun 2023 14:19:23 +0200 -Subject: [PATCH] findif.sh: fix table parameter so it uses main table by - default - ---- - heartbeat/findif.sh | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh -index 1a40cc9a4b..6c04c98c19 100644 ---- a/heartbeat/findif.sh -+++ b/heartbeat/findif.sh -@@ -32,7 +32,7 @@ prefixcheck() { - getnetworkinfo() - { - local line netinfo -- ip -o -f inet route list match $OCF_RESKEY_ip table "${OCF_RESKEY_table=local}" scope host | (while read line; -+ ip -o -f inet route list match $OCF_RESKEY_ip table "${OCF_RESKEY_table:=main}" scope host | (while read line; - do - netinfo=`echo $line | awk '{print $2}'` - case $netinfo in -@@ -215,9 +215,9 @@ findif() - fi - if [ -n "$nic" ] ; then - # NIC supports more than two. -- set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') -+ set -- $(ip -o -f $family route list match $match $scope table "${OCF_RESKEY_table:=main}" | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') - else -- set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') -+ set -- $(ip -o -f $family route list match $match $scope table "${OCF_RESKEY_table:=main}" | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}') - fi - if [ $# = 0 ] ; then - case $OCF_RESKEY_ip in diff --git a/SOURCES/bz2049319-Filesystem-add-support-for-Amazon-EFS.patch b/SOURCES/bz2049319-Filesystem-add-support-for-Amazon-EFS.patch deleted file mode 100644 index 05e7bf1..0000000 --- a/SOURCES/bz2049319-Filesystem-add-support-for-Amazon-EFS.patch +++ /dev/null @@ -1,175 +0,0 @@ -From cab190c737fdf58268aa5c009f6089b754862b22 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Tue, 1 Feb 2022 16:32:50 -0800 -Subject: [PATCH 1/3] Filesystem: Fix OpenBSD check in fstype_supported() - -fstype_supported() is supposed to skip the /proc/filesystems check if -the OS is OpenBSD. Instead, it skips the check if the OS is **not** -OpenBSD. That means the function has been a no-op for all other distros. 
- -Signed-off-by: Reid Wahl ---- - heartbeat/Filesystem | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index 010c1dcfc..8b4792152 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -440,7 +440,7 @@ fstype_supported() - local support="$FSTYPE" - local rc - -- if [ "X${HOSTOS}" != "XOpenBSD" ];then -+ if [ "X${HOSTOS}" = "XOpenBSD" ];then - # skip checking /proc/filesystems for obsd - return $OCF_SUCCESS - fi - -From 5d38b87daa9cfffa89a193df131d6ebd87cd05aa Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Tue, 1 Feb 2022 18:26:32 -0800 -Subject: [PATCH 2/3] Filesystem: Improve fstype_supported logs for fuse - -Make it more clear when we have to use a different name to check for -support of a particular filesystem. Currently only used for fuse-type -filesystems. - -Signed-off-by: Reid Wahl ---- - heartbeat/Filesystem | 13 ++++++++++--- - 1 file changed, 10 insertions(+), 3 deletions(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index 8b4792152..4d84846c1 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -455,6 +455,10 @@ fstype_supported() - fuse.*|glusterfs|rozofs) support="fuse";; - esac - -+ if [ "$support" != "$FSTYPE" ]; then -+ ocf_log info "Checking support for $FSTYPE as \"$support\"" -+ fi -+ - grep -w "$support"'$' /proc/filesystems >/dev/null - if [ $? -eq 0 ]; then - # found the fs type -@@ -465,7 +469,7 @@ fstype_supported() - # check the if the filesystem support exists again. - $MODPROBE $support >/dev/null - if [ $? -ne 0 ]; then -- ocf_exit_reason "Couldn't find filesystem $FSTYPE in /proc/filesystems and failed to load kernel module" -+ ocf_exit_reason "Couldn't find filesystem $support in /proc/filesystems and failed to load kernel module" - return $OCF_ERR_INSTALLED - fi - -@@ -478,11 +482,11 @@ fstype_supported() - # yes. found the filesystem after doing the modprobe - return $OCF_SUCCESS - fi -- ocf_log debug "Unable to find support for $FSTYPE in /proc/filesystems after modprobe, trying again" -+ ocf_log debug "Unable to find support for $support in /proc/filesystems after modprobe, trying again" - sleep 1 - done - -- ocf_exit_reason "Couldn't find filesystem $FSTYPE in /proc/filesystems" -+ ocf_exit_reason "Couldn't find filesystem $support in /proc/filesystems" - return $OCF_ERR_INSTALLED - } - -@@ -837,6 +841,9 @@ Filesystem_monitor() - # VALIDATE_ALL: Are the instance parameters valid? - # FIXME!! The only part that's useful is the return code. - # This code always returns $OCF_SUCCESS (!) -+# FIXME!! Needs some tuning to match fstype_supported() (e.g., for -+# fuse). Can we just call fstype_supported() with a flag like -+# "no_modprobe" instead? - # - Filesystem_validate_all() - { - -From e2174244067b02d798e0f12437f0f499c80f91fe Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Tue, 1 Feb 2022 18:55:47 -0800 -Subject: [PATCH 3/3] Filesystem: Add support for Amazon EFS mount helper - -mount.efs, the mount helper for Amazon Elastic File System (EFS) -provided by amazon-efs-utils [1], is a wrapper for mount.nfs4. It offers -a number of AWS-specific mount options and some security improvements -like encryption of data in transit. - -This commit adds support by treating an fstype=efs like fstype=nfs4 for -the most part. 
- -Resolves: RHBZ#2049319 - -[1] https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html - -Signed-off-by: Reid Wahl ---- - heartbeat/Filesystem | 14 ++++++++------ - 1 file changed, 8 insertions(+), 6 deletions(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index 4d84846c1..1a90d6a42 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -341,7 +341,7 @@ determine_blockdevice() { - # Get the current real device name, if possible. - # (specified devname could be -L or -U...) - case "$FSTYPE" in -- nfs4|nfs|smbfs|cifs|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|none|lustre) -+ nfs4|nfs|efs|smbfs|cifs|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|none|lustre) - : ;; - *) - match_string="${TAB}${CANONICALIZED_MOUNTPOINT}${TAB}" -@@ -423,7 +423,7 @@ is_fsck_needed() { - no) false;; - ""|auto) - case "$FSTYPE" in -- ext4|ext4dev|ext3|reiserfs|reiser4|nss|xfs|jfs|vfat|fat|nfs4|nfs|cifs|smbfs|ocfs2|gfs2|none|lustre|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs) -+ ext4|ext4dev|ext3|reiserfs|reiser4|nss|xfs|jfs|vfat|fat|nfs4|nfs|efs|cifs|smbfs|ocfs2|gfs2|none|lustre|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs) - false;; - *) - true;; -@@ -450,9 +450,11 @@ fstype_supported() - return $OCF_SUCCESS - fi - -- # support fuse-filesystems (e.g. GlusterFS) -+ # support fuse-filesystems (e.g. GlusterFS) and Amazon Elastic File -+ # System (EFS) - case "$FSTYPE" in - fuse.*|glusterfs|rozofs) support="fuse";; -+ efs) support="nfs4";; - esac - - if [ "$support" != "$FSTYPE" ]; then -@@ -701,7 +703,7 @@ Filesystem_stop() - - # For networked filesystems, there's merit in trying -f: - case "$FSTYPE" in -- nfs4|nfs|cifs|smbfs) umount_force="-f" ;; -+ nfs4|nfs|efs|cifs|smbfs) umount_force="-f" ;; - esac - - # Umount all sub-filesystems mounted under $MOUNTPOINT/ too. -@@ -892,7 +894,7 @@ set_blockdevice_var() { - - # these are definitely not block devices - case "$FSTYPE" in -- nfs4|nfs|smbfs|cifs|none|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|lustre) return;; -+ nfs4|nfs|efs|smbfs|cifs|none|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|lustre) return;; - esac - - if $(is_option "loop"); then -@@ -1013,7 +1015,7 @@ is_option "ro" && - CLUSTERSAFE=2 - - case "$FSTYPE" in --nfs4|nfs|smbfs|cifs|none|gfs2|glusterfs|ceph|ocfs2|overlay|overlayfs|tmpfs|cvfs|lustre) -+nfs4|nfs|efs|smbfs|cifs|none|gfs2|glusterfs|ceph|ocfs2|overlay|overlayfs|tmpfs|cvfs|lustre) - CLUSTERSAFE=1 # this is kind of safe too - ;; - # add here CLUSTERSAFE=0 for all filesystems which are not diff --git a/SOURCES/bz2049414-Filesystem-1-fix-uuid-label-device-whitespace.patch b/SOURCES/bz2049414-Filesystem-1-fix-uuid-label-device-whitespace.patch deleted file mode 100644 index 09960f0..0000000 --- a/SOURCES/bz2049414-Filesystem-1-fix-uuid-label-device-whitespace.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 26de0ad2f0f975166fe79ef72ab08e2c03519eea Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Mon, 28 Mar 2022 13:25:35 +0200 -Subject: [PATCH] Filesystem: fix logic for UUID/label devices with space - between parameter and UUID/label - ---- - heartbeat/Filesystem | 14 ++++++++++---- - 1 file changed, 10 insertions(+), 4 deletions(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index 1a90d6a42..72a1b8623 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -596,11 +596,11 @@ Filesystem_start() - flushbufs "$DEVICE" - # Mount the filesystem. 
- case "$FSTYPE" in -- none) $MOUNT $options "$DEVICE" "$MOUNTPOINT" && -+ none) $MOUNT $options $device_opt "$DEVICE" "$MOUNTPOINT" && - bind_mount - ;; -- "") $MOUNT $options "$DEVICE" "$MOUNTPOINT" ;; -- *) $MOUNT -t "$FSTYPE" $options "$DEVICE" "$MOUNTPOINT" ;; -+ "") $MOUNT $options $device_opt "$DEVICE" "$MOUNTPOINT" ;; -+ *) $MOUNT -t "$FSTYPE" $options $device_opt "$DEVICE" "$MOUNTPOINT" ;; - esac - - if [ $? -ne 0 ]; then -@@ -902,7 +902,13 @@ set_blockdevice_var() { - fi - - case "$DEVICE" in -- -*) # Oh... An option to mount instead... Typically -U or -L -+ --*) # Typically --uuid or --label -+ device_opt=$(echo $DEVICE | sed -E "s/([[:blank:]]|=).*//") -+ DEVICE=$(echo $DEVICE | sed -E "s/$device_opt([[:blank:]]*|=)//") -+ ;; -+ -*) # Oh... An option to mount instead... Typically -U or -L -+ device_opt=$(echo $DEVICE | cut -c1-2) -+ DEVICE=$(echo $DEVICE | sed "s/$device_opt[[:blank:]]*//") - ;; - /dev/null) # Special case for BSC - blockdevice=yes diff --git a/SOURCES/bz2049414-Filesystem-2-improve-uuid-label-device-logic.patch b/SOURCES/bz2049414-Filesystem-2-improve-uuid-label-device-logic.patch deleted file mode 100644 index 844772a..0000000 --- a/SOURCES/bz2049414-Filesystem-2-improve-uuid-label-device-logic.patch +++ /dev/null @@ -1,38 +0,0 @@ -From d9b46474fc19d9c57e2cfb752d60319017da8410 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 6 Apr 2022 14:14:19 +0200 -Subject: [PATCH] Filesystem: improve logic for UUID/label and add note that - /dev/disk/by-{uuid,label}/ are preferred on Linux - ---- - heartbeat/Filesystem | 8 +++++--- - 1 file changed, 5 insertions(+), 3 deletions(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index 72a1b8623..44270ad98 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -163,6 +163,8 @@ directory where the status file is to be placed. - - - The name of block device for the filesystem, or -U, -L options for mount, or NFS mount specification. -+ -+NOTE: On Linux /dev/disk/by-{uuid,label}/ are preferred to -U/-L. - - block device - -@@ -902,11 +904,11 @@ set_blockdevice_var() { - fi - - case "$DEVICE" in -- --*) # Typically --uuid or --label -- device_opt=$(echo $DEVICE | sed -E "s/([[:blank:]]|=).*//") -+ --uuid=*|--uuid\ *|--label=*|--label\ *) -+ device_opt=$(echo $DEVICE | sed "s/\([[:blank:]]\|=\).*//") - DEVICE=$(echo $DEVICE | sed -E "s/$device_opt([[:blank:]]*|=)//") - ;; -- -*) # Oh... An option to mount instead... Typically -U or -L -+ -U*|-L*) # short versions of --uuid/--label - device_opt=$(echo $DEVICE | cut -c1-2) - DEVICE=$(echo $DEVICE | sed "s/$device_opt[[:blank:]]*//") - ;; diff --git a/SOURCES/bz2064342-1-IPsrcaddr-dhcp-warning.patch b/SOURCES/bz2064342-1-IPsrcaddr-dhcp-warning.patch deleted file mode 100644 index 34bad14..0000000 --- a/SOURCES/bz2064342-1-IPsrcaddr-dhcp-warning.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 6d2ed7615614ede093f097189876d0f08553a43e Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Mon, 14 Feb 2022 22:23:39 -0800 -Subject: [PATCH] IPsrcaddr: Add warning about DHCP - -If DHCP is enabled for the interface that serves OCF_RESKEY_ipaddress, -then NetworkManager (and possibly dhclient in systems without NM; -unsure) may later re-add a route that the IPsrcaddr resource replaced. -This may cause the resource to fail or cause other unexpected behavior. - -So far this has been observed with a default route, albeit with an edge -case of a configuration (OCF_RESKEY_ipaddress on a different subnet) -that may not be totally valid. 
There are likely to be other situations -as well where DHCP can cause conflicts with IPsrcaddr's manual updates -via iproute. The safest option is to use only static configuration for -the involved interface. - -Resolves: RHBZ#1654862 - -Signed-off-by: Reid Wahl ---- - heartbeat/IPsrcaddr | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index ec868409f..fd7b6f68d 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -99,6 +99,12 @@ meta_data() { - - Resource script for IPsrcaddr. It manages the preferred source address - modification. -+ -+Note: DHCP should not be enabled for the interface serving the preferred -+source address. Enabling DHCP may result in unexpected behavior, such as -+the automatic addition of duplicate or conflicting routes. This may -+cause the IPsrcaddr resource to fail, or it may produce undesired -+behavior while the resource continues to run. - - Manages the preferred source address for outgoing IP packets - diff --git a/SOURCES/bz2064342-2-IPsrcaddr-error-message-route-not-found.patch b/SOURCES/bz2064342-2-IPsrcaddr-error-message-route-not-found.patch deleted file mode 100644 index 8a4a6fc..0000000 --- a/SOURCES/bz2064342-2-IPsrcaddr-error-message-route-not-found.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 5a65f66ff803ad7ed15af958cc1efdde4d53dcb7 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Thu, 17 Feb 2022 03:53:21 -0800 -Subject: [PATCH] IPsrcaddr: Better error message when no matching route found - -If OCF_RESKEY_destination is not explicitly set and `ip route list` -can't find a route matching the specifications, the NETWORK variable -doesn't get set. This causes a certain failure of the start operation, -because there is no PREFIX argument to `ip route replace` (syntax -error). It may also cause unexpected behavior for stop operations (but -not in all cases). During a monitor, this event can only happen if -something has changed outside the cluster's control, and so is cause -for warning there. - -Exit OCF_ERR_ARGS for start, log debug for probe, log warning for all -other ops. - -Resolves: RHBZ#1654862 - -Signed-off-by: Reid Wahl ---- - heartbeat/IPsrcaddr | 14 ++++++++++++++ - 1 file changed, 14 insertions(+) - -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index fd7b6f68d..f0216722d 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -549,6 +549,20 @@ rc=$? - INTERFACE=`echo $findif_out | awk '{print $1}'` - if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then - NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` -+ -+ if [ -z "$NETWORK" ]; then -+ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO" -+ err_str="$err_str match $ipaddress' failed to find a matching route" -+ -+ if [ "$__OCF_ACTION" = "start" ]; then -+ ocf_exit_reason "$err_str" -+ exit $OCF_ERR_ARGS -+ elif ! 
ocf_is_probe; then -+ ocf_log warn "$err_str" -+ else -+ ocf_log debug "$err_str" -+ fi -+ fi - else - NETWORK="$OCF_RESKEY_destination" - fi diff --git a/SOURCES/bz2064342-3-IPsrcaddr-fix-indentation.patch b/SOURCES/bz2064342-3-IPsrcaddr-fix-indentation.patch deleted file mode 100644 index 337943d..0000000 --- a/SOURCES/bz2064342-3-IPsrcaddr-fix-indentation.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 0a197f1cd227e768837dff778a0c56fc1085d434 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Mon, 21 Feb 2022 13:54:04 +0100 -Subject: [PATCH] IPsrcaddr: fix indentation in better error message code - ---- - heartbeat/IPsrcaddr | 30 +++++++++++++++--------------- - 1 file changed, 15 insertions(+), 15 deletions(-) - -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index f0216722d..c82adc0e9 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -542,27 +542,27 @@ fi - findif_out=`$FINDIF -C` - rc=$? - [ $rc -ne 0 ] && { -- ocf_exit_reason "[$FINDIF -C] failed" -- exit $rc -+ ocf_exit_reason "[$FINDIF -C] failed" -+ exit $rc - } - - INTERFACE=`echo $findif_out | awk '{print $1}'` - if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then - NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` - -- if [ -z "$NETWORK" ]; then -- err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO" -- err_str="$err_str match $ipaddress' failed to find a matching route" -- -- if [ "$__OCF_ACTION" = "start" ]; then -- ocf_exit_reason "$err_str" -- exit $OCF_ERR_ARGS -- elif ! ocf_is_probe; then -- ocf_log warn "$err_str" -- else -- ocf_log debug "$err_str" -- fi -- fi -+ if [ -z "$NETWORK" ]; then -+ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO" -+ err_str="$err_str match $ipaddress' failed to find a matching route" -+ -+ if [ "$__OCF_ACTION" = "start" ]; then -+ ocf_exit_reason "$err_str" -+ exit $OCF_ERR_ARGS -+ elif ! ocf_is_probe; then -+ ocf_log warn "$err_str" -+ else -+ ocf_log debug "$err_str" -+ fi -+ fi - else - NETWORK="$OCF_RESKEY_destination" - fi diff --git a/SOURCES/bz2064342-4-IPsrcaddr-fixes.patch b/SOURCES/bz2064342-4-IPsrcaddr-fixes.patch deleted file mode 100644 index c099fa5..0000000 --- a/SOURCES/bz2064342-4-IPsrcaddr-fixes.patch +++ /dev/null @@ -1,117 +0,0 @@ -From 50a596bfb977b18902dc62b99145bbd1a087690a Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 1 Mar 2022 11:06:07 +0100 -Subject: [PATCH] IPsrcaddr: fixes - -- use findif.sh to detect secondary interfaces -- get metric and proto to update the correct route/update it correctly -- match route using interface to fail when trying to update secondary - interfaces without specifying destination (would update default route - before) -- also use PRIMARY_IP/OPTS during stop-action for default routes (to get - back to the exact routes we started with) -- dont fail during stop-action if route doesnt exist -- use [[:blank:]] for WS to follow POSIX standard (suggested by nrwahl) ---- - heartbeat/IPsrcaddr | 35 +++++++++++++++++++---------------- - 1 file changed, 19 insertions(+), 16 deletions(-) - -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index c82adc0e9..7dbf65ff5 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -52,6 +52,7 @@ - # Initialization: - : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} - . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs -+. 
${OCF_FUNCTIONS_DIR}/findif.sh - - # Defaults - OCF_RESKEY_ipaddress_default="" -@@ -181,19 +182,21 @@ errorexit() { - # - # where the src clause "src Y.Y.Y.Y" may or may not be present - --WS="[`echo -en ' \t'`]" -+WS="[[:blank:]]" - OCTET="[0-9]\{1,3\}" - IPADDR="\($OCTET\.\)\{3\}$OCTET" - SRCCLAUSE="src$WS$WS*\($IPADDR\)" - MATCHROUTE="\(.*${WS}\)\($SRCCLAUSE\)\($WS.*\|$\)" --FINDIF=$HA_BIN/findif -+METRICCLAUSE=".*\(metric$WS[^ ]\+\)" -+PROTOCLAUSE=".*\(proto$WS[^ ]\+\)" -+FINDIF=findif - - # findif needs that to be set - export OCF_RESKEY_ip=$OCF_RESKEY_ipaddress - - srca_read() { - # Capture matching route - doublequotes prevent word splitting... -- ROUTE="`$CMDSHOW 2> /dev/null`" || errorexit "command '$CMDSHOW' failed" -+ ROUTE="`$CMDSHOW dev $INTERFACE 2> /dev/null`" || errorexit "command '$CMDSHOW' failed" - - # ... so we can make sure there is only 1 matching route - [ 1 -eq `echo "$ROUTE" | wc -l` ] || \ -@@ -201,7 +204,7 @@ srca_read() { - - # But there might still be no matching route - [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] && [ -z "$ROUTE" ] && \ -- ! ocf_is_probe && errorexit "no matching route exists" -+ ! ocf_is_probe && [ "$__OCF_ACTION" != stop ] && errorexit "no matching route exists" - - # Sed out the source ip address if it exists - SRCIP=`echo $ROUTE | sed -n "s/$MATCHROUTE/\3/p"` -@@ -232,8 +235,8 @@ srca_start() { - rc=$OCF_SUCCESS - ocf_log info "The ip route has been already set.($NETWORK, $INTERFACE, $ROUTE_WO_SRC)" - else -- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE src $1 || \ -- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE src $1' failed" -+ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC || \ -+ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC' failed" - - if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then - $CMDCHANGE $ROUTE_WO_SRC src $1 || \ -@@ -266,14 +269,11 @@ srca_stop() { - - [ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address" - -- OPTS="" -- if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] ;then -- PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')" -- OPTS="proto kernel scope host src $PRIMARY_IP" -- fi -+ PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')" -+ OPTS="proto kernel scope link src $PRIMARY_IP" - -- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS || \ -- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS' failed" -+ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC || \ -+ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC' failed" - - if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then - $CMDCHANGE $ROUTE_WO_SRC || \ -@@ -539,16 +539,19 @@ if [ $rc -ne $OCF_SUCCESS ]; then - esac - fi - --findif_out=`$FINDIF -C` -+findif_out=`$FINDIF` - rc=$? 
- [ $rc -ne 0 ] && { -- ocf_exit_reason "[$FINDIF -C] failed" -+ ocf_exit_reason "[$FINDIF] failed" - exit $rc - } - - INTERFACE=`echo $findif_out | awk '{print $1}'` -+LISTROUTE=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress` -+METRIC=`echo $LISTROUTE | sed -n "s/$METRICCLAUSE/\1/p"` -+[ -z "$PROTO" ] && PROTO=`echo $LISTROUTE | sed -n "s/$PROTOCLAUSE/\1/p"` - if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then -- NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'` -+ NETWORK=`echo $LISTROUTE | grep -m 1 -o '^[^ ]*'` - - if [ -z "$NETWORK" ]; then - err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO" diff --git a/SOURCES/bz2072043-LVM-activate-fix-fence-issue.patch b/SOURCES/bz2072043-LVM-activate-fix-fence-issue.patch deleted file mode 100644 index 03727c8..0000000 --- a/SOURCES/bz2072043-LVM-activate-fix-fence-issue.patch +++ /dev/null @@ -1,102 +0,0 @@ -From e651576c1b5c1ffbe0fd1b78f209be9a3f9764e7 Mon Sep 17 00:00:00 2001 -From: XingWei-Liu -Date: Thu, 10 Mar 2022 10:38:11 +0800 -Subject: [PATCH 1/4] change lvm_status return value from ocf_not_running to - ocf_err_generic - ---- - heartbeat/LVM-activate | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate -index aed672ea3..0aef76706 100755 ---- a/heartbeat/LVM-activate -+++ b/heartbeat/LVM-activate -@@ -790,7 +790,7 @@ lvm_status() { - fi - - if [ $dm_count -eq 0 ]; then -- return $OCF_NOT_RUNNING -+ return $OCF_ERR_GENERIC - fi - - case "$OCF_CHECK_LEVEL" in - -From 540ae56436a4f9547bb17aa206fe0e8c7a7fea87 Mon Sep 17 00:00:00 2001 -From: XingWei-Liu -Date: Thu, 10 Mar 2022 16:44:25 +0800 -Subject: [PATCH 2/4] add if ocf_is_probe in monitor func - ---- - heartbeat/LVM-activate | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - -diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate -index 0aef76706..c86606637 100755 ---- a/heartbeat/LVM-activate -+++ b/heartbeat/LVM-activate -@@ -790,7 +790,11 @@ lvm_status() { - fi - - if [ $dm_count -eq 0 ]; then -- return $OCF_ERR_GENERIC -+ if ocf_is_probe ;then -+ return $OCF_NOT_RUNNING -+ else -+ return $OCF_ERR_GENERIC -+ fi - fi - - case "$OCF_CHECK_LEVEL" in - -From ae3f35d4f671f3288034a257c6dd8eff9a83447a Mon Sep 17 00:00:00 2001 -From: XingWei-Liu -Date: Thu, 10 Mar 2022 16:50:04 +0800 -Subject: [PATCH 3/4] add if ocf_is_probe in monitor func - ---- - heartbeat/LVM-activate | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate -index c86606637..f345f73a9 100755 ---- a/heartbeat/LVM-activate -+++ b/heartbeat/LVM-activate -@@ -791,9 +791,9 @@ lvm_status() { - - if [ $dm_count -eq 0 ]; then - if ocf_is_probe ;then -- return $OCF_NOT_RUNNING -- else - return $OCF_ERR_GENERIC -+ else -+ return $OCF_NOT_RUNNING - fi - fi - - -From 1072c0490ef936a1a7dfd8411da434dce1569457 Mon Sep 17 00:00:00 2001 -From: XingWei-Liu -Date: Thu, 10 Mar 2022 18:10:21 +0800 -Subject: [PATCH 4/4] reverse return value in monitor func - ---- - heartbeat/LVM-activate | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate -index f345f73a9..c86606637 100755 ---- a/heartbeat/LVM-activate -+++ b/heartbeat/LVM-activate -@@ -791,9 +791,9 @@ lvm_status() { - - if [ $dm_count -eq 0 ]; then - if ocf_is_probe ;then -- return $OCF_ERR_GENERIC -- else - return $OCF_NOT_RUNNING -+ else -+ return $OCF_ERR_GENERIC - fi - fi - 
diff --git a/SOURCES/bz2086889-lvmlockd-fail-when-use_lvmlockd-not-set.patch b/SOURCES/bz2086889-lvmlockd-fail-when-use_lvmlockd-not-set.patch deleted file mode 100644 index 8400437..0000000 --- a/SOURCES/bz2086889-lvmlockd-fail-when-use_lvmlockd-not-set.patch +++ /dev/null @@ -1,25 +0,0 @@ -From b3885f7d95fe390371f806c7f3debb3ec8ad012d Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 7 Jun 2022 15:20:11 +0200 -Subject: [PATCH] lvmlockd: fail when use_lvmlockd has not been set - ---- - heartbeat/lvmlockd | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/heartbeat/lvmlockd b/heartbeat/lvmlockd -index 05bb0a2e5..dc7bd2d7e 100755 ---- a/heartbeat/lvmlockd -+++ b/heartbeat/lvmlockd -@@ -179,6 +179,11 @@ setup_lvm_config() - out=$(lvmconfig 'global/locking_type' 2> /dev/null) - lock_type=$(echo "$out" | cut -d'=' -f2) - -+ if [ -z "$use_lvmlockd" ]; then -+ ocf_exit_reason "\"use_lvmlockd\" not set in /etc/lvm/lvm.conf ..." -+ exit $OCF_ERR_CONFIGURED -+ fi -+ - if [ -n "$use_lvmlockd" ] && [ "$use_lvmlockd" != 1 ] ; then - ocf_log info "setting \"use_lvmlockd=1\" in /etc/lvm/lvm.conf ..." - sed -i 's,^[[:blank:]]*use_lvmlockd[[:blank:]]*=.*,\ \ \ \ use_lvmlockd = 1,g' /etc/lvm/lvm.conf diff --git a/SOURCES/bz2090370-CTDB-move-process-to-root-cgroup-if-rt-enabled.patch b/SOURCES/bz2090370-CTDB-move-process-to-root-cgroup-if-rt-enabled.patch deleted file mode 100644 index 7f0c7ef..0000000 --- a/SOURCES/bz2090370-CTDB-move-process-to-root-cgroup-if-rt-enabled.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 99c4f2af92a10155cf072198c72deffaed3883a5 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 3 Aug 2022 17:20:31 +0200 -Subject: [PATCH] CTDB: move process to root cgroup if realtime scheduling is - enabled - ---- - heartbeat/CTDB.in | 2 ++ - heartbeat/ocf-shellfuncs.in | 12 ++++++++++++ - 2 files changed, 14 insertions(+) - -diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in -index d25d026ca..46f56cfac 100755 ---- a/heartbeat/CTDB.in -+++ b/heartbeat/CTDB.in -@@ -709,6 +709,8 @@ EOF - invoke_ctdbd() { - local vers="$1" - -+ ocf_move_to_root_cgroup_if_rt_enabled -+ - ocf_version_cmp "$vers" "4.9.0" - if [ "$?" -ne "0" ]; then - # With 4.9+, all ctdbd binary parameters are provided as -diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in -index 6be4e4e30..2c53a967a 100644 ---- a/heartbeat/ocf-shellfuncs.in -+++ b/heartbeat/ocf-shellfuncs.in -@@ -672,6 +672,18 @@ EOF - systemctl daemon-reload - } - -+# move process to root cgroup if realtime scheduling is enabled -+ocf_move_to_root_cgroup_if_rt_enabled() -+{ -+ if [ -e "/sys/fs/cgroup/cpu/cpu.rt_runtime_us" ]; then -+ echo $$ >> /sys/fs/cgroup/cpu/tasks -+ -+ if [ "$?" -ne "0" ]; then -+ ocf_log warn "Unable to move PID $$ to the root cgroup" -+ fi -+ fi -+} -+ - # usage: crm_mon_no_validation args... 
- # run crm_mon without any cib schema validation - # This is useful when an agent runs in a bundle to avoid potential diff --git a/SOURCES/bz2093214-aws-vpc-move-ip-add-interface-label-support.patch b/SOURCES/bz2093214-aws-vpc-move-ip-add-interface-label-support.patch deleted file mode 100644 index d1a611c..0000000 --- a/SOURCES/bz2093214-aws-vpc-move-ip-add-interface-label-support.patch +++ /dev/null @@ -1,82 +0,0 @@ -From 4420ef84f3172c67fc7b8b6ae41ea173de017bf4 Mon Sep 17 00:00:00 2001 -From: Petr Pavlu -Date: Wed, 25 May 2022 15:12:33 +0200 -Subject: [PATCH] aws-vpc-move-ip: Allow to set the interface label - -Add a parameter to specify an interface label to distinguish the IP -address managed by aws-vpc-move-ip, similarly as can be done with -IPaddr2. This allows to easily recognize the address from other -addresses assigned to a given interface. ---- - heartbeat/aws-vpc-move-ip | 30 +++++++++++++++++++++++++++++- - 1 file changed, 29 insertions(+), 1 deletion(-) - -diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip -index 5d5204080..dee040300 100755 ---- a/heartbeat/aws-vpc-move-ip -+++ b/heartbeat/aws-vpc-move-ip -@@ -43,6 +43,7 @@ OCF_RESKEY_address_default="" - OCF_RESKEY_routing_table_default="" - OCF_RESKEY_routing_table_role_default="" - OCF_RESKEY_interface_default="eth0" -+OCF_RESKEY_iflabel_default="" - OCF_RESKEY_monapi_default="false" - OCF_RESKEY_lookup_type_default="InstanceId" - -@@ -54,6 +55,7 @@ OCF_RESKEY_lookup_type_default="InstanceId" - : ${OCF_RESKEY_routing_table=${OCF_RESKEY_routing_table_default}} - : ${OCF_RESKEY_routing_table_role=${OCF_RESKEY_routing_table_role_default}} - : ${OCF_RESKEY_interface=${OCF_RESKEY_interface_default}} -+: ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}} - : ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}} - : ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}} - -@@ -149,6 +151,18 @@ Name of the network interface, i.e. eth0 - - - -+ -+ -+You can specify an additional label for your IP address here. -+This label is appended to your interface name. -+ -+The kernel allows alphanumeric labels up to a maximum length of 15 -+characters including the interface name and colon (e.g. eth0:foobar1234) -+ -+Interface label -+ -+ -+ - - - Enable enhanced monitoring using AWS API calls to check route table entry -@@ -215,6 +229,14 @@ ec2ip_validate() { - return $OCF_ERR_CONFIGURED - fi - -+ if [ -n "$OCF_RESKEY_iflabel" ]; then -+ label=${OCF_RESKEY_interface}:${OFC_RESKEY_iflabel} -+ if [ ${#label} -gt 15 ]; then -+ ocf_exit_reason "Interface label [$label] exceeds maximum character limit of 15" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ fi -+ - TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") - -@@ -363,7 +385,13 @@ ec2ip_get_and_configure() { - - # Reconfigure the local ip address - ec2ip_drop -- cmd="ip addr add ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface" -+ -+ extra_opts="" -+ if [ -n "$OCF_RESKEY_iflabel" ]; then -+ extra_opts="$extra_opts label $OCF_RESKEY_interface:$OCF_RESKEY_iflabel" -+ fi -+ -+ cmd="ip addr add ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface $extra_opts" - ocf_log debug "executing command: $cmd" - $cmd - rc=$? 
diff --git a/SOURCES/bz2103370-ocf-tester-1-update.patch b/SOURCES/bz2103370-ocf-tester-1-update.patch deleted file mode 100644 index 0e32ed2..0000000 --- a/SOURCES/bz2103370-ocf-tester-1-update.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 46e8d346ca4803245f51a157591c4df1126d3b49 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 12 Jul 2022 12:45:52 +0200 -Subject: [PATCH] ocf-tester: use promotable terms - ---- - tools/ocf-tester.in | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/tools/ocf-tester.in b/tools/ocf-tester.in -index 10822a5a06..f1703ba1b7 100755 ---- a/tools/ocf-tester.in -+++ b/tools/ocf-tester.in -@@ -295,10 +295,10 @@ if [ $rc -eq 3 ]; then - - elif [ $rc -eq 8 ]; then - test_command demote "Cleanup, demote" -- assert $? 0 "Your agent was a master and could not be demoted" 1 -+ assert $? 0 "Your agent was promoted and could not be demoted" 1 - - test_command stop "Cleanup, stop" -- assert $? 0 "Your agent was a master and could not be stopped" 1 -+ assert $? 0 "Your agent was promoted and could not be stopped" 1 - - elif [ $rc -ne 7 ]; then - test_command stop -@@ -370,10 +370,10 @@ if [ $has_promote -eq 1 -a $has_demote -eq 1 ]; then - assert $? 0 "Demote failed" 1 - - elif [ $has_promote -eq 0 -a $has_demote -eq 0 ]; then -- info "* Your agent does not support master/slave (optional)" -+ info "* Your agent does not support promotable clones (optional)" - - else -- echo "* Your agent partially supports master/slave" -+ echo "* Your agent partially supports promotable clones" - num_errors=`expr $num_errors + 1` - fi - diff --git a/SOURCES/bz2103370-ocf-tester-2-remove-deprecated-lrmd-lrmadmin-code.patch b/SOURCES/bz2103370-ocf-tester-2-remove-deprecated-lrmd-lrmadmin-code.patch deleted file mode 100644 index a932397..0000000 --- a/SOURCES/bz2103370-ocf-tester-2-remove-deprecated-lrmd-lrmadmin-code.patch +++ /dev/null @@ -1,166 +0,0 @@ -From 687aa646852d5fd5d4e811b2ec562ebffa15e23d Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 14 Jul 2022 14:52:07 +0200 -Subject: [PATCH] ocf-tester: remove deprecated lrmd/lrmadmin code that hasnt - worked since pre-pacemaker days - ---- - tools/ocf-tester.8 | 12 ++----- - tools/ocf-tester.in | 81 --------------------------------------------- - 2 files changed, 2 insertions(+), 91 deletions(-) - -diff --git a/tools/ocf-tester.8 b/tools/ocf-tester.8 -index 850ec0be04..3f398282d2 100644 ---- a/tools/ocf-tester.8 -+++ b/tools/ocf-tester.8 -@@ -1,9 +1,9 @@ --.TH OCF-TESTER "8" "January 2012" "Tool for testing if a cluster resource is OCF compliant" "System Administration Utilities" -+.TH OCF-TESTER "8" "July 2022" "Tool for testing if a cluster resource is OCF compliant" "System Administration Utilities" - .SH NAME - ocf-tester \- Part of the Linux-HA project - .SH SYNOPSIS - .B ocf-tester --[\fI-LhvqdX\fR] \fI-n resource_name \fR[\fI-o name=value\fR]\fI* /full/path/to/resource/agent\fR -+[\fI-hvqdX\fR] \fI-n resource_name \fR[\fI-o name=value\fR]\fI* /full/path/to/resource/agent\fR - .SH DESCRIPTION - Tool for testing if a cluster resource is OCF compliant - .SH OPTIONS -@@ -26,11 +26,6 @@ Name of the resource - \fB\-o\fR name=value - Name and value of any parameters required by the agent - .TP --\fB\-L\fR --Use lrmadmin/lrmd for tests --.PP --Usage: ocf\-tester [\-Lh] \fB\-n\fR resource_name [\-o name=value]* /full/path/to/resource/agent --.TP - \fB\-h\fR - This text - .TP -@@ -51,6 +46,3 @@ Name of the resource - .TP - \fB\-o\fR name=value - Name and value of any parameters required by 
the agent --.TP --\fB\-L\fR --Use lrmadmin/lrmd for tests -diff --git a/tools/ocf-tester.in b/tools/ocf-tester.in -index 10822a5a06..15b14e51ea 100755 ---- a/tools/ocf-tester.in -+++ b/tools/ocf-tester.in -@@ -25,8 +25,6 @@ - # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - # - --LRMD=@libdir@/heartbeat/lrmd --LRMADMIN=@sbindir@/lrmadmin - DATADIR=@datadir@ - METADATA_LINT="xmllint --noout --valid -" - -@@ -61,7 +59,6 @@ usage() { - echo " -X Turn on RA tracing (expect large output)" - echo " -n name Name of the resource" - echo " -o name=value Name and value of any parameters required by the agent" -- echo " -L Use lrmadmin/lrmd for tests" - exit $1 - } - -@@ -104,7 +101,6 @@ while test "$done" = "0"; do - -o) name=${2%%=*}; value=${2#*=}; - lrm_ra_args="$lrm_ra_args $2"; - ra_args="$ra_args OCF_RESKEY_$name='$value'"; shift; shift;; -- -L) use_lrmd=1; shift;; - -v) verbose=1; shift;; - -d) export HA_debug=1; shift;; - -X) export OCF_TRACE_RA=1; verbose=1; shift;; -@@ -140,79 +136,6 @@ stopped_rc=7 - has_demote=1 - has_promote=1 - --start_lrmd() { -- lrmd_timeout=0 -- lrmd_interval=0 -- lrmd_target_rc=EVERYTIME -- lrmd_started="" -- $LRMD -s 2>/dev/null -- rc=$? -- if [ $rc -eq 3 ]; then -- lrmd_started=1 -- $LRMD & -- sleep 1 -- $LRMD -s 2>/dev/null -- else -- return $rc -- fi --} --add_resource() { -- $LRMADMIN -A $OCF_RESOURCE_INSTANCE \ -- ocf \ -- `basename $agent` \ -- $(basename `dirname $agent`) \ -- $lrm_ra_args > /dev/null --} --del_resource() { -- $LRMADMIN -D $OCF_RESOURCE_INSTANCE --} --parse_lrmadmin_output() { -- awk ' --BEGIN{ rc=1; } --/Waiting for lrmd to callback.../ { n=1; next; } --n==1 && /----------------operation--------------/ { n++; next; } --n==2 && /return code:/ { rc=$0; sub("return code: *","",rc); next } --n==2 && /---------------------------------------/ { -- n++; -- next; --} --END{ -- if( n!=3 ) exit 1; -- else exit rc; --} --' --} --exec_resource() { -- op="$1" -- args="$2" -- $LRMADMIN -E $OCF_RESOURCE_INSTANCE \ -- $op $lrmd_timeout $lrmd_interval \ -- $lrmd_target_rc \ -- $args | parse_lrmadmin_output --} -- --if [ "$use_lrmd" = 1 ]; then -- echo "Using lrmd/lrmadmin for all tests" -- start_lrmd || { -- echo "could not start lrmd" >&2 -- exit 1 -- } -- trap ' -- [ "$lrmd_started" = 1 ] && $LRMD -k -- ' EXIT -- add_resource || { -- echo "failed to add resource to lrmd" >&2 -- exit 1 -- } --fi -- --lrm_test_command() { -- action="$1" -- msg="$2" -- debug "$msg" -- exec_resource $action "$lrm_ra_args" --} -- - test_permissions() { - action=meta-data - debug ${1:-"Testing permissions with uid nobody"} -@@ -233,10 +156,6 @@ test_command() { - action=$1; shift - export __OCF_ACTION=$action - msg=${1:-"Testing: $action"} -- if [ "$use_lrmd" = 1 ]; then -- lrm_test_command $action "$msg" -- return $? -- fi - #echo Running: "export $ra_args; $agent $action 2>&1 > /dev/null" - if [ $verbose -eq 0 ]; then - command_output=`$agent $action 2>&1` diff --git a/SOURCES/bz2109159-storage_mon-1-exit-after-help.patch b/SOURCES/bz2109159-storage_mon-1-exit-after-help.patch deleted file mode 100644 index a8fa868..0000000 --- a/SOURCES/bz2109159-storage_mon-1-exit-after-help.patch +++ /dev/null @@ -1,79 +0,0 @@ -From b3eadb8523b599af800a7c772606aa0e90cf142f Mon Sep 17 00:00:00 2001 -From: Fujii Masao -Date: Tue, 19 Jul 2022 17:03:02 +0900 -Subject: [PATCH 1/2] Make storage_mon -h exit just after printing help - messages. 
- -Previously, when -h or an invalid option was specified, storage_mon -printed the help messages, proceeded processing and then could -throw an error. This was not the behavior that, e.g., users who want -to specify -h option to see the help messages are expecting. To fix -this issue, this commit changes storage_mon so that it exits just -after printing the help messages when -h or an invalid option is -specified. ---- - tools/storage_mon.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/tools/storage_mon.c b/tools/storage_mon.c -index 7b65bb419..1303371f7 100644 ---- a/tools/storage_mon.c -+++ b/tools/storage_mon.c -@@ -28,7 +28,7 @@ static void usage(char *name, FILE *f) - fprintf(f, " --timeout max time to wait for a device test to come back. in seconds (default %d)\n", DEFAULT_TIMEOUT); - fprintf(f, " --inject-errors-percent Generate EIO errors %% of the time (for testing only)\n"); - fprintf(f, " --verbose emit extra output to stdout\n"); -- fprintf(f, " --help print this messages\n"); -+ fprintf(f, " --help print this messages, then exit\n"); - } - - /* Check one device */ -@@ -178,9 +178,11 @@ int main(int argc, char *argv[]) - break; - case 'h': - usage(argv[0], stdout); -+ exit(0); - break; - default: - usage(argv[0], stderr); -+ exit(-1); - break; - } - - -From e62795f02d25a772a239e0a4f9eb9d6470c134ee Mon Sep 17 00:00:00 2001 -From: Fujii Masao -Date: Tue, 19 Jul 2022 17:56:32 +0900 -Subject: [PATCH 2/2] Fix typo in help message. - ---- - tools/storage_mon.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/tools/storage_mon.c b/tools/storage_mon.c -index 1303371f7..3c82d5ee8 100644 ---- a/tools/storage_mon.c -+++ b/tools/storage_mon.c -@@ -28,7 +28,7 @@ static void usage(char *name, FILE *f) - fprintf(f, " --timeout max time to wait for a device test to come back. in seconds (default %d)\n", DEFAULT_TIMEOUT); - fprintf(f, " --inject-errors-percent Generate EIO errors %% of the time (for testing only)\n"); - fprintf(f, " --verbose emit extra output to stdout\n"); -- fprintf(f, " --help print this messages, then exit\n"); -+ fprintf(f, " --help print this message\n"); - } - - /* Check one device */ -@@ -178,11 +178,11 @@ int main(int argc, char *argv[]) - break; - case 'h': - usage(argv[0], stdout); -- exit(0); -+ return 0; - break; - default: - usage(argv[0], stderr); -- exit(-1); -+ return -1; - break; - } - diff --git a/SOURCES/bz2109159-storage_mon-2-fix-specified-scores-count.patch b/SOURCES/bz2109159-storage_mon-2-fix-specified-scores-count.patch deleted file mode 100644 index 8bbe33e..0000000 --- a/SOURCES/bz2109159-storage_mon-2-fix-specified-scores-count.patch +++ /dev/null @@ -1,36 +0,0 @@ -From a68957e8f1e8169438acf5a4321f47ed7d8ceec1 Mon Sep 17 00:00:00 2001 -From: Fujii Masao -Date: Tue, 19 Jul 2022 20:28:38 +0900 -Subject: [PATCH] storage_mon: Fix bug in checking of number of specified - scores. - -Previously specifying the maximum allowed number (MAX_DEVICES, currently 25) -of devices and scores as arguments could cause storage_mon to fail unexpectedly -with the error message "too many scores, max is 25". This issue happened -because storage_mon checked whether the number of specified scores -exceeded the upper limit by using the local variable "device_count" indicating -the number of specified devices (not scores). 
So after the maximum number -of devices arguments were interpreted, the appearance of next score argument -caused the error even when the number of interpreted scores arguments had -not exceeded the maximum. - -This patch fixes storage_mon so that it uses the local variable "score_count" -indicating the number of specified scores, to check whether arguments for -scores are specified more than the upper limit. ---- - tools/storage_mon.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tools/storage_mon.c b/tools/storage_mon.c -index 3c82d5ee8..c749076c2 100644 ---- a/tools/storage_mon.c -+++ b/tools/storage_mon.c -@@ -154,7 +154,7 @@ int main(int argc, char *argv[]) - } - break; - case 's': -- if (device_count < MAX_DEVICES) { -+ if (score_count < MAX_DEVICES) { - int score = atoi(optarg); - if (score < 1 || score > 10) { - fprintf(stderr, "Score must be between 1 and 10 inclusive\n"); diff --git a/SOURCES/bz2109159-storage_mon-3-fix-child-process-exit.patch b/SOURCES/bz2109159-storage_mon-3-fix-child-process-exit.patch deleted file mode 100644 index d02d584..0000000 --- a/SOURCES/bz2109159-storage_mon-3-fix-child-process-exit.patch +++ /dev/null @@ -1,43 +0,0 @@ -From c6ea93fcb499c84c3d8e9aad2ced65065a3f6d51 Mon Sep 17 00:00:00 2001 -From: Fujii Masao -Date: Tue, 19 Jul 2022 22:34:08 +0900 -Subject: [PATCH] Fix bug in handling of child process exit. - -When storage_mon detects that a child process exits with zero, -it resets the test_forks[] entry for the child process to 0, to avoid -waitpid() for the process again in the loop. But, previously, -storage_mon didn't do that when it detected that a child process -exited with non-zero. Which caused waitpid() to be called again -for the process already gone and to report an error like -"waitpid on XXX failed: No child processes" unexpectedly. -In this case, basically storage_mon should wait until all the child -processes exit and return the final score, instead. - -This patch fixes this issue by making storage_mon reset test_works[] -entry even when a child process exits with non-zero. 
---- - tools/storage_mon.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/tools/storage_mon.c b/tools/storage_mon.c -index 3c82d5ee8..83a48ca36 100644 ---- a/tools/storage_mon.c -+++ b/tools/storage_mon.c -@@ -232,13 +232,13 @@ int main(int argc, char *argv[]) - - if (w == test_forks[i]) { - if (WIFEXITED(wstatus)) { -- if (WEXITSTATUS(wstatus) == 0) { -- finished_count++; -- test_forks[i] = 0; -- } else { -+ if (WEXITSTATUS(wstatus) != 0) { - syslog(LOG_ERR, "Error reading from device %s", devices[i]); - final_score += scores[i]; - } -+ -+ finished_count++; -+ test_forks[i] = 0; - } - } - } diff --git a/SOURCES/bz2109159-storage_mon-4-fix-possible-false-negatives.patch b/SOURCES/bz2109159-storage_mon-4-fix-possible-false-negatives.patch deleted file mode 100644 index 8448bc6..0000000 --- a/SOURCES/bz2109159-storage_mon-4-fix-possible-false-negatives.patch +++ /dev/null @@ -1,417 +0,0 @@ -From 0bb52cf9985bda47e13940761b3d8e2eaddf377c Mon Sep 17 00:00:00 2001 -From: Kazunori INOUE -Date: Wed, 10 Aug 2022 17:35:54 +0900 -Subject: [PATCH 1/4] storage_mon: Use the O_DIRECT flag in open() to eliminate - cache effects - ---- - tools/Makefile.am | 1 + - tools/storage_mon.c | 82 +++++++++++++++++++++++++++++++++------------ - 2 files changed, 61 insertions(+), 22 deletions(-) - -diff --git a/tools/Makefile.am b/tools/Makefile.am -index 1309223b4..08323fee3 100644 ---- a/tools/Makefile.am -+++ b/tools/Makefile.am -@@ -74,6 +74,7 @@ sfex_stat_LDADD = $(GLIBLIB) -lplumb -lplumbgpl - findif_SOURCES = findif.c - - storage_mon_SOURCES = storage_mon.c -+storage_mon_CFLAGS = -D_GNU_SOURCE - - if BUILD_TICKLE - halib_PROGRAMS += tickle_tcp -diff --git a/tools/storage_mon.c b/tools/storage_mon.c -index 930ead41c..ba87492fc 100644 ---- a/tools/storage_mon.c -+++ b/tools/storage_mon.c -@@ -31,23 +31,27 @@ static void usage(char *name, FILE *f) - fprintf(f, " --help print this message\n"); - } - --/* Check one device */ --static void *test_device(const char *device, int verbose, int inject_error_percent) -+static int open_device(const char *device, int verbose) - { -- uint64_t devsize; - int device_fd; - int res; -+ uint64_t devsize; - off_t seek_spot; -- char buffer[512]; - -- if (verbose) { -- printf("Testing device %s\n", device); -+#if defined(__linux__) || defined(__FreeBSD__) -+ device_fd = open(device, O_RDONLY|O_DIRECT); -+ if (device_fd >= 0) { -+ return device_fd; -+ } else if (errno != EINVAL) { -+ fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno)); -+ return -1; - } -+#endif - - device_fd = open(device, O_RDONLY); - if (device_fd < 0) { - fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno)); -- exit(-1); -+ return -1; - } - #ifdef __FreeBSD__ - res = ioctl(device_fd, DIOCGMEDIASIZE, &devsize); -@@ -57,11 +61,12 @@ static void *test_device(const char *device, int verbose, int inject_error_perce - if (res != 0) { - fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno)); - close(device_fd); -- exit(-1); -+ return -1; - } - if (verbose) { - fprintf(stderr, "%s: size=%zu\n", device, devsize); - } -+ - /* Don't fret about real randomness */ - srand(time(NULL) + getpid()); - /* Pick a random place on the device - sector aligned */ -@@ -70,35 +75,64 @@ static void *test_device(const char *device, int verbose, int inject_error_perce - if (res < 0) { - fprintf(stderr, "Failed to seek %s: %s\n", device, strerror(errno)); - close(device_fd); -- exit(-1); -+ return -1; - } -- - if (verbose) { - printf("%s: reading from pos %ld\n", 
device, seek_spot); - } -+ return device_fd; -+} -+ -+/* Check one device */ -+static void *test_device(const char *device, int verbose, int inject_error_percent) -+{ -+ int device_fd; -+ int sec_size = 0; -+ int res; -+ void *buffer; -+ -+ if (verbose) { -+ printf("Testing device %s\n", device); -+ } -+ -+ device_fd = open_device(device, verbose); -+ if (device_fd < 0) { -+ exit(-1); -+ } -+ -+ ioctl(device_fd, BLKSSZGET, &sec_size); -+ if (sec_size == 0) { -+ fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno)); -+ goto error; -+ } - -- res = read(device_fd, buffer, sizeof(buffer)); -+ if (posix_memalign(&buffer, sysconf(_SC_PAGESIZE), sec_size) != 0) { -+ fprintf(stderr, "Failed to allocate aligned memory: %s\n", strerror(errno)); -+ goto error; -+ } -+ -+ res = read(device_fd, buffer, sec_size); -+ free(buffer); - if (res < 0) { - fprintf(stderr, "Failed to read %s: %s\n", device, strerror(errno)); -- close(device_fd); -- exit(-1); -+ goto error; - } -- if (res < (int)sizeof(buffer)) { -- fprintf(stderr, "Failed to read %ld bytes from %s, got %d\n", sizeof(buffer), device, res); -- close(device_fd); -- exit(-1); -+ if (res < sec_size) { -+ fprintf(stderr, "Failed to read %d bytes from %s, got %d\n", sec_size, device, res); -+ goto error; - } - - /* Fake an error */ -- if (inject_error_percent && ((rand() % 100) < inject_error_percent)) { -- fprintf(stderr, "People, please fasten your seatbelts, injecting errors!\n"); -- close(device_fd); -- exit(-1); -+ if (inject_error_percent) { -+ srand(time(NULL) + getpid()); -+ if ((rand() % 100) < inject_error_percent) { -+ fprintf(stderr, "People, please fasten your seatbelts, injecting errors!\n"); -+ goto error; -+ } - } - res = close(device_fd); - if (res != 0) { - fprintf(stderr, "Failed to close %s: %s\n", device, strerror(errno)); -- close(device_fd); - exit(-1); - } - -@@ -106,6 +140,10 @@ static void *test_device(const char *device, int verbose, int inject_error_perce - printf("%s: done\n", device); - } - exit(0); -+ -+error: -+ close(device_fd); -+ exit(-1); - } - - int main(int argc, char *argv[]) - -From ce4e632f29ed6b86b82a959eac5844655baed153 Mon Sep 17 00:00:00 2001 -From: Kazunori INOUE -Date: Mon, 15 Aug 2022 19:17:21 +0900 -Subject: [PATCH 2/4] storage_mon: fix build-related issues - ---- - tools/storage_mon.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/tools/storage_mon.c b/tools/storage_mon.c -index ba87492fc..e34d1975a 100644 ---- a/tools/storage_mon.c -+++ b/tools/storage_mon.c -@@ -38,7 +38,6 @@ static int open_device(const char *device, int verbose) - uint64_t devsize; - off_t seek_spot; - --#if defined(__linux__) || defined(__FreeBSD__) - device_fd = open(device, O_RDONLY|O_DIRECT); - if (device_fd >= 0) { - return device_fd; -@@ -46,7 +45,6 @@ static int open_device(const char *device, int verbose) - fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno)); - return -1; - } --#endif - - device_fd = open(device, O_RDONLY); - if (device_fd < 0) { -@@ -100,7 +98,11 @@ static void *test_device(const char *device, int verbose, int inject_error_perce - exit(-1); - } - -+#ifdef __FreeBSD__ -+ ioctl(device_fd, DIOCGSECTORSIZE, &sec_size); -+#else - ioctl(device_fd, BLKSSZGET, &sec_size); -+#endif - if (sec_size == 0) { - fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno)); - goto error; - -From 7a0aaa0dfdebeab3fae9fe9ddc412c3d1f610273 Mon Sep 17 00:00:00 2001 -From: Kazunori INOUE -Date: Wed, 24 Aug 2022 17:36:23 +0900 -Subject: [PATCH 3/4] storage_mon: 
do random lseek even with O_DIRECT, etc - ---- - tools/storage_mon.c | 118 ++++++++++++++++++++++---------------------- - 1 file changed, 58 insertions(+), 60 deletions(-) - -diff --git a/tools/storage_mon.c b/tools/storage_mon.c -index e34d1975a..0bdb48649 100644 ---- a/tools/storage_mon.c -+++ b/tools/storage_mon.c -@@ -31,38 +31,43 @@ static void usage(char *name, FILE *f) - fprintf(f, " --help print this message\n"); - } - --static int open_device(const char *device, int verbose) -+/* Check one device */ -+static void *test_device(const char *device, int verbose, int inject_error_percent) - { -+ uint64_t devsize; -+ int flags = O_RDONLY | O_DIRECT; - int device_fd; - int res; -- uint64_t devsize; - off_t seek_spot; - -- device_fd = open(device, O_RDONLY|O_DIRECT); -- if (device_fd >= 0) { -- return device_fd; -- } else if (errno != EINVAL) { -- fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno)); -- return -1; -+ if (verbose) { -+ printf("Testing device %s\n", device); - } - -- device_fd = open(device, O_RDONLY); -+ device_fd = open(device, flags); - if (device_fd < 0) { -- fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno)); -- return -1; -+ if (errno != EINVAL) { -+ fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno)); -+ exit(-1); -+ } -+ flags &= ~O_DIRECT; -+ device_fd = open(device, flags); -+ if (device_fd < 0) { -+ fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno)); -+ exit(-1); -+ } - } - #ifdef __FreeBSD__ - res = ioctl(device_fd, DIOCGMEDIASIZE, &devsize); - #else - res = ioctl(device_fd, BLKGETSIZE64, &devsize); - #endif -- if (res != 0) { -+ if (res < 0) { - fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno)); -- close(device_fd); -- return -1; -+ goto error; - } - if (verbose) { -- fprintf(stderr, "%s: size=%zu\n", device, devsize); -+ printf("%s: opened %s O_DIRECT, size=%zu\n", device, (flags & O_DIRECT)?"with":"without", devsize); - } - - /* Don't fret about real randomness */ -@@ -72,65 +77,58 @@ static int open_device(const char *device, int verbose) - res = lseek(device_fd, seek_spot, SEEK_SET); - if (res < 0) { - fprintf(stderr, "Failed to seek %s: %s\n", device, strerror(errno)); -- close(device_fd); -- return -1; -+ goto error; - } - if (verbose) { - printf("%s: reading from pos %ld\n", device, seek_spot); - } -- return device_fd; --} -- --/* Check one device */ --static void *test_device(const char *device, int verbose, int inject_error_percent) --{ -- int device_fd; -- int sec_size = 0; -- int res; -- void *buffer; -- -- if (verbose) { -- printf("Testing device %s\n", device); -- } - -- device_fd = open_device(device, verbose); -- if (device_fd < 0) { -- exit(-1); -- } -+ if (flags & O_DIRECT) { -+ int sec_size = 0; -+ void *buffer; - - #ifdef __FreeBSD__ -- ioctl(device_fd, DIOCGSECTORSIZE, &sec_size); -+ res = ioctl(device_fd, DIOCGSECTORSIZE, &sec_size); - #else -- ioctl(device_fd, BLKSSZGET, &sec_size); -+ res = ioctl(device_fd, BLKSSZGET, &sec_size); - #endif -- if (sec_size == 0) { -- fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno)); -- goto error; -- } -+ if (res < 0) { -+ fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno)); -+ goto error; -+ } - -- if (posix_memalign(&buffer, sysconf(_SC_PAGESIZE), sec_size) != 0) { -- fprintf(stderr, "Failed to allocate aligned memory: %s\n", strerror(errno)); -- goto error; -- } -+ if (posix_memalign(&buffer, sysconf(_SC_PAGESIZE), sec_size) != 0) { -+ fprintf(stderr, "Failed to allocate aligned memory: 
%s\n", strerror(errno)); -+ goto error; -+ } -+ res = read(device_fd, buffer, sec_size); -+ free(buffer); -+ if (res < 0) { -+ fprintf(stderr, "Failed to read %s: %s\n", device, strerror(errno)); -+ goto error; -+ } -+ if (res < sec_size) { -+ fprintf(stderr, "Failed to read %d bytes from %s, got %d\n", sec_size, device, res); -+ goto error; -+ } -+ } else { -+ char buffer[512]; - -- res = read(device_fd, buffer, sec_size); -- free(buffer); -- if (res < 0) { -- fprintf(stderr, "Failed to read %s: %s\n", device, strerror(errno)); -- goto error; -- } -- if (res < sec_size) { -- fprintf(stderr, "Failed to read %d bytes from %s, got %d\n", sec_size, device, res); -- goto error; -+ res = read(device_fd, buffer, sizeof(buffer)); -+ if (res < 0) { -+ fprintf(stderr, "Failed to read %s: %s\n", device, strerror(errno)); -+ goto error; -+ } -+ if (res < (int)sizeof(buffer)) { -+ fprintf(stderr, "Failed to read %ld bytes from %s, got %d\n", sizeof(buffer), device, res); -+ goto error; -+ } - } - - /* Fake an error */ -- if (inject_error_percent) { -- srand(time(NULL) + getpid()); -- if ((rand() % 100) < inject_error_percent) { -- fprintf(stderr, "People, please fasten your seatbelts, injecting errors!\n"); -- goto error; -- } -+ if (inject_error_percent && ((rand() % 100) < inject_error_percent)) { -+ fprintf(stderr, "People, please fasten your seatbelts, injecting errors!\n"); -+ goto error; - } - res = close(device_fd); - if (res != 0) { - -From db97e055a17526cec056c595844a9d8851e3ee19 Mon Sep 17 00:00:00 2001 -From: Kazunori INOUE -Date: Thu, 25 Aug 2022 16:03:46 +0900 -Subject: [PATCH 4/4] storage_mon: improve error messages when ioctl() fails - ---- - tools/storage_mon.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/tools/storage_mon.c b/tools/storage_mon.c -index 0bdb48649..f829c5081 100644 ---- a/tools/storage_mon.c -+++ b/tools/storage_mon.c -@@ -63,7 +63,7 @@ static void *test_device(const char *device, int verbose, int inject_error_perce - res = ioctl(device_fd, BLKGETSIZE64, &devsize); - #endif - if (res < 0) { -- fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno)); -+ fprintf(stderr, "Failed to get device size for %s: %s\n", device, strerror(errno)); - goto error; - } - if (verbose) { -@@ -93,7 +93,7 @@ static void *test_device(const char *device, int verbose, int inject_error_perce - res = ioctl(device_fd, BLKSSZGET, &sec_size); - #endif - if (res < 0) { -- fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno)); -+ fprintf(stderr, "Failed to get block device sector size for %s: %s\n", device, strerror(errno)); - goto error; - } - diff --git a/SOURCES/bz2116941-ethmonitor-ovsmonitor-pgsql-fix-attrd_updater-q.patch b/SOURCES/bz2116941-ethmonitor-ovsmonitor-pgsql-fix-attrd_updater-q.patch deleted file mode 100644 index f4c46f8..0000000 --- a/SOURCES/bz2116941-ethmonitor-ovsmonitor-pgsql-fix-attrd_updater-q.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 0063164d72bbaca68f12a2f0a7dbae9ccb41fa39 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 26 Jul 2022 09:08:26 +0200 -Subject: [PATCH] ethmonitor/ovsmonitor/pgsql: remove ignored attrd_updater - "-q" parameter - -attrd_updater in 2.1.3 no longer ignores the -q parameter, which makes -these agents break. It never did anything in attrd_updater, and is -probably left-over from copy/paste crm_attribute code that got changed -to attrd_updater. 
---- - heartbeat/ethmonitor | 2 +- - heartbeat/ovsmonitor | 2 +- - heartbeat/pgsql | 8 ++++---- - 3 files changed, 6 insertions(+), 6 deletions(-) - -diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor -index ba8574131..451738a0b 100755 ---- a/heartbeat/ethmonitor -+++ b/heartbeat/ethmonitor -@@ -464,7 +464,7 @@ END - - set_cib_value() { - local score=`expr $1 \* $OCF_RESKEY_multiplier` -- attrd_updater -n $ATTRNAME -v $score -q -+ attrd_updater -n $ATTRNAME -v $score - local rc=$? - case $rc in - 0) ocf_log debug "attrd_updater: Updated $ATTRNAME = $score" ;; -diff --git a/heartbeat/ovsmonitor b/heartbeat/ovsmonitor -index 872ce86eb..6765da4b9 100755 ---- a/heartbeat/ovsmonitor -+++ b/heartbeat/ovsmonitor -@@ -355,7 +355,7 @@ END - - set_cib_value() { - local score=`expr $1 \* $OCF_RESKEY_multiplier` -- attrd_updater -n $ATTRNAME -v $score -q -+ attrd_updater -n $ATTRNAME -v $score - local rc=$? - case $rc in - 0) ocf_log debug "attrd_updater: Updated $ATTRNAME = $score" ;; -diff --git a/heartbeat/pgsql b/heartbeat/pgsql -index 94aceb324..e93d66855 100755 ---- a/heartbeat/pgsql -+++ b/heartbeat/pgsql -@@ -808,7 +808,7 @@ pgsql_real_stop() { - local stop_escalate - - if ocf_is_true ${OCF_RESKEY_check_wal_receiver}; then -- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -D -q -+ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -D - fi - - if ! pgsql_status -@@ -937,16 +937,16 @@ pgsql_wal_receiver_status() { - receiver_parent_pids=`ps -ef | tr -s " " | grep "[w]al\s*receiver" | cut -d " " -f 3` - - if echo "$receiver_parent_pids" | grep -q -w "$PID" ; then -- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal" -q -+ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal" - return 0 - fi - - if [ $pgsql_real_monitor_status -eq "$OCF_RUNNING_MASTER" ]; then -- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal (master)" -q -+ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal (master)" - return 0 - fi - -- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "ERROR" -q -+ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "ERROR" - ocf_log warn "wal receiver process is not running" - return 1 - } diff --git a/SOURCES/bz2127117-nfsserver-nfsv4_only-parameter.patch b/SOURCES/bz2127117-nfsserver-nfsv4_only-parameter.patch deleted file mode 100644 index 9bcbb41..0000000 --- a/SOURCES/bz2127117-nfsserver-nfsv4_only-parameter.patch +++ /dev/null @@ -1,298 +0,0 @@ -From 764757380af19d3a21d40f3c9624e4135ff074e1 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 2 Nov 2022 10:26:31 +0100 -Subject: [PATCH] nfsserver: add nfsv4_only parameter to make it run without - rpc-statd/rpcbind services - ---- - heartbeat/nfsserver | 200 +++++++++++++++++++++++++------------------- - 1 file changed, 114 insertions(+), 86 deletions(-) - -diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver -index 9bbd603e5..cb2d43ab1 100755 ---- a/heartbeat/nfsserver -+++ b/heartbeat/nfsserver -@@ -79,6 +79,16 @@ Init script for nfsserver - - - -+ -+ -+Run in NFSv4 only mode (rpc-statd and rpcbind services masked). -+ -+ -+NFSv4 only mode. -+ -+ -+ -+ - - - Do not send reboot notifications to NFSv3 clients during server startup. 
-@@ -332,7 +342,7 @@ v3locking_exec() - if [ $EXEC_MODE -eq 2 ]; then - nfs_exec $cmd nfs-lock.service - elif [ $EXEC_MODE -eq 3 ]; then -- nfs_exec $cmd rpc-statd.service -+ nfs_exec $cmd rpc-statd.service - else - case $cmd in - start) locking_start;; -@@ -348,20 +358,22 @@ nfsserver_systemd_monitor() - local rc - local fn - -- ocf_log debug "Status: rpcbind" -- rpcinfo > /dev/null 2>&1 -- rc=$? -- if [ "$rc" -ne "0" ]; then -- ocf_exit_reason "rpcbind is not running" -- return $OCF_NOT_RUNNING -- fi -+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then -+ ocf_log debug "Status: rpcbind" -+ rpcinfo > /dev/null 2>&1 -+ rc=$? -+ if [ "$rc" -ne "0" ]; then -+ ocf_exit_reason "rpcbind is not running" -+ return $OCF_NOT_RUNNING -+ fi - -- ocf_log debug "Status: nfs-mountd" -- ps axww | grep -q "[r]pc.mountd" -- rc=$? -- if [ "$rc" -ne "0" ]; then -- ocf_exit_reason "nfs-mountd is not running" -- return $OCF_NOT_RUNNING -+ ocf_log debug "Status: nfs-mountd" -+ ps axww | grep -q "[r]pc.mountd" -+ rc=$? -+ if [ "$rc" -ne "0" ]; then -+ ocf_exit_reason "nfs-mountd is not running" -+ return $OCF_NOT_RUNNING -+ fi - fi - - ocf_log debug "Status: nfs-idmapd" -@@ -375,12 +387,14 @@ nfsserver_systemd_monitor() - return $OCF_NOT_RUNNING - fi - -- ocf_log debug "Status: rpc-statd" -- rpcinfo -t localhost 100024 > /dev/null 2>&1 -- rc=$? -- if [ "$rc" -ne "0" ]; then -- ocf_exit_reason "rpc-statd is not running" -- return $OCF_NOT_RUNNING -+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then -+ ocf_log debug "Status: rpc-statd" -+ rpcinfo -t localhost 100024 > /dev/null 2>&1 -+ rc=$? -+ if [ "$rc" -ne "0" ]; then -+ ocf_exit_reason "rpc-statd is not running" -+ return $OCF_NOT_RUNNING -+ fi - fi - - nfs_exec is-active nfs-server -@@ -424,7 +438,7 @@ nfsserver_monitor () - if [ $rc -eq 0 ]; then - # don't report success if nfs servers are up - # without locking daemons. -- v3locking_exec "status" -+ ocf_is_true "$OCF_RESKEY_nfsv4_only" || v3locking_exec "status" - rc=$? - if [ $rc -ne 0 ]; then - ocf_exit_reason "NFS server is up, but the locking daemons are down" -@@ -786,48 +800,54 @@ nfsserver_start () - - # systemd - case $EXEC_MODE in -- [23]) nfs_exec start rpcbind -- local i=1 -- while : ; do -- ocf_log info "Start: rpcbind i: $i" -- rpcinfo > /dev/null 2>&1 -- rc=$? -- if [ "$rc" -eq "0" ]; then -- break; -- fi -- sleep 1 -- i=$((i + 1)) -- done -+ [23]) if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then -+ nfs_exec start rpcbind -+ local i=1 -+ while : ; do -+ ocf_log info "Start: rpcbind i: $i" -+ rpcinfo > /dev/null 2>&1 -+ rc=$? -+ if [ "$rc" -eq "0" ]; then -+ break -+ fi -+ sleep 1 -+ i=$((i + 1)) -+ done -+ fi - ;; - esac - -- # check to see if we need to start rpc.statd -- v3locking_exec "status" -- if [ $? -ne $OCF_SUCCESS ]; then -- v3locking_exec "start" -- rc=$? -- if [ $rc -ne 0 ]; then -- ocf_exit_reason "Failed to start NFS server locking daemons" -- return $rc -+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then -+ # check to see if we need to start rpc.statd -+ v3locking_exec "status" -+ if [ $? -ne $OCF_SUCCESS ]; then -+ v3locking_exec "start" -+ rc=$? -+ if [ $rc -ne 0 ]; then -+ ocf_exit_reason "Failed to start NFS server locking daemons" -+ return $rc -+ fi -+ else -+ ocf_log info "rpc.statd already up" - fi -- else -- ocf_log info "rpc.statd already up" - fi - - # systemd - case $EXEC_MODE in -- [23]) nfs_exec start nfs-mountd -- local i=1 -- while : ; do -- ocf_log info "Start: nfs-mountd i: $i" -- ps axww | grep -q "[r]pc.mountd" -- rc=$? 
-- if [ "$rc" -eq "0" ]; then -- break; -- fi -- sleep 1 -- i=$((i + 1)) -- done -+ [23]) if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then -+ nfs_exec start nfs-mountd -+ local i=1 -+ while : ; do -+ ocf_log info "Start: nfs-mountd i: $i" -+ ps axww | grep -q "[r]pc.mountd" -+ rc=$? -+ if [ "$rc" -eq "0" ]; then -+ break -+ fi -+ sleep 1 -+ i=$((i + 1)) -+ done -+ fi - - nfs_exec start nfs-idmapd - local i=1 -@@ -839,24 +859,26 @@ nfsserver_start () - ocf_log debug "$(cat $fn)" - rm -f $fn - if [ "$rc" -eq "0" ]; then -- break; -+ break - fi - sleep 1 - i=$((i + 1)) - done - -- nfs_exec start rpc-statd -- local i=1 -- while : ; do -- ocf_log info "Start: rpc-statd i: $i" -- rpcinfo -t localhost 100024 > /dev/null 2>&1 -- rc=$? -- if [ "$rc" -eq "0" ]; then -- break; -- fi -- sleep 1 -- i=$((i + 1)) -- done -+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then -+ nfs_exec start rpc-statd -+ local i=1 -+ while : ; do -+ ocf_log info "Start: rpc-statd i: $i" -+ rpcinfo -t localhost 100024 > /dev/null 2>&1 -+ rc=$? -+ if [ "$rc" -eq "0" ]; then -+ break -+ fi -+ sleep 1 -+ i=$((i + 1)) -+ done -+ fi - esac - - -@@ -914,13 +936,15 @@ nfsserver_stop () - sleep 1 - done - -- nfs_exec stop rpc-statd > /dev/null 2>&1 -- ocf_log info "Stop: rpc-statd" -- rpcinfo -t localhost 100024 > /dev/null 2>&1 -- rc=$? -- if [ "$rc" -eq "0" ]; then -- ocf_exit_reason "Failed to stop rpc-statd" -- return $OCF_ERR_GENERIC -+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then -+ nfs_exec stop rpc-statd > /dev/null 2>&1 -+ ocf_log info "Stop: rpc-statd" -+ rpcinfo -t localhost 100024 > /dev/null 2>&1 -+ rc=$? -+ if [ "$rc" -eq "0" ]; then -+ ocf_exit_reason "Failed to stop rpc-statd" -+ return $OCF_ERR_GENERIC -+ fi - fi - - nfs_exec stop nfs-idmapd > /dev/null 2>&1 -@@ -935,13 +959,15 @@ nfsserver_stop () - return $OCF_ERR_GENERIC - fi - -- nfs_exec stop nfs-mountd > /dev/null 2>&1 -- ocf_log info "Stop: nfs-mountd" -- ps axww | grep -q "[r]pc.mountd" -- rc=$? -- if [ "$rc" -eq "0" ]; then -- ocf_exit_reason "Failed to stop nfs-mountd" -- return $OCF_ERR_GENERIC -+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then -+ nfs_exec stop nfs-mountd > /dev/null 2>&1 -+ ocf_log info "Stop: nfs-mountd" -+ ps axww | grep -q "[r]pc.mountd" -+ rc=$? -+ if [ "$rc" -eq "0" ]; then -+ ocf_exit_reason "Failed to stop nfs-mountd" -+ return $OCF_ERR_GENERIC -+ fi - fi - - if systemctl --no-legend list-unit-files "nfsdcld*" | grep -q nfsdcld; then -@@ -960,10 +986,12 @@ nfsserver_stop () - esac - - -- v3locking_exec "stop" -- if [ $? -ne 0 ]; then -- ocf_exit_reason "Failed to stop NFS locking daemons" -- rc=$OCF_ERR_GENERIC -+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then -+ v3locking_exec "stop" -+ if [ $? 
-ne 0 ]; then -+ ocf_exit_reason "Failed to stop NFS locking daemons" -+ rc=$OCF_ERR_GENERIC -+ fi - fi - - # systemd diff --git a/SOURCES/bz2133682-IPsrcaddr-proto-metric-scope-default-route-fixes.patch b/SOURCES/bz2133682-IPsrcaddr-proto-metric-scope-default-route-fixes.patch deleted file mode 100644 index 8722395..0000000 --- a/SOURCES/bz2133682-IPsrcaddr-proto-metric-scope-default-route-fixes.patch +++ /dev/null @@ -1,147 +0,0 @@ -From 237d55120a7c8d761f839c96651e722b3bb3bc88 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 12 Oct 2022 13:57:30 +0200 -Subject: [PATCH 1/4] IPsrcaddr: fix PROTO regex - ---- - heartbeat/IPsrcaddr | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index 7dbf65ff5..24406d296 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -188,7 +188,7 @@ IPADDR="\($OCTET\.\)\{3\}$OCTET" - SRCCLAUSE="src$WS$WS*\($IPADDR\)" - MATCHROUTE="\(.*${WS}\)\($SRCCLAUSE\)\($WS.*\|$\)" - METRICCLAUSE=".*\(metric$WS[^ ]\+\)" --PROTOCLAUSE=".*\(proto$WS[^ ]\+\)" -+PROTOCLAUSE=".*\(proto$WS[^ ]\+\).*" - FINDIF=findif - - # findif needs that to be set - -From c70ba457851a401cb201cb87d23bdbc5f4fcd2b3 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 12 Oct 2022 14:00:30 +0200 -Subject: [PATCH 2/4] IPsrcaddr: detect metric for main table only, and allow - specifying metric if necessary - ---- - heartbeat/IPsrcaddr | 18 +++++++++++++++++- - 1 file changed, 17 insertions(+), 1 deletion(-) - -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index 24406d296..4745eb8a7 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -59,12 +59,14 @@ OCF_RESKEY_ipaddress_default="" - OCF_RESKEY_cidr_netmask_default="" - OCF_RESKEY_destination_default="0.0.0.0/0" - OCF_RESKEY_proto_default="" -+OCF_RESKEY_metric_default="" - OCF_RESKEY_table_default="" - - : ${OCF_RESKEY_ipaddress=${OCF_RESKEY_ipaddress_default}} - : ${OCF_RESKEY_cidr_netmask=${OCF_RESKEY_cidr_netmask_default}} - : ${OCF_RESKEY_destination=${OCF_RESKEY_destination_default}} - : ${OCF_RESKEY_proto=${OCF_RESKEY_proto_default}} -+: ${OCF_RESKEY_metric=${OCF_RESKEY_metric_default}} - : ${OCF_RESKEY_table=${OCF_RESKEY_table_default}} - ####################################################################### - -@@ -143,6 +145,14 @@ Proto to match when finding network. E.g. "kernel". - - - -+ -+ -+Metric. Only needed if incorrect metric value is used. -+ -+Metric -+ -+ -+ - - - Table to modify. E.g. "local". -@@ -548,8 +558,14 @@ rc=$? 
- - INTERFACE=`echo $findif_out | awk '{print $1}'` - LISTROUTE=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress` --METRIC=`echo $LISTROUTE | sed -n "s/$METRICCLAUSE/\1/p"` - [ -z "$PROTO" ] && PROTO=`echo $LISTROUTE | sed -n "s/$PROTOCLAUSE/\1/p"` -+if [ -n "$OCF_RESKEY_metric" ]; then -+ METRIC="metric $OCF_RESKEY_metric" -+elif [ -z "$TABLE" ] || [ "${TABLE#table }" = "main" ]; then -+ METRIC=`echo $LISTROUTE | sed -n "s/$METRICCLAUSE/\1/p"` -+else -+ METRIC="" -+fi - if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then - NETWORK=`echo $LISTROUTE | grep -m 1 -o '^[^ ]*'` - - -From c514f12f7a19440f475938f2a4659e5e9667fa25 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 12 Oct 2022 14:01:26 +0200 -Subject: [PATCH 3/4] IPsrcaddr: use scope host when using non-main tables - ---- - heartbeat/IPsrcaddr | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index 4745eb8a7..926246008 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -279,8 +279,14 @@ srca_stop() { - - [ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address" - -+ if [ -z "$TABLE" ] || [ "${TABLE#table }" = "main" ]; then -+ SCOPE="link" -+ else -+ SCOPE="host" -+ fi -+ - PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')" -- OPTS="proto kernel scope link src $PRIMARY_IP" -+ OPTS="proto kernel scope $SCOPE src $PRIMARY_IP" - - $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC || \ - errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC' failed" - -From 1f387ac8017b3eee23b41eadafd58ce21a29eb21 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 13 Oct 2022 13:11:28 +0200 -Subject: [PATCH 4/4] IPsrcaddr: fix monitor/status for default route not being - equal to src IP before start, and change route src correctly in stop-action - ---- - heartbeat/IPsrcaddr | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index 926246008..1bd41a930 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -229,6 +229,7 @@ srca_read() { - - [ -z "$SRCIP" ] && return 1 - [ $SRCIP = $1 ] && return 0 -+ [ "$__OCF_ACTION" = "monitor" ] || [ "$__OCF_ACTION" = "status" ] && [ "${ROUTE%% *}" = "default" ] && return 1 - return 2 - } - -@@ -292,8 +293,8 @@ srca_stop() { - errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC' failed" - - if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then -- $CMDCHANGE $ROUTE_WO_SRC || \ -- errorexit "command '$CMDCHANGE $ROUTE_WO_SRC' failed" -+ $CMDCHANGE $ROUTE_WO_SRC src $PRIMARY_IP || \ -+ errorexit "command '$CMDCHANGE $ROUTE_WO_SRC src $PRIMARY_IP' failed" - fi - - return $? 
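The IPsrcaddr fixes removed above all center on rewriting the preferred source address ("src" hint) of an existing route with "ip route replace", and on detecting the route's metric and scope so the replacement matches what the kernel already holds. As a minimal illustration of that pattern only — not the agent's actual code, and using placeholder interface and addresses — the core operation looks like this in shell:

    # Illustrative sketch (requires root); eth0, 192.0.2.1 and 192.0.2.10 are
    # placeholder values, not taken from the patches above.
    # Re-install the default route, forcing a preferred source address:
    ip route replace default via 192.0.2.1 dev eth0 src 192.0.2.10
    # Check which source address the kernel would now pick for an external host:
    ip route get 198.51.100.1

If the metric or scope passed to "ip route replace" does not match the existing entry, the kernel treats it as a different route and adds a second entry instead of replacing the first — the kind of mismatch the metric and scope handling in the patches above guards against.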
diff --git a/SOURCES/bz2139131-mysql-common-return-error-if-kill-fails.patch b/SOURCES/bz2139131-mysql-common-return-error-if-kill-fails.patch deleted file mode 100644 index e6267f8..0000000 --- a/SOURCES/bz2139131-mysql-common-return-error-if-kill-fails.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 97a05e0e662ed922c9ecd016b39ab90ee233d5c9 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 24 Nov 2022 10:36:56 +0100 -Subject: [PATCH] mysql-common: return error in stop-action if kill fails to - stop the process, so the node can get fenced - ---- - heartbeat/mysql-common.sh | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh -index 34e1c6748..8104019b0 100755 ---- a/heartbeat/mysql-common.sh -+++ b/heartbeat/mysql-common.sh -@@ -318,6 +318,10 @@ mysql_common_stop() - if [ $? != $OCF_NOT_RUNNING ]; then - ocf_log info "MySQL failed to stop after ${shutdown_timeout}s using SIGTERM. Trying SIGKILL..." - /bin/kill -KILL $pid > /dev/null -+ mysql_common_status info $pid -+ if [ $? != $OCF_NOT_RUNNING ]; then -+ return $OCF_ERR_GENERIC -+ fi - fi - - ocf_log info "MySQL stopped"; diff --git a/SOURCES/bz2141836-vdo-vol-dont-fail-probe-action.patch b/SOURCES/bz2141836-vdo-vol-dont-fail-probe-action.patch deleted file mode 100644 index 28c28ce..0000000 --- a/SOURCES/bz2141836-vdo-vol-dont-fail-probe-action.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 739e6ce9096facd6d37dffd524c79c961e3fae38 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Fri, 11 Nov 2022 14:17:39 +0100 -Subject: [PATCH] vdo-vol: dont fail probe action when the underlying device - doesnt exist - ---- - heartbeat/vdo-vol | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/heartbeat/vdo-vol b/heartbeat/vdo-vol -index 94822cb82..29bd7b8fd 100755 ---- a/heartbeat/vdo-vol -+++ b/heartbeat/vdo-vol -@@ -148,6 +148,12 @@ vdo_monitor(){ - MODE=$(vdostats --verbose ${OCF_RESKEY_volume} | grep "operating mode" | awk '{print $NF}') - - case "$status" in -+ *"ERROR - vdodumpconfig: Failed to make FileLayer from"*) -+ if ocf_is_probe; then -+ return $OCF_NOT_RUNNING -+ fi -+ return $OCF_ERR_GENERIC -+ ;; - *"Device mapper status: not available"*) - return $OCF_NOT_RUNNING - ;; diff --git a/SOURCES/bz2149970-lvmlockd-add-use_lvmlockd-if-missing.patch b/SOURCES/bz2149970-lvmlockd-add-use_lvmlockd-if-missing.patch deleted file mode 100644 index ef5e34e..0000000 --- a/SOURCES/bz2149970-lvmlockd-add-use_lvmlockd-if-missing.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 2695888c983df331b0fee407a5c69c493a360313 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 30 Nov 2022 12:07:05 +0100 -Subject: [PATCH] lvmlockd: add "use_lvmlockd = 1" if it's commented out or - missing - ---- - heartbeat/lvmlockd | 17 +++++++++++++---- - 1 file changed, 13 insertions(+), 4 deletions(-) - -diff --git a/heartbeat/lvmlockd b/heartbeat/lvmlockd -index dc7bd2d7e..f4b299f28 100755 ---- a/heartbeat/lvmlockd -+++ b/heartbeat/lvmlockd -@@ -180,14 +180,23 @@ setup_lvm_config() - lock_type=$(echo "$out" | cut -d'=' -f2) - - if [ -z "$use_lvmlockd" ]; then -- ocf_exit_reason "\"use_lvmlockd\" not set in /etc/lvm/lvm.conf ..." -- exit $OCF_ERR_CONFIGURED -- fi -+ ocf_log info "adding \"use_lvmlockd=1\" to /etc/lvm/lvm.conf ..." -+ cat >> /etc/lvm/lvm.conf << EOF -+ -+global { -+ use_lvmlockd = 1 -+} -+EOF - -- if [ -n "$use_lvmlockd" ] && [ "$use_lvmlockd" != 1 ] ; then -+ if [ $? -ne 0 ]; then -+ ocf_exit_reason "unable to add \"use_lvmlockd=1\" to /etc/lvm/lvm.conf ..." 
-+ exit $OCF_ERR_CONFIGURED -+ fi -+ elif [ "$use_lvmlockd" != 1 ] ; then - ocf_log info "setting \"use_lvmlockd=1\" in /etc/lvm/lvm.conf ..." - sed -i 's,^[[:blank:]]*use_lvmlockd[[:blank:]]*=.*,\ \ \ \ use_lvmlockd = 1,g' /etc/lvm/lvm.conf - fi -+ - if [ -n "$lock_type" ] ; then - # locking_type was removed from config in v2.03 - ocf_version_cmp "$(lvmconfig --version | awk '/LVM ver/ {sub(/\(.*/, "", $3); print $3}')" "2.03" diff --git a/SOURCES/bz2154727-ethmonitor-dont-log-iface-doesnt-exist-monitor.patch b/SOURCES/bz2154727-ethmonitor-dont-log-iface-doesnt-exist-monitor.patch deleted file mode 100644 index 6b771b6..0000000 --- a/SOURCES/bz2154727-ethmonitor-dont-log-iface-doesnt-exist-monitor.patch +++ /dev/null @@ -1,24 +0,0 @@ -From e7a748d35fe56f2be727ecae1885a2f1366f41bf Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 15 Mar 2023 13:03:07 +0100 -Subject: [PATCH] ethmonitor: dont log "Interface does not exist" for - monitor-action - ---- - heartbeat/ethmonitor | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor -index 451738a0b5..f9c9ef4bdd 100755 ---- a/heartbeat/ethmonitor -+++ b/heartbeat/ethmonitor -@@ -271,6 +271,9 @@ if_init() { - validate-all) - ocf_exit_reason "Interface $NIC does not exist" - exit $OCF_ERR_CONFIGURED;; -+ monitor) -+ ocf_log debug "Interface $NIC does not exist" -+ ;; - *) - ## It might be a bond interface which is temporarily not available, therefore we want to continue here - ocf_log warn "Interface $NIC does not exist" diff --git a/SOURCES/bz2157873-1-all-ras-validate-all-OCF_CHECK_LEVEL-10.patch b/SOURCES/bz2157873-1-all-ras-validate-all-OCF_CHECK_LEVEL-10.patch deleted file mode 100644 index 85f5f48..0000000 --- a/SOURCES/bz2157873-1-all-ras-validate-all-OCF_CHECK_LEVEL-10.patch +++ /dev/null @@ -1,137 +0,0 @@ -From bf89ad06d5da5c05533c80a37a37c8dbbcd123aa Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 8 Dec 2022 15:40:07 +0100 -Subject: [PATCH] galera/mpathpersist/sg_persist/IPsrcaddr: only check notify - and promotable when OCF_CHECK_LEVEL=10 - -Pacemaker has started running validate-all action before creating the -resource. It doesnt provide notify/promotable settings while doing so, -so this patch moves these checks to OCF_CHECK_LEVEL 10 and runs the -validate action at OCF_CHECK_LEVEL 10 in the start-action. ---- - heartbeat/IPsrcaddr | 13 ++++++++----- - heartbeat/galera.in | 9 ++++++--- - heartbeat/mpathpersist.in | 13 +++++++++---- - heartbeat/sg_persist.in | 13 +++++++++---- - 4 files changed, 32 insertions(+), 16 deletions(-) - -diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr -index 1bd41a930..66e2ad8cd 100755 ---- a/heartbeat/IPsrcaddr -+++ b/heartbeat/IPsrcaddr -@@ -510,11 +510,13 @@ srca_validate_all() { - fi - - # We should serve this IP address of course -- if ip_status "$ipaddress"; then -- : -- else -- ocf_exit_reason "We are not serving [$ipaddress], hence can not make it a preferred source address" -- return $OCF_ERR_INSTALLED -+ if [ "$OCF_CHECK_LEVEL" -eq 10 ]; then -+ if ip_status "$ipaddress"; then -+ : -+ else -+ ocf_exit_reason "We are not serving [$ipaddress], hence can not make it a preferred source address" -+ return $OCF_ERR_INSTALLED -+ fi - fi - return $OCF_SUCCESS - } -@@ -540,6 +542,7 @@ esac - - ipaddress="$OCF_RESKEY_ipaddress" - -+[ "$__OCF_ACTION" != "validate-all" ] && OCF_CHECK_LEVEL=10 - srca_validate_all - rc=$? 
- if [ $rc -ne $OCF_SUCCESS ]; then -diff --git a/heartbeat/galera.in b/heartbeat/galera.in -index cd2fee7c0..6aed3e4b6 100755 ---- a/heartbeat/galera.in -+++ b/heartbeat/galera.in -@@ -1015,9 +1015,11 @@ galera_stop() - - galera_validate() - { -- if ! ocf_is_ms; then -- ocf_exit_reason "Galera must be configured as a multistate Master/Slave resource." -- return $OCF_ERR_CONFIGURED -+ if [ "$OCF_CHECK_LEVEL" -eq 10 ]; then -+ if ! ocf_is_ms; then -+ ocf_exit_reason "Galera must be configured as a multistate Master/Slave resource." -+ return $OCF_ERR_CONFIGURED -+ fi - fi - - if [ -z "$OCF_RESKEY_wsrep_cluster_address" ]; then -@@ -1035,6 +1037,7 @@ case "$1" in - exit $OCF_SUCCESS;; - esac - -+[ "$__OCF_ACTION" = "start" ] && OCF_CHECK_LEVEL=10 - galera_validate - rc=$? - LSB_STATUS_STOPPED=3 -diff --git a/heartbeat/mpathpersist.in b/heartbeat/mpathpersist.in -index 0e2c2a4a0..8a46b9930 100644 ---- a/heartbeat/mpathpersist.in -+++ b/heartbeat/mpathpersist.in -@@ -630,10 +630,11 @@ mpathpersist_action_notify() { - } - - mpathpersist_action_validate_all () { -- -- if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then -- ocf_log err "Master options misconfigured." -- exit $OCF_ERR_CONFIGURED -+ if [ "$OCF_CHECK_LEVEL" -eq 10 ]; then -+ if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then -+ ocf_log err "Master options misconfigured." -+ exit $OCF_ERR_CONFIGURED -+ fi - fi - - return $OCF_SUCCESS -@@ -659,6 +660,10 @@ case $ACTION in - start|promote|monitor|stop|demote) - ocf_log debug "$RESOURCE: starting action \"$ACTION\"" - mpathpersist_init -+ if [ "$__OCF_ACTION" = "start" ]; then -+ OCF_CHECK_LEVEL=10 -+ mpathpersist_action_validate_all -+ fi - mpathpersist_action_$ACTION - exit $? - ;; -diff --git a/heartbeat/sg_persist.in b/heartbeat/sg_persist.in -index 16048ea6f..620c02f4a 100644 ---- a/heartbeat/sg_persist.in -+++ b/heartbeat/sg_persist.in -@@ -643,10 +643,11 @@ sg_persist_action_notify() { - } - - sg_persist_action_validate_all () { -- -- if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then -- ocf_log err "Master options misconfigured." -- exit $OCF_ERR_CONFIGURED -+ if [ "$OCF_CHECK_LEVEL" -eq 10 ]; then -+ if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then -+ ocf_log err "Master options misconfigured." -+ exit $OCF_ERR_CONFIGURED -+ fi - fi - - return $OCF_SUCCESS -@@ -672,6 +673,10 @@ case $ACTION in - start|promote|monitor|stop|demote) - ocf_log debug "$RESOURCE: starting action \"$ACTION\"" - sg_persist_init -+ if [ "$__OCF_ACTION" = "start" ]; then -+ OCF_CHECK_LEVEL=10 -+ sg_persist_action_validate_all -+ fi - sg_persist_action_$ACTION - exit $? 
- ;; diff --git a/SOURCES/bz2157873-2-Filesystem-CTDB-validate-all-improvements.patch b/SOURCES/bz2157873-2-Filesystem-CTDB-validate-all-improvements.patch deleted file mode 100644 index bd95157..0000000 --- a/SOURCES/bz2157873-2-Filesystem-CTDB-validate-all-improvements.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 21666c5c842b8a6028699ee78db75a1d7134fad0 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 4 Jan 2023 10:39:16 +0100 -Subject: [PATCH 1/2] Filesystem: remove validate-all mountpoint warning as it - is auto-created during start-action if it doesnt exist - ---- - heartbeat/Filesystem | 4 ---- - 1 file changed, 4 deletions(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index 44270ad98..65088029e 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -851,10 +851,6 @@ Filesystem_monitor() - # - Filesystem_validate_all() - { -- if [ -n "$MOUNTPOINT" ] && [ ! -d "$MOUNTPOINT" ]; then -- ocf_log warn "Mountpoint $MOUNTPOINT does not exist" -- fi -- - # Check if the $FSTYPE is workable - # NOTE: Without inserting the $FSTYPE module, this step may be imprecise - # TODO: This is Linux specific crap. - -From 8a7f40b6ab93d8d39230d864ab06a57ff48d6f1f Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 5 Jan 2023 13:09:48 +0100 -Subject: [PATCH 2/2] CTDB: change public_addresses validate-all warning to - info - ---- - heartbeat/CTDB.in | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in -index 46f56cfac..b4af66bc1 100755 ---- a/heartbeat/CTDB.in -+++ b/heartbeat/CTDB.in -@@ -940,7 +940,7 @@ ctdb_validate() { - fi - - if [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ]; then -- ocf_log warn "CTDB file '${OCF_RESKEY_ctdb_config_dir}/public_addresses' exists - CTDB will try to manage IP failover!" -+ ocf_log info "CTDB file '${OCF_RESKEY_ctdb_config_dir}/public_addresses' exists - CTDB will try to manage IP failover!" - fi - - if [ ! 
-f "$OCF_RESKEY_ctdb_config_dir/nodes" ]; then diff --git a/SOURCES/bz2157873-3-pgsqlms-validate-all-OCF_CHECK_LEVEL-10.patch b/SOURCES/bz2157873-3-pgsqlms-validate-all-OCF_CHECK_LEVEL-10.patch deleted file mode 100644 index 7b98a63..0000000 --- a/SOURCES/bz2157873-3-pgsqlms-validate-all-OCF_CHECK_LEVEL-10.patch +++ /dev/null @@ -1,68 +0,0 @@ ---- a/heartbeat/pgsqlms 2023-01-04 14:42:36.093258702 +0100 -+++ b/heartbeat/pgsqlms 2023-01-04 14:40:52.403994545 +0100 -@@ -66,6 +66,7 @@ - my $maxlag = $ENV{'OCF_RESKEY_maxlag'} || $maxlag_default; - my $recovery_tpl = $ENV{'OCF_RESKEY_recovery_template'} - || "$pgdata/recovery.conf.pcmk"; -+my $ocf_check_level = $ENV{'OCF_CHECK_LEVEL'} || 0; - - - # PostgreSQL commands path -@@ -1304,26 +1305,28 @@ - return $OCF_ERR_INSTALLED; - } - -- # check notify=true -- $ans = qx{ $CRM_RESOURCE --resource "$OCF_RESOURCE_INSTANCE" \\ -- --meta --get-parameter notify 2>/dev/null }; -- chomp $ans; -- unless ( lc($ans) =~ /^true$|^on$|^yes$|^y$|^1$/ ) { -- ocf_exit_reason( -- 'You must set meta parameter notify=true for your master resource' -- ); -- return $OCF_ERR_INSTALLED; -- } -+ if ( $ocf_check_level == 10 ) { -+ # check notify=true -+ $ans = qx{ $CRM_RESOURCE --resource "$OCF_RESOURCE_INSTANCE" \\ -+ --meta --get-parameter notify 2>/dev/null }; -+ chomp $ans; -+ unless ( lc($ans) =~ /^true$|^on$|^yes$|^y$|^1$/ ) { -+ ocf_exit_reason( -+ 'You must set meta parameter notify=true for your "master" resource' -+ ); -+ return $OCF_ERR_INSTALLED; -+ } - -- # check master-max=1 -- unless ( -- defined $ENV{'OCF_RESKEY_CRM_meta_master_max'} -- and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1' -- ) { -- ocf_exit_reason( -- 'You must set meta parameter master-max=1 for your master resource' -- ); -- return $OCF_ERR_INSTALLED; -+ # check master-max=1 -+ unless ( -+ defined $ENV{'OCF_RESKEY_CRM_meta_master_max'} -+ and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1' -+ ) { -+ ocf_exit_reason( -+ 'You must set meta parameter master-max=1 for your "master" resource' -+ ); -+ return $OCF_ERR_INSTALLED; -+ } - } - - if ( $PGVERNUM >= $PGVER_12 ) { -@@ -2242,6 +2245,9 @@ - # Set current node name. - $nodename = ocf_local_nodename(); - -+if ( $__OCF_ACTION ne 'validate-all' ) { -+ $ocf_check_level = 10; -+} - $exit_code = pgsql_validate_all(); - - exit $exit_code if $exit_code != $OCF_SUCCESS or $__OCF_ACTION eq 'validate-all'; diff --git a/SOURCES/bz2157873-4-exportfs-pgsql-validate-all-fixes.patch b/SOURCES/bz2157873-4-exportfs-pgsql-validate-all-fixes.patch deleted file mode 100644 index d09352d..0000000 --- a/SOURCES/bz2157873-4-exportfs-pgsql-validate-all-fixes.patch +++ /dev/null @@ -1,187 +0,0 @@ -From 81f9e1a04dfd2274ccb906310b4f191485e342ab Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 11 Jan 2023 13:22:24 +0100 -Subject: [PATCH 1/2] exportfs: move testdir() to start-action to avoid failing - during resource creation (validate-all) and make it create the directory if - it doesnt exist - ---- - heartbeat/exportfs | 27 +++++++++++++++------------ - 1 file changed, 15 insertions(+), 12 deletions(-) - -diff --git a/heartbeat/exportfs b/heartbeat/exportfs -index c10777fa9..2307a9e67 100755 ---- a/heartbeat/exportfs -+++ b/heartbeat/exportfs -@@ -301,6 +301,16 @@ exportfs_monitor () - fi - } - -+testdir() { -+ if [ ! -d $1 ]; then -+ mkdir -p "$1" -+ if [ $? 
-ne 0 ]; then -+ ocf_exit_reason "Unable to create directory $1" -+ return 1 -+ fi -+ fi -+ return 0 -+} - export_one() { - local dir=$1 - local opts sep -@@ -331,6 +341,10 @@ export_one() { - } - exportfs_start () - { -+ if ! forall testdir; then -+ return $OCF_ERR_INSTALLED -+ fi -+ - if exportfs_monitor; then - ocf_log debug "already exported" - return $OCF_SUCCESS -@@ -428,14 +442,6 @@ exportfs_stop () - fi - } - --testdir() { -- if [ ! -d $1 ]; then -- ocf_is_probe || -- ocf_log err "$1 does not exist or is not a directory" -- return 1 -- fi -- return 0 --} - exportfs_validate_all () - { - if echo "$OCF_RESKEY_fsid" | grep -q -F ','; then -@@ -447,9 +453,6 @@ exportfs_validate_all () - ocf_exit_reason "use integer fsid when exporting multiple directories" - return $OCF_ERR_CONFIGURED - fi -- if ! forall testdir; then -- return $OCF_ERR_INSTALLED -- fi - } - - for dir in $OCF_RESKEY_directory; do -@@ -466,7 +469,7 @@ for dir in $OCF_RESKEY_directory; do - fi - else - case "$__OCF_ACTION" in -- stop|monitor) -+ stop|monitor|validate-all) - canonicalized_dir="$dir" - ocf_log debug "$dir does not exist" - ;; - -From 8ee41af82cda35149f8e0cfede6a8ddef3e221e1 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 11 Jan 2023 13:25:57 +0100 -Subject: [PATCH 2/2] pgsql: dont run promotable and file checks that could be - on shared storage during validate-all action - ---- - heartbeat/pgsql | 53 +++++++++++++++++++++++++++++-------------------- - 1 file changed, 32 insertions(+), 21 deletions(-) - -diff --git a/heartbeat/pgsql b/heartbeat/pgsql -index aa8a13a84..532063ac5 100755 ---- a/heartbeat/pgsql -+++ b/heartbeat/pgsql -@@ -1835,7 +1835,7 @@ check_config() { - - if [ ! -f "$1" ]; then - if ocf_is_probe; then -- ocf_log info "Configuration file is $1 not readable during probe." -+ ocf_log info "Unable to read $1 during probe." - rc=1 - else - ocf_exit_reason "Configuration file $1 doesn't exist" -@@ -1846,8 +1846,7 @@ check_config() { - return $rc - } - --# Validate most critical parameters --pgsql_validate_all() { -+validate_ocf_check_level_10() { - local version - local check_config_rc - local rep_mode_string -@@ -1883,12 +1882,6 @@ pgsql_validate_all() { - fi - fi - -- getent passwd $OCF_RESKEY_pgdba >/dev/null 2>&1 -- if [ ! $? -eq 0 ]; then -- ocf_exit_reason "User $OCF_RESKEY_pgdba doesn't exist"; -- return $OCF_ERR_INSTALLED; -- fi -- - if ocf_is_probe; then - ocf_log info "Don't check $OCF_RESKEY_pgdata during probe" - else -@@ -1898,18 +1891,6 @@ pgsql_validate_all() { - fi - fi - -- if [ -n "$OCF_RESKEY_monitor_user" -a ! -n "$OCF_RESKEY_monitor_password" ] -- then -- ocf_exit_reason "monitor password can't be empty" -- return $OCF_ERR_CONFIGURED -- fi -- -- if [ ! -n "$OCF_RESKEY_monitor_user" -a -n "$OCF_RESKEY_monitor_password" ] -- then -- ocf_exit_reason "monitor_user has to be set if monitor_password is set" -- return $OCF_ERR_CONFIGURED -- fi -- - if is_replication || [ "$OCF_RESKEY_rep_mode" = "slave" ]; then - if [ `printf "$version\n9.1" | sort -n | head -1` != "9.1" ]; then - ocf_exit_reason "Replication mode needs PostgreSQL 9.1 or higher." -@@ -2027,6 +2008,35 @@ pgsql_validate_all() { - return $OCF_SUCCESS - } - -+# Validate most critical parameters -+pgsql_validate_all() { -+ local rc -+ -+ getent passwd $OCF_RESKEY_pgdba >/dev/null 2>&1 -+ if [ ! $? 
-eq 0 ]; then -+ ocf_exit_reason "User $OCF_RESKEY_pgdba doesn't exist"; -+ return $OCF_ERR_INSTALLED; -+ fi -+ -+ if [ -n "$OCF_RESKEY_monitor_user" ] && [ -z "$OCF_RESKEY_monitor_password" ]; then -+ ocf_exit_reason "monitor password can't be empty" -+ return $OCF_ERR_CONFIGURED -+ fi -+ -+ if [ -z "$OCF_RESKEY_monitor_user" ] && [ -n "$OCF_RESKEY_monitor_password" ]; then -+ ocf_exit_reason "monitor_user has to be set if monitor_password is set" -+ return $OCF_ERR_CONFIGURED -+ fi -+ -+ if [ "$OCF_CHECK_LEVEL" -eq 10 ]; then -+ validate_ocf_check_level_10 -+ rc=$? -+ [ $rc -ne "$OCF_SUCCESS" ] && exit $rc -+ fi -+ -+ return $OCF_SUCCESS -+} -+ - - # - # Check if we need to create a log file -@@ -2163,6 +2173,7 @@ case "$1" in - exit $OCF_SUCCESS;; - esac - -+[ "$__OCF_ACTION" != "validate-all" ] && OCF_CHECK_LEVEL=10 - pgsql_validate_all - rc=$? - diff --git a/SOURCES/bz2157873-5-pgsqlms-alidate-all-OCF_CHECK_LEVEL-10.patch b/SOURCES/bz2157873-5-pgsqlms-alidate-all-OCF_CHECK_LEVEL-10.patch deleted file mode 100644 index 0642086..0000000 --- a/SOURCES/bz2157873-5-pgsqlms-alidate-all-OCF_CHECK_LEVEL-10.patch +++ /dev/null @@ -1,23 +0,0 @@ ---- ClusterLabs-resource-agents-fd0720f7/heartbeat/pgsqlms 2023-01-16 10:54:30.897188238 +0100 -+++ pgsqlms 2023-01-10 14:21:19.281286242 +0100 -@@ -1351,12 +1351,14 @@ - return $OCF_ERR_ARGS; - } - -- $guc = qx{ $POSTGRES -C primary_conninfo -D "$pgdata" $start_opts}; -- unless ($guc =~ /\bapplication_name='?$nodename'?\b/) { -- ocf_exit_reason( -- q{Parameter "primary_conninfo" MUST contain 'application_name=%s'. }. -- q{It is currently set to '%s'}, $nodename, $guc ); -- return $OCF_ERR_ARGS; -+ if ( $ocf_check_level == 10 ) { -+ $guc = qx{ $POSTGRES -C primary_conninfo -D "$pgdata" $start_opts}; -+ unless ($guc =~ /\bapplication_name='?$nodename'?\b/) { -+ ocf_exit_reason( -+ q{Parameter "primary_conninfo" MUST contain 'application_name=%s'. }. 
-+ q{It is currently set to '%s'}, $nodename, $guc ); -+ return $OCF_ERR_ARGS; -+ } - } - } - else { diff --git a/SOURCES/bz2181019-azure-events-1-fix-no-transition-summary.patch b/SOURCES/bz2181019-azure-events-1-fix-no-transition-summary.patch deleted file mode 100644 index ed2958e..0000000 --- a/SOURCES/bz2181019-azure-events-1-fix-no-transition-summary.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 81bb58b05d2ddabd17fe31af39f0e857e61db3c9 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 28 Mar 2023 16:53:45 +0200 -Subject: [PATCH] azure-events*: fix for no "Transition Summary" for Pacemaker - 2.1+ - ---- - heartbeat/azure-events-az.in | 8 ++++---- - heartbeat/azure-events.in | 6 +++--- - 2 files changed, 7 insertions(+), 7 deletions(-) - -diff --git a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in -index 59d0953061..67c02c6422 100644 ---- a/heartbeat/azure-events-az.in -+++ b/heartbeat/azure-events-az.in -@@ -311,10 +311,10 @@ class clusterHelper: - summary = clusterHelper._exec("crm_simulate", "-Ls") - if not summary: - ocf.logger.warning("transitionSummary: could not load transition summary") -- return False -+ return "" - if summary.find("Transition Summary:") < 0: -- ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary) -- return False -+ ocf.logger.debug("transitionSummary: no transactions: %s" % summary) -+ return "" - summary = summary.split("Transition Summary:")[1] - ret = summary.split("\n").pop(0) - -@@ -768,4 +768,4 @@ def main(): - agent.run() - - if __name__ == '__main__': -- main() -\ No newline at end of file -+ main() -diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in -index 66e129060a..5ad658df93 100644 ---- a/heartbeat/azure-events.in -+++ b/heartbeat/azure-events.in -@@ -310,10 +310,10 @@ class clusterHelper: - summary = clusterHelper._exec("crm_simulate", "-Ls") - if not summary: - ocf.logger.warning("transitionSummary: could not load transition summary") -- return False -+ return "" - if summary.find("Transition Summary:") < 0: -- ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary) -- return False -+ ocf.logger.debug("transitionSummary: no transactions: %s" % summary) -+ return "" - summary = summary.split("Transition Summary:")[1] - ret = summary.split("\n").pop(0) - diff --git a/SOURCES/bz2181019-azure-events-2-improve-logic.patch b/SOURCES/bz2181019-azure-events-2-improve-logic.patch deleted file mode 100644 index 1b5aa9d..0000000 --- a/SOURCES/bz2181019-azure-events-2-improve-logic.patch +++ /dev/null @@ -1,77 +0,0 @@ -From ff53e5c8d6867e580506d132fba6fcf6aa46b804 Mon Sep 17 00:00:00 2001 -From: Peter Varkoly -Date: Sat, 29 Apr 2023 08:09:11 +0200 -Subject: [PATCH] Use -LS instead of -Ls as parameter to get the Transition - Summary - ---- - heartbeat/azure-events-az.in | 9 +++++---- - heartbeat/azure-events.in | 9 +++++---- - 2 files changed, 10 insertions(+), 8 deletions(-) - -diff --git a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in -index 67c02c642..46d4d1f3d 100644 ---- a/heartbeat/azure-events-az.in -+++ b/heartbeat/azure-events-az.in -@@ -298,7 +298,7 @@ class clusterHelper: - Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby) - """ - # Is a global crm_simulate "too much"? Or would it be sufficient it there are no planned transitions for a particular node? 
-- # # crm_simulate -Ls -+ # # crm_simulate -LS - # Transition Summary: - # * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1) - # * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0) -@@ -308,15 +308,16 @@ class clusterHelper: - # Transition Summary: - ocf.logger.debug("transitionSummary: begin") - -- summary = clusterHelper._exec("crm_simulate", "-Ls") -+ summary = clusterHelper._exec("crm_simulate", "-LS") - if not summary: - ocf.logger.warning("transitionSummary: could not load transition summary") - return "" - if summary.find("Transition Summary:") < 0: - ocf.logger.debug("transitionSummary: no transactions: %s" % summary) - return "" -- summary = summary.split("Transition Summary:")[1] -- ret = summary.split("\n").pop(0) -+ j=summary.find('Transition Summary:') + len('Transition Summary:') -+ l=summary.lower().find('executing cluster transition:') -+ ret = list(filter(str.strip, summary[j:l].split("\n"))) - - ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret)) - return ret -diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in -index 5ad658df9..90acaba62 100644 ---- a/heartbeat/azure-events.in -+++ b/heartbeat/azure-events.in -@@ -297,7 +297,7 @@ class clusterHelper: - Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby) - """ - # Is a global crm_simulate "too much"? Or would it be sufficient it there are no planned transitions for a particular node? -- # # crm_simulate -Ls -+ # # crm_simulate -LS - # Transition Summary: - # * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1) - # * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0) -@@ -307,15 +307,16 @@ class clusterHelper: - # Transition Summary: - ocf.logger.debug("transitionSummary: begin") - -- summary = clusterHelper._exec("crm_simulate", "-Ls") -+ summary = clusterHelper._exec("crm_simulate", "-LS") - if not summary: - ocf.logger.warning("transitionSummary: could not load transition summary") - return "" - if summary.find("Transition Summary:") < 0: - ocf.logger.debug("transitionSummary: no transactions: %s" % summary) - return "" -- summary = summary.split("Transition Summary:")[1] -- ret = summary.split("\n").pop(0) -+ j=summary.find('Transition Summary:') + len('Transition Summary:') -+ l=summary.lower().find('executing cluster transition:') -+ ret = list(filter(str.strip, summary[j:l].split("\n"))) - - ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret)) - return ret diff --git a/SOURCES/bz2183152-Filesystem-fail-efs-utils-not-installed.patch b/SOURCES/bz2183152-Filesystem-fail-efs-utils-not-installed.patch deleted file mode 100644 index 72da518..0000000 --- a/SOURCES/bz2183152-Filesystem-fail-efs-utils-not-installed.patch +++ /dev/null @@ -1,23 +0,0 @@ -From b02b06c437b1d8cb1dcfe8ace47c2efc4a0e476c Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 30 Mar 2023 14:44:41 +0200 -Subject: [PATCH] Filesystem: fail if AWS efs-utils not installed when - fstype=efs - ---- - heartbeat/Filesystem | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index 65088029ec..50c68f115b 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -456,7 +456,7 @@ fstype_supported() - # System (EFS) - case "$FSTYPE" in - fuse.*|glusterfs|rozofs) support="fuse";; -- efs) support="nfs4";; -+ efs) check_binary "mount.efs"; support="nfs4";; - esac - - if [ "$support" != "$FSTYPE" ]; then diff --git 
a/SOURCES/bz2189243-Filesystem-1-improve-stop-action.patch b/SOURCES/bz2189243-Filesystem-1-improve-stop-action.patch deleted file mode 100644 index 351600b..0000000 --- a/SOURCES/bz2189243-Filesystem-1-improve-stop-action.patch +++ /dev/null @@ -1,125 +0,0 @@ -From 48ed6e6d6510f42743e4463970e27f05637e4982 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 4 Jul 2023 14:40:19 +0200 -Subject: [PATCH] Filesystem: improve stop-action and allow setting term/kill - signals and signal_delay for large filesystems - ---- - heartbeat/Filesystem | 80 ++++++++++++++++++++++++++++++++++++++------ - 1 file changed, 70 insertions(+), 10 deletions(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index 65a9dffb5..fe608ebfd 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -71,6 +71,9 @@ OCF_RESKEY_run_fsck_default="auto" - OCF_RESKEY_fast_stop_default="no" - OCF_RESKEY_force_clones_default="false" - OCF_RESKEY_force_unmount_default="true" -+OCF_RESKEY_term_signals_default="TERM" -+OCF_RESKEY_kill_signals_default="KILL" -+OCF_RESKEY_signal_delay_default="1" - - # RHEL specific defaults - if is_redhat_based; then -@@ -104,6 +107,9 @@ if [ -z "${OCF_RESKEY_fast_stop}" ]; then - fi - : ${OCF_RESKEY_force_clones=${OCF_RESKEY_force_clones_default}} - : ${OCF_RESKEY_force_unmount=${OCF_RESKEY_force_unmount_default}} -+: ${OCF_RESKEY_term_signals=${OCF_RESKEY_term_signals_default}} -+: ${OCF_RESKEY_kill_signals=${OCF_RESKEY_kill_signals_default}} -+: ${OCF_RESKEY_signal_delay=${OCF_RESKEY_signal_delay_default}} - - # Variables used by multiple methods - HOSTOS=$(uname) -@@ -266,6 +272,30 @@ block if unresponsive nfs mounts are in use on the system. - - - -+ -+ -+Signals (names or numbers, whitespace separated) to send processes during graceful termination phase in stop-action. -+ -+Signals (names or numbers, whitespace separated) to send processes during graceful termination phase in stop-action -+ -+ -+ -+ -+ -+Signals (names or numbers, whitespace separated) to send processes during forceful killing phase in stop-action. -+ -+Signals (names or numbers, whitespace separated) to send processes during forceful killing phase in stop-action -+ -+ -+ -+ -+ -+How many seconds to wait after sending term/kill signals to processes in stop-action. -+ -+How many seconds to wait after sending term/kill signals to processes in stop-action -+ -+ -+ - - - -@@ -663,19 +693,49 @@ try_umount() { - } - return $OCF_ERR_GENERIC - } --fs_stop() { -- local SUB="$1" timeout=$2 sig cnt -- for sig in TERM KILL; do -- cnt=$((timeout/2)) # try half time with TERM -- while [ $cnt -gt 0 ]; do -- try_umount "$SUB" && -- return $OCF_SUCCESS -- ocf_exit_reason "Couldn't unmount $SUB; trying cleanup with $sig" -+timeout_child() { -+ local pid="$1" timeout="$2" killer ret -+ -+ # start job in the background that will KILL the given process after timeout expires -+ sleep $timeout && kill -s KILL $pid & -+ killer=$! -+ -+ # block until the child process either exits on its own or gets killed by the above killer pipeline -+ wait $pid -+ ret=$? 
-+ -+ # ret would be 127 + child exit code if the timeout expired -+ [ $ret -lt 128 ] && kill -s KILL $killer -+ return $ret -+} -+fs_stop_loop() { -+ local SUB="$1" signals="$2" sig -+ while true; do -+ for sig in $signals; do - signal_processes "$SUB" $sig -- cnt=$((cnt-1)) -- sleep 1 - done -+ sleep $OCF_RESKEY_signal_delay -+ try_umount "$SUB" && return $OCF_SUCCESS - done -+} -+fs_stop() { -+ local SUB="$1" timeout=$2 grace_time ret -+ grace_time=$((timeout/2)) -+ -+ # try gracefully terminating processes for up to half of the configured timeout -+ fs_stop_loop "$SUB" "$OCF_RESKEY_term_signals" & -+ timeout_child $! $grace_time -+ ret=$? -+ [ $ret -eq $OCF_SUCCESS ] && return $ret -+ -+ # try killing them for the rest of the timeout -+ fs_stop_loop "$SUB" "$OCF_RESKEY_kill_signals" & -+ timeout_child $! $grace_time -+ ret=$? -+ [ $ret -eq $OCF_SUCCESS ] && return $ret -+ -+ # timeout expired -+ ocf_exit_reason "Couldn't unmount $SUB within given timeout" - return $OCF_ERR_GENERIC - } - diff --git a/SOURCES/bz2189243-Filesystem-2-fix-incorrect-parameter-types.patch b/SOURCES/bz2189243-Filesystem-2-fix-incorrect-parameter-types.patch deleted file mode 100644 index 9f61043..0000000 --- a/SOURCES/bz2189243-Filesystem-2-fix-incorrect-parameter-types.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 7056635f3f94c1bcaaa5ed5563dc3b0e9f6749e0 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 18 Jul 2023 14:12:27 +0200 -Subject: [PATCH] Filesystem: dont use boolean type for non-boolean parameters - ---- - heartbeat/Filesystem | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index ee55a4843..b9aae8d50 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -269,7 +269,7 @@ fuser cli tool. fuser is known to perform operations that can potentially - block if unresponsive nfs mounts are in use on the system. - - Kill processes before unmount -- -+ - - - -@@ -277,7 +277,7 @@ block if unresponsive nfs mounts are in use on the system. - Signals (names or numbers, whitespace separated) to send processes during graceful termination phase in stop-action. - - Signals (names or numbers, whitespace separated) to send processes during graceful termination phase in stop-action -- -+ - - - -@@ -285,7 +285,7 @@ Signals (names or numbers, whitespace separated) to send processes during gracef - Signals (names or numbers, whitespace separated) to send processes during forceful killing phase in stop-action. - - Signals (names or numbers, whitespace separated) to send processes during forceful killing phase in stop-action -- -+ - - - -@@ -293,7 +293,7 @@ Signals (names or numbers, whitespace separated) to send processes during forcef - How many seconds to wait after sending term/kill signals to processes in stop-action. 
- - How many seconds to wait after sending term/kill signals to processes in stop-action -- -+ - - - diff --git a/SOURCES/bz2189243-Filesystem-3-fix-signal_delay-default-value.patch b/SOURCES/bz2189243-Filesystem-3-fix-signal_delay-default-value.patch deleted file mode 100644 index 5079b76..0000000 --- a/SOURCES/bz2189243-Filesystem-3-fix-signal_delay-default-value.patch +++ /dev/null @@ -1,23 +0,0 @@ -From f779fad52e5f515ca81218da6098398bdecac286 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 20 Jul 2023 10:18:12 +0200 -Subject: [PATCH] Filesystem: fix incorrect variable name for signal_delay - default in metadata - ---- - heartbeat/Filesystem | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem -index b9aae8d50..066562891 100755 ---- a/heartbeat/Filesystem -+++ b/heartbeat/Filesystem -@@ -293,7 +293,7 @@ Signals (names or numbers, whitespace separated) to send processes during forcef - How many seconds to wait after sending term/kill signals to processes in stop-action. - - How many seconds to wait after sending term/kill signals to processes in stop-action -- -+ - - - diff --git a/SOURCES/gcp-configure-skip-bundled-lib-checks.patch b/SOURCES/gcp-configure-skip-bundled-lib-checks.patch deleted file mode 100644 index c591a40..0000000 --- a/SOURCES/gcp-configure-skip-bundled-lib-checks.patch +++ /dev/null @@ -1,28 +0,0 @@ ---- ClusterLabs-resource-agents-55a4e2c9/configure.ac 2021-08-19 09:37:57.000000000 +0200 -+++ ClusterLabs-resource-agents-55a4e2c9/configure.ac.modif 2021-09-02 13:12:26.336044699 +0200 -@@ -522,25 +522,12 @@ - AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1) - - BUILD_GCP_PD_MOVE=1 --if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then -- BUILD_GCP_PD_MOVE=0 -- AC_MSG_WARN("Not building gcp-pd-move") --fi - AM_CONDITIONAL(BUILD_GCP_PD_MOVE, test $BUILD_GCP_PD_MOVE -eq 1) - - BUILD_GCP_VPC_MOVE_ROUTE=1 --if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || \ -- test "x${HAVE_PYMOD_PYROUTE2}" != xyes || test $BUILD_OCF_PY -eq 0; then -- BUILD_GCP_VPC_MOVE_ROUTE=0 -- AC_MSG_WARN("Not building gcp-vpc-move-route") --fi - AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_ROUTE, test $BUILD_GCP_VPC_MOVE_ROUTE -eq 1) - - BUILD_GCP_VPC_MOVE_VIP=1 --if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then -- BUILD_GCP_VPC_MOVE_VIP=0 -- AC_MSG_WARN("Not building gcp-vpc-move-vip") --fi - AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_VIP, test $BUILD_GCP_VPC_MOVE_VIP -eq 1) - - AC_PATH_PROGS(ROUTE, route) diff --git a/SOURCES/nova-compute-wait-NovaEvacuate.patch b/SOURCES/nova-compute-wait-NovaEvacuate.patch deleted file mode 100644 index 834243d..0000000 --- a/SOURCES/nova-compute-wait-NovaEvacuate.patch +++ /dev/null @@ -1,766 +0,0 @@ -diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am ---- a/doc/man/Makefile.am 2021-08-25 09:31:14.033615965 +0200 -+++ b/doc/man/Makefile.am 2021-08-24 17:59:40.679372762 +0200 -@@ -97,6 +97,8 @@ - ocf_heartbeat_ManageRAID.7 \ - ocf_heartbeat_ManageVE.7 \ - ocf_heartbeat_NodeUtilization.7 \ -+ ocf_heartbeat_nova-compute-wait.7 \ -+ ocf_heartbeat_NovaEvacuate.7 \ - ocf_heartbeat_Pure-FTPd.7 \ - ocf_heartbeat_Raid1.7 \ - ocf_heartbeat_Route.7 \ -diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am ---- a/heartbeat/Makefile.am 2021-08-25 09:31:14.034615967 +0200 -+++ b/heartbeat/Makefile.am 2021-08-24 17:59:40.679372762 +0200 -@@ -29,6 +29,8 @@ 
- - ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat - -+ospdir = $(OCF_RA_DIR_PREFIX)/openstack -+ - dtddir = $(datadir)/$(PACKAGE_NAME) - dtd_DATA = ra-api-1.dtd metadata.rng - -@@ -50,6 +52,9 @@ - send_ua_SOURCES = send_ua.c IPv6addr_utils.c - send_ua_LDADD = $(LIBNETLIBS) - -+osp_SCRIPTS = nova-compute-wait \ -+ NovaEvacuate -+ - ocf_SCRIPTS = AoEtarget \ - AudibleAlarm \ - ClusterMon \ -diff --color -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait ---- a/heartbeat/nova-compute-wait 1970-01-01 01:00:00.000000000 +0100 -+++ b/heartbeat/nova-compute-wait 2021-08-24 17:59:40.678372759 +0200 -@@ -0,0 +1,317 @@ -+#!/bin/sh -+# Copyright 2015 Red Hat, Inc. -+# -+# Description: Manages compute daemons -+# -+# Authors: Andrew Beekhof -+# -+# Support: openstack@lists.openstack.org -+# License: Apache Software License (ASL) 2.0 -+# -+ -+ -+####################################################################### -+# Initialization: -+ -+### -+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} -+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs -+### -+ -+: ${__OCF_ACTION=$1} -+ -+####################################################################### -+ -+meta_data() { -+ cat < -+ -+ -+1.0 -+ -+ -+OpenStack Nova Compute Server. -+ -+OpenStack Nova Compute Server -+ -+ -+ -+ -+Deprecated - do not use anymore. -+Deprecated - do not use anymore -+ -+ -+ -+ -+Deprecated - do not use anymore. -+Deprecated - do not use anymore -+ -+ -+ -+Deprecated - do not use anymore. -+Deprecated - do not use anymore -+ -+ -+ -+ -+Deprecated - do not use anymore. -+Deprecated - do not use anymore -+ -+ -+ -+ -+ -+DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN -+ -+DNS domain -+ -+ -+ -+ -+Deprecated - do not use anymore. -+Deprecated - do not use anymore -+ -+ -+ -+ -+Deprecated - do not use anymore. -+Deprecated - do not use anymore -+ -+ -+ -+ -+ -+How long to wait for nova to finish evacuating instances elsewhere -+before starting nova-compute. Only used when the agent detects -+evacuations might be in progress. -+ -+You may need to increase the start timeout when increasing this value. -+ -+Delay to allow evacuations time to complete -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+END -+} -+ -+####################################################################### -+ -+# don't exit on TERM, to test that lrmd makes sure that we do exit -+trap sigterm_handler TERM -+sigterm_handler() { -+ ocf_log info "They use TERM to bring us down. No such luck." -+ return -+} -+ -+nova_usage() { -+ cat </run/systemd/system/openstack-nova-compute.service.d/unfence-20.conf -+[Service] -+ExecStartPost=/sbin/fence_compute ${fence_options} -o on -n ${NOVA_HOST} -+EOF -+} -+ -+nova_validate() { -+ rc=$OCF_SUCCESS -+ -+ check_binary crudini -+ check_binary nova-compute -+ check_binary fence_compute -+ -+ if [ ! -f /etc/nova/nova.conf ]; then -+ ocf_exit_reason "/etc/nova/nova.conf not found" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ -+ # Is the state directory writable? -+ state_dir=$(dirname $statefile) -+ touch "$state_dir/$$" -+ if [ $? != 0 ]; then -+ ocf_exit_reason "Invalid state directory: $state_dir" -+ return $OCF_ERR_ARGS -+ fi -+ rm -f "$state_dir/$$" -+ -+ NOVA_HOST=$(crudini --get /etc/nova/nova.conf DEFAULT host 2>/dev/null) -+ if [ $? = 1 ]; then -+ short_host=$(uname -n | awk -F. 
'{print $1}') -+ if [ "x${OCF_RESKEY_domain}" != x ]; then -+ NOVA_HOST=${short_host}.${OCF_RESKEY_domain} -+ else -+ NOVA_HOST=$(uname -n) -+ fi -+ fi -+ -+ if [ $rc != $OCF_SUCCESS ]; then -+ exit $rc -+ fi -+ return $rc -+} -+ -+statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active" -+ -+: ${OCF_RESKEY_evacuation_delay=120} -+case $__OCF_ACTION in -+meta-data) meta_data -+ exit $OCF_SUCCESS -+ ;; -+usage|help) nova_usage -+ exit $OCF_SUCCESS -+ ;; -+esac -+ -+case $__OCF_ACTION in -+start) nova_validate; nova_start;; -+stop) nova_stop;; -+monitor) nova_validate; nova_monitor;; -+notify) nova_notify;; -+validate-all) exit $OCF_SUCCESS;; -+*) nova_usage -+ exit $OCF_ERR_UNIMPLEMENTED -+ ;; -+esac -+rc=$? -+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" -+exit $rc -+ -diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate ---- a/heartbeat/NovaEvacuate 1970-01-01 01:00:00.000000000 +0100 -+++ b/heartbeat/NovaEvacuate 2021-08-24 17:59:40.682372770 +0200 -@@ -0,0 +1,407 @@ -+#!/bin/bash -+# -+# Copyright 2015 Red Hat, Inc. -+# -+# Description: Manages evacuation of nodes running nova-compute -+# -+# Authors: Andrew Beekhof -+# -+# Support: openstack@lists.openstack.org -+# License: Apache Software License (ASL) 2.0 -+# -+ -+ -+####################################################################### -+# Initialization: -+ -+### -+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} -+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs -+### -+ -+: ${__OCF_ACTION=$1} -+ -+####################################################################### -+ -+meta_data() { -+ cat < -+ -+ -+1.0 -+ -+ -+Facility for tacking a list of compute nodes and reliably evacuating the ones that fence_evacuate has flagged. -+ -+Evacuator for OpenStack Nova Compute Server -+ -+ -+ -+ -+ -+Authorization URL for connecting to keystone in admin context -+ -+Authorization URL -+ -+ -+ -+ -+ -+Username for connecting to keystone in admin context -+ -+Username -+ -+ -+ -+ -+Password for connecting to keystone in admin context -+ -+Password -+ -+ -+ -+ -+ -+Tenant name for connecting to keystone in admin context. -+Note that with Keystone V3 tenant names are only unique within a domain. -+ -+Keystone v2 Tenant or v3 Project Name -+ -+ -+ -+ -+ -+User's domain name. Used when authenticating to Keystone. -+ -+Keystone v3 User Domain -+ -+ -+ -+ -+ -+Domain name containing project. Used when authenticating to Keystone. -+ -+Keystone v3 Project Domain -+ -+ -+ -+ -+ -+Nova API location (internal, public or admin URL) -+ -+Nova API location (internal, public or admin URL) -+ -+ -+ -+ -+ -+Region name for connecting to nova. -+ -+Region name -+ -+ -+ -+ -+ -+Explicitly allow client to perform "insecure" TLS (https) requests. -+The server's certificate will not be verified against any certificate authorities. -+This option should be used with caution. -+ -+Allow insecure TLS requests -+ -+ -+ -+ -+ -+Disable shared storage recovery for instances. Use at your own risk! -+ -+Disable shared storage recovery for instances -+ -+ -+ -+ -+ -+Enable extra logging from the evacuation process -+ -+Enable debug logging -+ -+ -+ -+ -+ -+Allows delaying the nova evacuate API call, e.g. to give a storage array time to clean -+up eventual locks/leases. 
-+ -+Nova evacuate delay -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+END -+} -+ -+####################################################################### -+ -+# don't exit on TERM, to test that lrmd makes sure that we do exit -+trap sigterm_handler TERM -+sigterm_handler() { -+ ocf_log info "They use TERM to bring us down. No such luck." -+ return -+} -+ -+evacuate_usage() { -+ cat < >(grep -v "attribute does not exist" 1>&2) | -+ sed 's/ value=""/ value="no"/' | -+ tr '="' ' ' | -+ awk '{print $4" "$6}' -+ ) -+ return $OCF_SUCCESS -+} -+ -+evacuate_validate() { -+ rc=$OCF_SUCCESS -+ fence_options="" -+ -+ -+ if ! have_binary fence_evacuate; then -+ check_binary fence_compute -+ fi -+ -+ # Is the state directory writable? -+ state_dir=$(dirname $statefile) -+ touch "$state_dir/$$" -+ if [ $? != 0 ]; then -+ ocf_exit_reason "Invalid state directory: $state_dir" -+ return $OCF_ERR_ARGS -+ fi -+ rm -f "$state_dir/$$" -+ -+ if [ -z "${OCF_RESKEY_auth_url}" ]; then -+ ocf_exit_reason "auth_url not configured" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ -+ fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}" -+ -+ if [ -z "${OCF_RESKEY_username}" ]; then -+ ocf_exit_reason "username not configured" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ -+ fence_options="${fence_options} -l ${OCF_RESKEY_username}" -+ -+ if [ -z "${OCF_RESKEY_password}" ]; then -+ ocf_exit_reason "password not configured" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ -+ fence_options="${fence_options} -p ${OCF_RESKEY_password}" -+ -+ if [ -z "${OCF_RESKEY_tenant_name}" ]; then -+ ocf_exit_reason "tenant_name not configured" -+ exit $OCF_ERR_CONFIGURED -+ fi -+ -+ fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}" -+ -+ if [ -n "${OCF_RESKEY_user_domain}" ]; then -+ fence_options="${fence_options} -u ${OCF_RESKEY_user_domain}" -+ fi -+ -+ if [ -n "${OCF_RESKEY_project_domain}" ]; then -+ fence_options="${fence_options} -P ${OCF_RESKEY_project_domain}" -+ fi -+ -+ if [ -n "${OCF_RESKEY_region_name}" ]; then -+ fence_options="${fence_options} \ -+ --region-name ${OCF_RESKEY_region_name}" -+ fi -+ -+ if [ -n "${OCF_RESKEY_insecure}" ]; then -+ if ocf_is_true "${OCF_RESKEY_insecure}"; then -+ fence_options="${fence_options} --insecure" -+ fi -+ fi -+ -+ if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then -+ if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then -+ fence_options="${fence_options} --no-shared-storage" -+ fi -+ fi -+ -+ if [ -n "${OCF_RESKEY_verbose}" ]; then -+ if ocf_is_true "${OCF_RESKEY_verbose}"; then -+ fence_options="${fence_options} --verbose" -+ fi -+ fi -+ -+ if [ -n "${OCF_RESKEY_endpoint_type}" ]; then -+ case ${OCF_RESKEY_endpoint_type} in -+ adminURL|publicURL|internalURL) ;; -+ *) -+ ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type} not valid. 
Use adminURL or publicURL or internalURL" -+ exit $OCF_ERR_CONFIGURED -+ ;; -+ esac -+ fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}" -+ fi -+ -+ if [ -z "${OCF_RESKEY_evacuate_delay}" ]; then -+ OCF_RESKEY_evacuate_delay=0 -+ fi -+ -+ if [ $rc != $OCF_SUCCESS ]; then -+ exit $rc -+ fi -+ return $rc -+} -+ -+statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active" -+ -+case $__OCF_ACTION in -+ start) -+ evacuate_validate -+ evacuate_start -+ ;; -+ stop) -+ evacuate_stop -+ ;; -+ monitor) -+ evacuate_validate -+ evacuate_monitor -+ ;; -+ meta-data) -+ meta_data -+ exit $OCF_SUCCESS -+ ;; -+ usage|help) -+ evacuate_usage -+ exit $OCF_SUCCESS -+ ;; -+ validate-all) -+ exit $OCF_SUCCESS -+ ;; -+ *) -+ evacuate_usage -+ exit $OCF_ERR_UNIMPLEMENTED -+ ;; -+esac -+rc=$? -+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" -+exit $rc diff --git a/SOURCES/python3-syntax-fixes.patch b/SOURCES/python3-syntax-fixes.patch deleted file mode 100644 index c669dd5..0000000 --- a/SOURCES/python3-syntax-fixes.patch +++ /dev/null @@ -1,592 +0,0 @@ -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2018-10-08 12:36:31.868765636 +0200 -@@ -52,8 +52,8 @@ - if not filename == None: - self.exportInstanceToFile(result,filename) - else: -- print 'Filename is needed' -- except Exception,e: -+ print('Filename is needed') -+ except Exception as e: - print(e) - def _optimizeResult(self,result): - keys = result.keys() -@@ -81,9 +81,9 @@ - fp = open(fileName,'w') - try : - fp.write(json.dumps(result,indent=4)) -- print "success" -+ print("success") - except IOError: -- print "Error: can\'t find file or read data" -+ print("Error: can\'t find file or read data") - finally: - fp.close() - -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2018-10-08 12:36:53.882358851 +0200 -@@ -16,7 +16,7 @@ - if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0: - filename = keyValues['--filename'][0] - else: -- print "A profile is needed! please use \'--filename\' and add the profile name." -+ print("A profile is needed! please use \'--filename\' and add the profile name.") - return filename - - def getInstanceCount(self,keyValues): -@@ -25,7 +25,7 @@ - if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0: - count = keyValues['--instancecount'][0] - else: -- print "InstanceCount should be a positive number! The default value(1) will be used!" -+ print("InstanceCount should be a positive number! The default value(1) will be used!") - return int(count) - - def getSubOperations(self,cmd,operation): -@@ -65,8 +65,8 @@ - _newkeyValues["RegionId"] = newkeyValues["RegionId"] - self._handExtraOperation(cmd,extraOperation,_newkeyValues,version,secureRequest) - else: -- print "InstanceId is need!" 
-- except Exception,e: -+ print("InstanceId is need!") -+ except Exception as e: - print(e) - - def _handExtraOperation(self,cmd,extraOperation,keyValues,version , secureRequest = False): -@@ -81,7 +81,7 @@ - response.display_response("error", result, "json") - else: - response.display_response(extraOperation, result, "json") -- except Exception,e: -+ except Exception as e: - print(e) - - -@@ -127,7 +127,7 @@ - ''' - if data.has_key('InstanceId') and len(data['InstanceId']) > 0: - instanceId = data['InstanceId'] -- except Exception,e: -+ except Exception as e: - pass - finally: - return instanceId -@@ -156,5 +156,5 @@ - if __name__ == "__main__": - handler = EcsImportHandler() - handler.getKVFromJson('ttt') -- print handler.getKVFromJson('ttt') -+ print(handler.getKVFromJson('ttt')) - -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-10-08 12:37:08.373091088 +0200 -@@ -77,8 +77,8 @@ - if not filename == None: - self.exportInstanceToFile(result,filename) - else: -- print 'Filename is needed' -- except Exception,e: -+ print('Filename is needed') -+ except Exception as e: - print(e) - - def exportInstanceToFile(self, result, filename): -@@ -96,9 +96,9 @@ - fp = open(fileName,'w') - try : - fp.write(json.dumps(result,indent=4)) -- print "success" -+ print("success") - except IOError: -- print "Error: can\'t find file or read data" -+ print("Error: can\'t find file or read data") - finally: - fp.close() - -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-10-08 12:36:20.997966509 +0200 -@@ -26,7 +26,7 @@ - count = keyValues[import_count][0] - else: - pass -- # print "InstanceCount should be a positive number! The default value(1) will be used!" -+ # print("InstanceCount should be a positive number! The default value(1) will be used!") - return int(count), "InstanceCount is "+str(count)+" created." 
- - def getSubOperations(self,cmd,operation): -@@ -46,7 +46,7 @@ - if self.apiHandler.needSetDefaultRegion(cmdInstance, newkeyValues): - newkeyValues["RegionId"] = [self.extensionHandler.getUserRegion()] - newkeyValues["ClientToken"] = [self.random_str()] -- # print newkeyValues.keys() -+ # print(newkeyValues.keys()) - # return - # self._setAttr(cmdInstance, newkeyValues) # set all key values in instance - # self.apiHandler.changeEndPoint(cmdInstance, newkeyValues) -@@ -58,7 +58,7 @@ - response.display_response("error", result, "json") - else: - response.display_response(item, result, "json") -- except Exception,e: -+ except Exception as e: - print(e) - - def getKVFromJson(self,filename): -@@ -77,7 +77,7 @@ - fp = open(fileName,'r') - data=json.loads(fp.read()) - keys = data.keys() -- # print keys, type(data['Items']['DBInstanceAttribute'][0]) -+ # print(keys, type(data['Items']['DBInstanceAttribute'][0])) - # instanceAttribute = data['Items']['DBInstanceAttribute'][0] - items = data['Items']['DBInstanceAttribute'][0] - keys = items.keys() -@@ -130,7 +130,7 @@ - if __name__ == "__main__": - handler = RdsImportDBInstanceHandler() - # handler.getKVFromJson('ttt') -- # print handler.getKVFromJson('ttt') -- print handler.random_str() -+ # print(handler.getKVFromJson('ttt')) -+ print(handler.random_str()) - - -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-10-08 12:11:19.743703469 +0200 -@@ -24,9 +24,9 @@ - _value = keyValues[ProfileCmd.name][0] # use the first value - self.extensionCliHandler.setUserProfile(_value) - else: -- print "Do your forget profile name? please use \'--name\' and add the profile name." -+ print("Do your forget profile name? please use \'--name\' and add the profile name.") - else: -- print "[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?" -+ print("[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?") - - def addProfileCmd(self, cmd, keyValues): - userKey = '' -@@ -52,12 +52,12 @@ - finally: - f.close() - else: -- print "[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?" 
-+ print("[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?") - - - if __name__ == "__main__": - handler = ProfileHandler() - handler.handleProfileCmd("useprofile", {'--name':["profile444"]}) -- print handler.extensionCliHandler.getUserProfile() -+ print(handler.extensionCliHandler.getUserProfile()) - handler.addProfileCmd("addProfile", {}) -- handler.addProfileCmd("addProfile", {'--name':["profile2222"]}) -\ No newline at end of file -+ handler.addProfileCmd("addProfile", {'--name':["profile2222"]}) -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-10-08 12:12:25.602486634 +0200 -@@ -24,14 +24,14 @@ - self.openApiDataHandler = aliyunOpenApiData.aliyunOpenApiDataHandler() - - def showUsage(self): -- print "usage: aliyuncli [options and parameters]" -+ print("usage: aliyuncli [options and parameters]") - - def showExample(self): -- print "show example" -+ print("show example") - - def showCmdError(self, cmd): - self.showUsage() -- print " the valid command as follows:\n" -+ print(" the valid command as follows:\n") - cmds = self.openApiDataHandler.getApiCmds() - self.printAsFormat(cmds) - -@@ -44,7 +44,7 @@ - error.printInFormat("Wrong version", "The sdk version is not exit.") - return None - self.showUsage() -- print "["+cmd+"]","valid operations as follows:\n" -+ print("["+cmd+"]","valid operations as follows:\n") - operations = self.openApiDataHandler.getApiOperations(cmd, version) - extensions = self.openApiDataHandler.getExtensionOperationsFromCmd(cmd) - operations.update(extensions) -@@ -56,8 +56,8 @@ - self.printAsFormat(operations) - - def showParameterError(self, cmd, operation, parameterlist): -- print 'usage: aliyuncli [options and parameters]' -- print '['+cmd+"."+operation+']: current operation can uses parameters as follow :\n' -+ print('usage: aliyuncli [options and parameters]') -+ print('['+cmd+"."+operation+']: current operation can uses parameters as follow :\n') - self.printAsFormat(parameterlist) - pass - -@@ -72,7 +72,7 @@ - tmpList.append(item) - count = count+1 - if len(tmpList) == 2: -- print '{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10') -+ print('{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10')) - tmpList = list() - if len(tmpList) == 1 and count == len(mlist): -- print tmpList[0] -\ No newline at end of file -+ print(tmpList[0]) -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-10-08 12:12:42.799168903 +0200 -@@ -91,7 +91,7 @@ - keyValues["RegionId"] = [self.extensionHandler.getUserRegion()] - #check necessaryArgs as:accesskeyid accesskeysecret regionId - if not self.handler.hasNecessaryArgs(keyValues): -- print 'accesskeyid/accesskeysecret/regionId is absence' -+ print('accesskeyid/accesskeysecret/regionId is absence') - return - result = self.handler.getResponse(cmd,operation,className,cmdInstance,keyValues,secureRequest) - if result is None: -@@ -102,7 +102,7 @@ - else: - response.display_response(operation, result, outPutFormat,keyValues) - else: -- print 'aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com' -+ print('aliyuncli internal error, please 
contact: haowei.yao@alibaba-inc.com') - elif self.handler.isAvailableExtensionOperation(cmd, operation): - if self.args.__len__() >= 3 and self.args[2] == 'help': - import commandConfigure -@@ -125,7 +125,7 @@ - def showInstanceAttribute(self, cmd, operation, classname): - if self.args.__len__() >= 3 and self.args[2] == "help": - self.helper.showParameterError(cmd, operation, self.completer._help_to_show_instance_attribute(classname)) -- #print self.completer._help_to_show_instance_attribute(cmdInstance) -+ #print(self.completer._help_to_show_instance_attribute(cmdInstance)) - return True - return False - -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-10-08 12:12:54.764947819 +0200 -@@ -141,7 +141,7 @@ - _key = keyValues[keystr][0] - if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0: - _secret = keyValues[secretstr][0] -- #print "accesskeyid: ", _key , "accesskeysecret: ",_secret -+ #print("accesskeyid: ", _key , "accesskeysecret: ",_secret) - return _key, _secret - - -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-10-08 12:13:23.672413710 +0200 -@@ -161,12 +161,12 @@ - - if __name__ == "__main__": - upgradeHandler = aliyunCliUpgradeHandler() -- # print upgradeHandler.getLatestTimeFromServer() -+ # print(upgradeHandler.getLatestTimeFromServer()) - # flag, url = upgradeHandler.isNewVersionReady() - # if flag: -- # print url -+ # print(url) - # else: -- # print "current version is latest one" -- # print "final test:" -- print upgradeHandler.checkForUpgrade() -- print upgradeHandler.handleUserChoice("N") -+ # print("current version is latest one") -+ # print("final test:") -+ print(upgradeHandler.checkForUpgrade()) -+ print(upgradeHandler.handleUserChoice("N")) -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-10-08 12:14:46.830877248 +0200 -@@ -127,35 +127,35 @@ - - # this api will show help page when user input aliyuncli help(-h or --help) - def showAliyunCliHelp(self): -- print color.bold+"ALIYUNCLI()"+color.end -- print color.bold+"\nNAME"+color.end -- print "\taliyuncli -" -- print color.bold+"\nDESCRIPTION"+color.end -- print "\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. " -- print color.bold+"\nSYNOPSIS"+color.end -- print "\taliyuncli [options and parameters]" -- print "\n\taliyuncli has supported command completion now. The detail you can check our site." -- print color.bold+"OPTIONS"+color.end -- print color.bold+"\tconfigure"+color.end -- print "\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)" -- print color.bold+"\n\t--output"+color.end+" (string)" -- print "\n\tThe formatting style for command output." 
-- print "\n\to json" -- print "\n\to text" -- print "\n\to table" -+ print(color.bold+"ALIYUNCLI()"+color.end) -+ print(color.bold+"\nNAME"+color.end) -+ print("\taliyuncli -") -+ print(color.bold+"\nDESCRIPTION"+color.end) -+ print("\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. ") -+ print(color.bold+"\nSYNOPSIS"+color.end) -+ print("\taliyuncli [options and parameters]") -+ print("\n\taliyuncli has supported command completion now. The detail you can check our site.") -+ print(color.bold+"OPTIONS"+color.end) -+ print(color.bold+"\tconfigure"+color.end) -+ print("\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)") -+ print(color.bold+"\n\t--output"+color.end+" (string)") -+ print("\n\tThe formatting style for command output.") -+ print("\n\to json") -+ print("\n\to text") -+ print("\n\to table") - -- print color.bold+"\n\t--secure"+color.end -- print "\n\tMaking secure requests(HTTPS) to service" -+ print(color.bold+"\n\t--secure"+color.end) -+ print("\n\tMaking secure requests(HTTPS) to service") - -- print color.bold+"\nAVAILABLE SERVICES"+color.end -- print "\n\to ecs" -- print "\n\to ess" -- print "\n\to mts" -- print "\n\to rds" -- print "\n\to slb" -+ print(color.bold+"\nAVAILABLE SERVICES"+color.end) -+ print("\n\to ecs") -+ print("\n\to ess") -+ print("\n\to mts") -+ print("\n\to rds") -+ print("\n\to slb") - - def showCurrentVersion(self): -- print self._version -+ print(self._version) - - def findConfigureFilePath(self): - homePath = "" -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2018-10-08 12:16:00.008525187 +0200 -@@ -39,9 +39,9 @@ - - - def oss_notice(): -- print "OSS operation in aliyuncli is not supported." -- print "Please use 'ossutil' command line tool for Alibaba Cloud OSS operation." -- print "You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n" -+ print("OSS operation in aliyuncli is not supported.") -+ print("Please use 'ossutil' command line tool for Alibaba Cloud OSS operation.") -+ print("You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n") - - - try: -@@ -391,22 +391,22 @@ - return jsonobj - - except ImportError as e: -- print module, 'is not exist!' 
-+ print(module, 'is not exist!') - sys.exit(1) - - except ServerException as e: - error = cliError.error() - error.printInFormat(e.get_error_code(), e.get_error_msg()) -- print "Detail of Server Exception:\n" -- print str(e) -+ print("Detail of Server Exception:\n") -+ print(str(e)) - sys.exit(1) - - except ClientException as e: -- # print e.get_error_msg() -+ # print(e.get_error_msg()) - error = cliError.error() - error.printInFormat(e.get_error_code(), e.get_error_msg()) -- print "Detail of Client Exception:\n" -- print str(e) -+ print("Detail of Client Exception:\n") -+ print(str(e)) - sys.exit(1) - - def getSetFuncs(self,classname): -@@ -549,6 +549,6 @@ - - if __name__ == '__main__': - handler = aliyunOpenApiDataHandler() -- print "###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance') -- print "###############",handler.isAvailableOperation('ecs', 'DescribeInstances') -- print "###############",handler.getExtensionOperationsFromCmd('ecs') -+ print("###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance')) -+ print("###############",handler.isAvailableOperation('ecs', 'DescribeInstances')) -+ print("###############",handler.getExtensionOperationsFromCmd('ecs')) -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-10-08 12:16:14.865250686 +0200 -@@ -44,7 +44,7 @@ - filename=self.fileName - self.writeCmdVersionToFile(cmd,version,filename) - else: -- print "A argument is needed! please use \'--version\' and add the sdk version." -+ print("A argument is needed! 
please use \'--version\' and add the sdk version.") - return - def showVersions(self,cmd,operation,stream=None): - configureVersion='(not configure)' -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-10-08 12:17:34.763774477 +0200 -@@ -55,7 +55,7 @@ - # _mlist = self.rds.extensionOptions[self.rds.exportDBInstance] - self.appendList(parameterList, self.rds.extensionOptions[self.rds.exportDBInstance]) - if operation.lower() == self.rds.importDBInstance.lower(): -- # print "haha", (self.rds.extensionOptions[self.rds.importDBInstance]) -+ # print("haha", (self.rds.extensionOptions[self.rds.importDBInstance])) - # parameterList.append(self.rds.extensionOptions[self.rds.importDBInstance]) - self.appendList(parameterList, self.rds.extensionOptions[self.rds.importDBInstance]) - -@@ -89,8 +89,8 @@ - importInstance:['count','filename']} - - if __name__ == '__main__': -- # print type(rds.extensionOperations) -- # print type(rds.extensionOptions) -- # print rds.extensionOptions['ll'] -+ # print(type(rds.extensionOperations)) -+ # print(type(rds.extensionOptions)) -+ # print(rds.extensionOptions['ll']) - configure = commandConfigure() -- print configure.showExtensionOperationHelp("ecs", "ExportInstance") -+ print(configure.showExtensionOperationHelp("ecs", "ExportInstance")) -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2018-10-08 12:17:59.282322043 +0200 -@@ -577,7 +577,7 @@ - operation = operations[i].strip() - self._getKeyFromSection(profilename,operation) - else: -- print 'The correct usage:aliyuncli configure get key --profile profilename' -+ print('The correct usage:aliyuncli configure get key --profile profilename') - return - - def _getKeyFromSection(self,profilename,key): -@@ -591,7 +591,7 @@ - elif key in _WRITE_TO_CONFIG_FILE : - self._getKeyFromFile(config_filename,sectionName,key) - else: -- print key,'=','None' -+ print(key,'=','None') - def _getKeyFromFile(self,filename,section,key): - if os.path.isfile(filename): - with open(filename, 'r') as f: -@@ -600,9 +600,9 @@ - start = self._configWriter.hasSectionName(section,contents)[1] - end = self._configWriter._getSectionEnd(start,contents) - value = self._configWriter._getValueInSlice(start,end,key,contents) -- print key,'=',value -+ print(key,'=',value) - else: -- print key,'=None' -+ print(key,'=None') - - - -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py 2018-10-08 12:18:25.178844179 +0200 -@@ -2,7 +2,7 @@ - - def handleEndPoint(cmd,operation,keyValues): - if not hasNecessaryArgs(keyValues): -- print 'RegionId/EndPoint is absence' -+ print('RegionId/EndPoint is absence') - return - if cmd is not None: - cmd = cmd.capitalize() -@@ -25,7 +25,7 @@ - from aliyunsdkcore.profile.region_provider import modify_point - modify_point(cmd,regionId,endPoint) - except Exception as e: -- print e -+ print(e) - pass - - -diff -uNr 
a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py 2018-10-08 12:18:45.458469966 +0200 -@@ -111,14 +111,14 @@ - if os.path.isfile(cfgfile): - ans = raw_input('File existed. Do you wish to overwrite it?(y/n)') - if ans.lower() != 'y': -- print 'Answer is No. Quit now' -+ print('Answer is No. Quit now') - return - with open(cfgfile, 'w+') as f: - config.write(f) -- print 'Your configuration is saved to %s.' % cfgfile -+ print('Your configuration is saved to %s.' % cfgfile) - - def cmd_help(args): -- print HELP -+ print(HELP) - - def add_config(parser): - parser.add_argument('--host', type=str, help='service host') -@@ -161,7 +161,7 @@ - return CMD_LIST.keys() - def handleOas(pars=None): - if pars is None: -- print HELP -+ print(HELP) - sys.exit(0) - parser = ArgumentParser(prog="aliyuncli oas",formatter_class=ArgumentDefaultsHelpFormatter) - -diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py ---- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2018-01-24 04:08:33.000000000 +0100 -+++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2018-10-08 12:18:59.713206928 +0200 -@@ -61,7 +61,7 @@ - data = f.read() - return data - except (OSError, IOError) as e: -- print e -+ print(e) - def _getParamFromUrl(prefix,value,mode): - - req = urllib2.Request(value) -@@ -74,7 +74,7 @@ - errorMsg='Get the wrong content' - errorClass.printInFormat(response.getcode(), errorMsg) - except Exception as e: -- print e -+ print(e) - - PrefixMap = {'file://': _getParamFromFile, - 'fileb://': _getParamFromFile -@@ -86,4 +86,4 @@ - 'fileb://': {'mode': 'rb'}, - #'http://': {}, - #'https://': {} -- } -\ No newline at end of file -+ } -diff -uNr a/bundled/aliyun/colorama/demos/demo07.py b/bundled/aliyun/colorama/demos/demo07.py ---- a/bundled/aliyun/colorama/demos/demo07.py 2015-01-06 11:41:47.000000000 +0100 -+++ b/bundled/aliyun/colorama/demos/demo07.py 2018-10-08 12:20:25.598622106 +0200 -@@ -16,10 +16,10 @@ - 3a4 - """ - colorama.init() -- print "aaa" -- print "aaa" -- print "aaa" -- print forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4" -+ print("aaa") -+ print("aaa") -+ print("aaa") -+ print(forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4") - - - if __name__ == '__main__': diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec deleted file mode 100644 index 8e9a7e4..0000000 --- a/SPECS/resource-agents.spec +++ /dev/null @@ -1,2186 +0,0 @@ -# -# All modifications and additions to the file contributed by third parties -# remain the property of their copyright owners, unless otherwise agreed -# upon. The license for this file, and modifications and additions to the -# file, is the same license as for the pristine package itself (unless the -# license for the pristine package is not an Open Source License, in which -# case the license is the MIT License). An "Open Source License" is a -# license that conforms to the Open Source Definition (Version 1.9) -# published by the Open Source Initiative. -# - -# Below is the script used to generate a new source file -# from the resource-agent upstream git repo. 
-# -# TAG=$(git log --pretty="format:%h" -n 1) -# distdir="ClusterLabs-resource-agents-${TAG}" -# TARFILE="${distdir}.tar.gz" -# rm -rf $TARFILE $distdir -# git archive --prefix=$distdir/ HEAD | gzip > $TARFILE -# - -%global upstream_prefix ClusterLabs-resource-agents -%global upstream_version 55a4e2c9 - -# bundles -%global bundled_lib_dir bundled -## google cloud -# google-cloud-sdk bundle -%global googlecloudsdk google-cloud-sdk -%global googlecloudsdk_version 360.0.0 -%global googlecloudsdk_dir %{bundled_lib_dir}/gcp/%{googlecloudsdk} -# python-pyroute2 bundle -%global pyroute2 pyroute2 -%global pyroute2_version 0.4.13 -%global pyroute2_dir %{bundled_lib_dir}/gcp/%{pyroute2} -# python-httplib2 bundle -%global httplib2 httplib2 -%global httplib2_version 0.20.4 -## alibaba cloud -# python-colorama bundle -%global colorama colorama -%global colorama_version 0.3.3 -%global colorama_dir %{bundled_lib_dir}/aliyun/%{colorama} -# python-pycryptodome bundle -%global pycryptodome pycryptodome -%global pycryptodome_version 3.20.0 -%global pycryptodome_dir %{bundled_lib_dir}/aliyun/%{pycryptodome} -# python-aliyun-sdk-core bundle -%global aliyunsdkcore aliyun-python-sdk-core -%global aliyunsdkcore_version 2.13.1 -%global aliyunsdkcore_dir %{bundled_lib_dir}/aliyun/%{aliyunsdkcore} -# python-aliyun-sdk-ecs bundle -%global aliyunsdkecs aliyun-python-sdk-ecs -%global aliyunsdkecs_version 4.9.3 -%global aliyunsdkecs_dir %{bundled_lib_dir}/aliyun/%{aliyunsdkecs} -# python-aliyun-sdk-vpc bundle -%global aliyunsdkvpc aliyun-python-sdk-vpc -%global aliyunsdkvpc_version 3.0.2 -%global aliyunsdkvpc_dir %{bundled_lib_dir}/aliyun/%{aliyunsdkvpc} -# aliyuncli bundle -%global aliyuncli aliyun-cli -%global aliyuncli_version 2.1.10 -%global aliyuncli_dir %{bundled_lib_dir}/aliyun/%{aliyuncli} -## fix CVEs -# urllib3 bundle -%global urllib3 urllib3 -%global urllib3_version 1.26.18 - -# determine the ras-set to process based on configure invokation -%bcond_with rgmanager -%bcond_without linuxha - -Name: resource-agents -Summary: Open Source HA Reusable Cluster Resource Scripts -Version: 4.9.0 -Release: 54%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}.10 -License: GPLv2+ and LGPLv2+ -URL: https://github.com/ClusterLabs/resource-agents -%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} -Group: System Environment/Base -%else -Group: Productivity/Clustering/HA -%endif -Source0: %{upstream_prefix}-%{upstream_version}.tar.gz -Source1: %{googlecloudsdk}-%{googlecloudsdk_version}-linux-x86_64.tar.gz -Source2: %{pyroute2}-%{pyroute2_version}.tar.gz -Source3: pyparsing-2.4.7-py2.py3-none-any.whl -Source4: %{httplib2}-%{httplib2_version}.tar.gz -Source5: %{colorama}-%{colorama_version}.tar.gz -Source6: %{pycryptodome}-%{pycryptodome_version}.tar.gz -Source7: %{aliyunsdkcore}-%{aliyunsdkcore_version}.tar.gz -Source8: %{aliyunsdkecs}-%{aliyunsdkecs_version}.tar.gz -Source9: %{aliyunsdkvpc}-%{aliyunsdkvpc_version}.tar.gz -Source10: %{aliyuncli}-%{aliyuncli_version}.tar.gz -Source11: %{urllib3}-%{urllib3_version}.tar.gz -Patch0: nova-compute-wait-NovaEvacuate.patch -Patch1: bz1872754-pgsqlms-new-ra.patch -Patch2: bz1995178-storage-mon-fix-typo.patch -Patch3: bz2008333-gcp-pd-move-gcp-vpc-move-route-dont-fail-due-to-incorrect-rc.patch -Patch4: bz2003117-all-agents-set-correct-agent-ocf-version.patch -Patch5: bz2014415-nfsserver-add-nfs_server_scope-parameter.patch -Patch6: bz2015789-gcp-ilb-1-fix-log_enable.patch -Patch7: bz2015789-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch 
-Patch8: bz2015789-gcp-ilb-3-use-bundled-gcloud.patch -Patch9: bz2027591-nfsnotify-fix-notify_args-default.patch -Patch10: bz2012057-Route-return-OCF_NOT_RUNNING-missing-route.patch -Patch11: bz2029706-1-db2-crm_attribute-use-forever.patch -Patch12: bz2029706-2-db2-fixes.patch -Patch13: bz1992661-mysql-use-ssl-mode.patch -Patch14: bz2064342-1-IPsrcaddr-dhcp-warning.patch -Patch15: bz2064342-2-IPsrcaddr-error-message-route-not-found.patch -Patch16: bz2064342-3-IPsrcaddr-fix-indentation.patch -Patch17: bz2064342-4-IPsrcaddr-fixes.patch -Patch18: bz1908146-bz1908147-bz1908148-bz1949114-update-openstack-agents.patch -Patch19: bz2072043-LVM-activate-fix-fence-issue.patch -Patch20: bz2049414-Filesystem-1-fix-uuid-label-device-whitespace.patch -Patch21: bz2049414-Filesystem-2-improve-uuid-label-device-logic.patch -Patch22: bz2086889-lvmlockd-fail-when-use_lvmlockd-not-set.patch -Patch23: bz2093214-aws-vpc-move-ip-add-interface-label-support.patch -Patch24: bz1908148-openstack-info-fix-bashism.patch -Patch25: bz1908146-bz1908147-bz1949114-openstack-agents-fixes.patch -Patch26: bz1908146-bz1908147-bz1908148-bz1949114-openstack-agents-warn-when-openstackcli-slow.patch -Patch27: bz2103370-ocf-tester-1-update.patch -Patch28: bz2103370-ocf-tester-2-remove-deprecated-lrmd-lrmadmin-code.patch -Patch29: bz1908146-bz1908147-bz1908148-bz1949114-openstack-agents-set-domain-parameters-default.patch -Patch30: bz2090370-CTDB-move-process-to-root-cgroup-if-rt-enabled.patch -Patch31: bz2116941-ethmonitor-ovsmonitor-pgsql-fix-attrd_updater-q.patch -Patch32: bz2109159-storage_mon-1-exit-after-help.patch -Patch33: bz2109159-storage_mon-2-fix-specified-scores-count.patch -Patch34: bz2109159-storage_mon-3-fix-child-process-exit.patch -Patch35: bz2109159-storage_mon-4-fix-possible-false-negatives.patch -Patch36: bz1905820-LVM-activate-fix-return-codes.patch -Patch37: bz1977012-azure-events-az-new-ra.patch -Patch38: bz2133682-IPsrcaddr-proto-metric-scope-default-route-fixes.patch -Patch39: bz2141836-vdo-vol-dont-fail-probe-action.patch -Patch40: bz2049319-Filesystem-add-support-for-Amazon-EFS.patch -Patch41: bz2127117-nfsserver-nfsv4_only-parameter.patch -Patch42: bz2139131-mysql-common-return-error-if-kill-fails.patch -Patch43: bz2157873-1-all-ras-validate-all-OCF_CHECK_LEVEL-10.patch -Patch44: bz2157873-2-Filesystem-CTDB-validate-all-improvements.patch -Patch45: bz2157873-3-pgsqlms-validate-all-OCF_CHECK_LEVEL-10.patch -Patch46: bz2157873-4-exportfs-pgsql-validate-all-fixes.patch -Patch47: bz2157873-5-pgsqlms-alidate-all-OCF_CHECK_LEVEL-10.patch -Patch48: bz2040110-IPaddr2-IPsrcaddr-1-support-policy-based-routing.patch -Patch49: bz2149970-lvmlockd-add-use_lvmlockd-if-missing.patch -Patch50: bz2154727-ethmonitor-dont-log-iface-doesnt-exist-monitor.patch -Patch51: bz2039692-mysql-1-replication-fixes.patch -Patch52: bz2181019-azure-events-1-fix-no-transition-summary.patch -Patch53: bz2181019-azure-events-2-improve-logic.patch -Patch54: bz2183152-Filesystem-fail-efs-utils-not-installed.patch -Patch55: bz2039692-mysql-2-fix-demoted-score-bounce.patch -Patch56: bz2040110-IPaddr2-IPsrcaddr-2-fix-table-parameter.patch -Patch57: bz2189243-Filesystem-1-improve-stop-action.patch -Patch58: bz2189243-Filesystem-2-fix-incorrect-parameter-types.patch -Patch59: bz2189243-Filesystem-3-fix-signal_delay-default-value.patch -Patch60: bz1904465-mysql-common-improve-error-message.patch -Patch61: RHEL-15302-1-exportfs-make-fsid-optional.patch -Patch62: RHEL-15302-2-ocft-exportfs-remove-fsid-required-test.patch -Patch63: 
RHEL-15305-1-findif.sh-fix-loopback-handling.patch -Patch64: RHEL-16248-aws-vpc-move-ip-aws-vpc-route53-awseip-awsvip-auth_type-role.patch -Patch65: RHEL-17083-findif-EOS-fix.patch -Patch66: RHEL-15305-2-findif.sh-dont-use-table-parameter.patch -Patch67: RHEL-34137-aws-agents-use-curl_retry.patch -Patch68: RHEL-32828-db2-fix-OCF_SUCESS-typo.patch -Patch69: RHEL-61138-nfsserver-also-stop-rpc-statd-for-nfsv4_only.patch -Patch70: RHEL-69297-1-Filesystem-dont-kill-unrelated-processes.patch -Patch71: RHEL-69297-2-Filesystem-update-bsd-logic.patch -Patch72: RHEL-72956-1-openstack-cinder-volume-wait-for-volume-to-be-available.patch -Patch73: RHEL-72956-2-openstack-cinder-volume-fix-detach-not-working-during-start-action.patch -Patch74: RHEL-79823-portblock-fix-version-detection.patch - -# bundle patches -Patch1000: 7-gcp-bundled.patch -Patch1001: 9-google-cloud-sdk-oauth2client-python-rsa-to-cryptography.patch -Patch1002: 10-gcloud-support-info.patch -Patch1003: gcp-configure-skip-bundled-lib-checks.patch -Patch1004: bz1691456-gcloud-dont-detect-python2.patch -Patch1005: aliyun-vpc-move-ip-4-bundled.patch -Patch1006: python3-syntax-fixes.patch -Patch1007: aliyuncli-python3-fixes.patch -Patch1008: bz1935422-python-pygments-fix-CVE-2021-20270.patch -Patch1009: bz1943464-python-pygments-fix-CVE-2021-27291.patch -Patch1010: RHEL-44923-aliyun-gcp-fix-bundled-urllib3-CVE-2024-37891.patch -Patch1011: RHEL-50360-setuptools-fix-CVE-2024-6345.patch - -Obsoletes: heartbeat-resources <= %{version} -Provides: heartbeat-resources = %{version} - -# Build dependencies -BuildRequires: automake autoconf gcc -BuildRequires: perl-interpreter python3-devel -BuildRequires: libxslt glib2-devel -BuildRequires: systemd -BuildRequires: which - -%ifarch x86_64 -BuildRequires: python3-pip -%endif - -%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} -#BuildRequires: cluster-glue-libs-devel -BuildRequires: docbook-style-xsl docbook-dtds -%if 0%{?rhel} == 0 -BuildRequires: libnet-devel -%endif -%endif - -## Runtime deps -# system tools shared by several agents -Requires: /bin/bash /usr/bin/grep /bin/sed /bin/gawk -Requires: /bin/ps /usr/bin/pkill /usr/bin/hostname /usr/bin/netstat -Requires: /usr/sbin/fuser /bin/mount - -# Filesystem / fs.sh / netfs.sh -Requires: /sbin/fsck -Requires: /usr/sbin/fsck.ext2 /usr/sbin/fsck.ext3 /usr/sbin/fsck.ext4 -Requires: /usr/sbin/fsck.xfs -Requires: /sbin/mount.nfs /sbin/mount.nfs4 -%if 0%{?fedora} < 33 || (0%{?rhel} && 0%{?rhel} < 9) || (0%{?centos} && 0%{?centos} < 9) || 0%{?suse_version} -%if (0%{?rhel} && 0%{?rhel} < 8) || (0%{?centos} && 0%{?centos} < 8) -Requires: /usr/sbin/mount.cifs -%else -Recommends: /usr/sbin/mount.cifs -%endif -%endif - -# IPaddr2 -Requires: /sbin/ip - -# LVM / lvm.sh -Requires: /usr/sbin/lvm - -# nfsserver / netfs.sh -Requires: /usr/sbin/rpc.nfsd /sbin/rpc.statd /usr/sbin/rpc.mountd - -# ocf.py -Requires: python3 - -# rgmanager -%if %{with rgmanager} -# ip.sh -Requires: /usr/sbin/ethtool -Requires: /sbin/rdisc /usr/sbin/arping /bin/ping /bin/ping6 - -# nfsexport.sh -Requires: /sbin/findfs -Requires: /sbin/quotaon /sbin/quotacheck -%endif - -%description -A set of scripts to interface with several services to operate in a -High Availability environment for both Pacemaker and rgmanager -service managers. 
-
-%ifarch x86_64
-%package aliyun
-License: GPLv2+ and LGPLv2+ and ASL 2.0 and BSD and MIT
-Summary: Alibaba Cloud (Aliyun) resource agents
-%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
-Group: System Environment/Base
-%else
-Group: Productivity/Clustering/HA
-%endif
-Requires: %{name} = %{version}-%{release}
-Requires: python3-jmespath >= 0.9.0
-Requires: python3-urllib3
-# python-colorama bundle
-Provides: bundled(python-%{colorama}) = %{colorama_version}
-# python-pycryptodome bundle
-Provides: bundled(python-%{pycryptodome}) = %{pycryptodome_version}
-# python-aliyun-sdk-core bundle
-Provides: bundled(python-aliyun-sdk-core) = %{aliyunsdkcore_version}
-# python-aliyun-sdk-ecs bundle
-Provides: bundled(python-aliyun-sdk-ecs) = %{aliyunsdkecs_version}
-# python-aliyun-sdk-vpc bundle
-Provides: bundled(python-aliyun-sdk-vpc) = %{aliyunsdkvpc_version}
-# aliyuncli bundle
-Provides: bundled(aliyuncli) = %{aliyuncli_version}
-# urllib3 bundle
-Provides: bundled(python-urllib3) = %{urllib3_version}
-
-%description aliyun
-Alibaba Cloud (Aliyun) resource agents allows Alibaba Cloud
-(Aliyun) instances to be managed in a cluster environment.
-%endif
-
-%ifarch x86_64
-%package gcp
-License: GPLv2+ and LGPLv2+ and BSD and ASL 2.0 and MIT and Python
-Summary: Google Cloud Platform resource agents
-%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
-Group: System Environment/Base
-%else
-Group: Productivity/Clustering/HA
-%endif
-Requires: %{name} = %{version}-%{release}
-Requires: python3-google-api-client
-Requires: socat
-# google-cloud-sdk bundle
-Requires: python3-cryptography >= 1.7.2
-Requires: python3-dateutil >= 2.6.0
-Provides: bundled(%{googlecloudsdk}) = %{googlecloudsdk_version}
-Provides: bundled(python-antlr3) = 3.1.1
-Provides: bundled(python-appdirs) = 1.4.0
-Provides: bundled(python-argparse) = 1.2.1
-Provides: bundled(python-chardet) = 2.3.0
-Provides: bundled(python-dulwich) = 0.10.2
-Provides: bundled(python-ipaddress) = 1.0.16
-Provides: bundled(python-ipaddr) = 2.1.11
-Provides: bundled(python-mako) = 1.0.7
-Provides: bundled(python-oauth2client) = 3.0.0
-Provides: bundled(python-prompt_toolkit) = 1.0.13
-Provides: bundled(python-pyasn1) = 0.4.2
-Provides: bundled(python-pyasn1_modules) = 0.2.1
-Provides: bundled(python-pygments) = 2.2.0
-Provides: bundled(python-pyparsing) = 2.1.10
-Provides: bundled(python-requests) = 2.10.0
-Provides: bundled(python-six) = 1.11.0
-Provides: bundled(python-uritemplate) = 3.0.0
-Provides: bundled(python-urllib3) = %{urllib3_version}
-Provides: bundled(python-websocket) = 0.47.0
-Provides: bundled(python-yaml) = 3.12
-# python-pyroute2 bundle
-Provides: bundled(%{pyroute2}) = %{pyroute2_version}
-# python-httplib2 bundle
-Provides: bundled(%{httplib2}) = %{httplib2_version}
-
-%description gcp
-The Google Cloud Platform resource agents allows Google Cloud
-Platform instances to be managed in a cluster environment.
-%endif
-
-%package paf
-License: PostgreSQL
-Summary: PostgreSQL Automatic Failover (PAF) resource agent
-%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
-Group: System Environment/Base
-%else
-Group: Productivity/Clustering/HA
-%endif
-Requires: %{name} = %{version}-%{release}
-Requires: perl-interpreter
-
-%description paf
-PostgreSQL Automatic Failover (PAF) resource agents allows PostgreSQL
-databases to be managed in a cluster environment.
-
-%prep
-%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos_version} == 0 && 0%{?rhel} == 0
-%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. Please install the correct build packages or define the required macros manually.}
-exit 1
-%endif
-%setup -q -n %{upstream_prefix}-%{upstream_version}
-%patch -p1 -P 0
-%patch -p1 -P 1
-%patch -p1 -P 2
-%patch -p1 -P 3
-%patch -p1 -P 4
-%patch -p1 -P 5
-%patch -p1 -P 6
-%patch -p1 -P 7
-%patch -p1 -P 8
-%patch -p1 -P 9
-%patch -p1 -P 10
-%patch -p1 -P 11
-%patch -p1 -P 12
-%patch -p1 -P 13
-%patch -p1 -P 14
-%patch -p1 -P 15
-%patch -p1 -P 16
-%patch -p1 -P 17
-%patch -p1 -P 18
-%patch -p1 -P 19
-%patch -p1 -P 20
-%patch -p1 -P 21
-%patch -p1 -P 22
-%patch -p1 -P 23
-%patch -p1 -P 24
-%patch -p1 -P 25
-%patch -p1 -P 26
-%patch -p1 -P 27
-%patch -p1 -P 28
-%patch -p1 -P 29
-%patch -p1 -P 30
-%patch -p1 -P 31
-%patch -p1 -P 32
-%patch -p1 -P 33
-%patch -p1 -P 34
-%patch -p1 -P 35
-%patch -p1 -P 36
-%patch -p1 -P 37
-%patch -p1 -P 38
-%patch -p1 -P 39
-%patch -p1 -P 40
-%patch -p1 -P 41
-%patch -p1 -P 42
-%patch -p1 -P 43
-%patch -p1 -P 44
-%patch -p1 -P 45
-%patch -p1 -P 46
-%patch -p1 -P 47
-%patch -p1 -P 48
-%patch -p1 -P 49
-%patch -p1 -P 50
-%patch -p1 -P 51
-%patch -p1 -P 52
-%patch -p1 -P 53
-%patch -p1 -P 54
-%patch -p1 -P 55
-%patch -p1 -P 56
-%patch -p1 -P 57
-%patch -p1 -P 58
-%patch -p1 -P 59
-%patch -p1 -P 60
-%patch -p1 -P 61
-%patch -p1 -P 62
-%patch -p1 -P 63
-%patch -p1 -P 64
-%patch -p1 -P 65
-%patch -p1 -P 66
-%patch -p1 -P 67 -F1
-%patch -p1 -P 68
-%patch -p1 -P 69
-%patch -p1 -P 70
-%patch -p1 -P 71
-%patch -p1 -P 72
-%patch -p1 -P 73
-%patch -p1 -P 74
-
-chmod 755 heartbeat/nova-compute-wait
-chmod 755 heartbeat/NovaEvacuate
-chmod 755 heartbeat/pgsqlms
-
-# bundles
-mkdir -p %{bundled_lib_dir}/gcp
-mkdir -p %{bundled_lib_dir}/aliyun
-
-# google-cloud-sdk bundle
-%ifarch x86_64
-tar -xzf %SOURCE1 -C %{bundled_lib_dir}/gcp
-# gcp*: append bundled-directory to search path, gcloud-ra
-%patch -p1 -P 1000
-# replace python-rsa with python-cryptography
-%patch -p1 -P 1001
-# gcloud support info
-%patch -p1 -P 1002
-# configure: skip bundled gcp lib checks
-%patch -p1 -P 1003 -F1
-# gcloud remove python 2 detection
-%patch -p1 -P 1004
-# rename gcloud
-mv %{googlecloudsdk_dir}/bin/gcloud %{googlecloudsdk_dir}/bin/gcloud-ra
-# keep googleapiclient
-mv %{googlecloudsdk_dir}/platform/bq/third_party/googleapiclient %{googlecloudsdk_dir}/lib/third_party
-# only keep gcloud
-rm -rf %{googlecloudsdk_dir}/bin/{anthoscli,bootstrapping,bq,dev_appserver.py,docker-credential-gcloud,endpointscfg.py,git-credential-gcloud.sh,gsutil,java_dev_appserver.sh} %{googlecloudsdk_dir}/{completion.*,deb,install.*,path.*,platform,properties,RELEASE_NOTES,rpm,VERSION}
-# remove Python 2 code
-rm -rf %{googlecloudsdk_dir}/lib/third_party/*/python2
-# remove python-rsa
-rm -rf %{googlecloudsdk_dir}/lib/third_party/rsa
-# remove grpc
-rm -rf %{googlecloudsdk_dir}/lib/third_party/grpc
-# remove dateutil
-rm -rf %{googlecloudsdk_dir}/lib/third_party/dateutil
-# docs/licenses
-cp %{googlecloudsdk_dir}/README %{googlecloudsdk}_README
-cp %{googlecloudsdk_dir}/lib/third_party/argparse/README.txt %{googlecloudsdk}_argparse_README.txt
-cp %{googlecloudsdk_dir}/LICENSE %{googlecloudsdk}_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/httplib2/LICENSE %{googlecloudsdk}_httplib2_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/contextlib2/LICENSE %{googlecloudsdk}_contextlib2_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/concurrent/LICENSE %{googlecloudsdk}_concurrent_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/yaml/LICENSE %{googlecloudsdk}_yaml_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/pyu2f/LICENSE %{googlecloudsdk}_pyu2f_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/ml_sdk/LICENSE %{googlecloudsdk}_ml_sdk_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/ml_sdk/pkg/LICENSE %{googlecloudsdk}_pkg_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/ipaddr/LICENSE %{googlecloudsdk}_ipaddr_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/urllib3/LICENSE %{googlecloudsdk}_urllib3_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/ipaddress/LICENSE %{googlecloudsdk}_ipaddress_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/requests/LICENSE %{googlecloudsdk}_requests_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/docker/LICENSE %{googlecloudsdk}_docker_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/monotonic/LICENSE %{googlecloudsdk}_monotonic_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/websocket/LICENSE %{googlecloudsdk}_websocket_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/fasteners/LICENSE %{googlecloudsdk}_fasteners_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/wcwidth/LICENSE %{googlecloudsdk}_wcwidth_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/pygments/LICENSE %{googlecloudsdk}_pygments_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/oauth2client/LICENSE %{googlecloudsdk}_oauth2client_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/uritemplate/LICENSE %{googlecloudsdk}_uritemplate_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/dulwich/LICENSE %{googlecloudsdk}_dulwich_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/mako/LICENSE %{googlecloudsdk}_mako_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/packaging/LICENSE %{googlecloudsdk}_packaging_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/socks/LICENSE %{googlecloudsdk}_socks_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/antlr3/LICENSE %{googlecloudsdk}_antlr3_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/argparse/LICENSE.txt %{googlecloudsdk}_argparse_LICENSE.txt
-cp %{googlecloudsdk_dir}/lib/third_party/chardet/LICENSE %{googlecloudsdk}_chardet_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/ruamel/LICENSE %{googlecloudsdk}_ruamel_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/appdirs/LICENSE %{googlecloudsdk}_appdirs_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/argcomplete/LICENSE %{googlecloudsdk}_argcomplete_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/pyasn1_modules/LICENSE %{googlecloudsdk}_pyasn1_modules_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/setuptools/LICENSE %{googlecloudsdk}_setuptools_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/google/LICENSE %{googlecloudsdk}_google_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/google/protobuf/LICENSE %{googlecloudsdk}_protobuf_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/six/LICENSE %{googlecloudsdk}_six_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/dns/LICENSE %{googlecloudsdk}_dns_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/enum/LICENSE %{googlecloudsdk}_enum_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/gae_ext_runtime/LICENSE %{googlecloudsdk}_gae_ext_runtime_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/fancy_urllib/LICENSE %{googlecloudsdk}_fancy_urllib_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/pyasn1/LICENSE %{googlecloudsdk}_pyasn1_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/apitools/LICENSE %{googlecloudsdk}_apitools_LICENSE
-cp %{googlecloudsdk_dir}/lib/third_party/containerregistry/LICENSE %{googlecloudsdk}_containerregistry_LICENSE
-
-# python-pyroute2 bundle
-tar -xzf %SOURCE2 -C %{bundled_lib_dir}/gcp
-mv %{bundled_lib_dir}/gcp/%{pyroute2}-%{pyroute2_version} %{pyroute2_dir}
-cp %{pyroute2_dir}/README.md %{pyroute2}_README.md
-cp %{pyroute2_dir}/README.license.md %{pyroute2}_README.license.md
-cp %{pyroute2_dir}/LICENSE.Apache.v2 %{pyroute2}_LICENSE.Apache.v2
-cp %{pyroute2_dir}/LICENSE.GPL.v2 %{pyroute2}_LICENSE.GPL.v2
-
-# python-colorama bundle
-tar -xzf %SOURCE5 -C %{bundled_lib_dir}/aliyun
-mv %{bundled_lib_dir}/aliyun/%{colorama}-%{colorama_version} %{colorama_dir}
-cp %{colorama_dir}/LICENSE.txt %{colorama}_LICENSE.txt
-cp %{colorama_dir}/README.rst %{colorama}_README.rst
-
-pushd %{colorama_dir}
-# remove bundled egg-info
-rm -rf *.egg-info
-popd
-
-# python-pycryptodome bundle
-tar -xzf %SOURCE6 -C %{bundled_lib_dir}/aliyun
-mv %{bundled_lib_dir}/aliyun/%{pycryptodome}-%{pycryptodome_version} %{pycryptodome_dir}
-cp %{pycryptodome_dir}/README.rst %{pycryptodome}_README.rst
-cp %{pycryptodome_dir}/LICENSE.rst %{pycryptodome}_LICENSE.rst
-
-# python-aliyun-sdk-core bundle
-tar -xzf %SOURCE7 -C %{bundled_lib_dir}/aliyun
-mv %{bundled_lib_dir}/aliyun/%{aliyunsdkcore}-%{aliyunsdkcore_version} %{aliyunsdkcore_dir}
-cp %{aliyunsdkcore_dir}/README.rst %{aliyunsdkcore}_README.rst
-
-# python-aliyun-sdk-ecs bundle
-tar -xzf %SOURCE8 -C %{bundled_lib_dir}/aliyun
-mv %{bundled_lib_dir}/aliyun/%{aliyunsdkecs}-%{aliyunsdkecs_version} %{aliyunsdkecs_dir}
-cp %{aliyunsdkecs_dir}/README.rst %{aliyunsdkecs}_README.rst
-
-# python-aliyun-sdk-vpc bundle
-tar -xzf %SOURCE9 -C %{bundled_lib_dir}/aliyun
-mv %{bundled_lib_dir}/aliyun/%{aliyunsdkvpc}-%{aliyunsdkvpc_version} %{aliyunsdkvpc_dir}
-cp %{aliyunsdkvpc_dir}/README.rst %{aliyunsdkvpc}_README.rst
-
-# aliyuncli bundle
-tar -xzf %SOURCE10 -C %{bundled_lib_dir}/aliyun
-mv %{bundled_lib_dir}/aliyun/%{aliyuncli}-%{aliyuncli_version} %{aliyuncli_dir}
-cp %{aliyuncli_dir}/README.rst %{aliyuncli}_README.rst
-cp %{aliyuncli_dir}/LICENSE %{aliyuncli}_LICENSE
-# aliyun*: use bundled libraries
-%patch -p1 -P 1005
-
-# aliyun Python 3 fixes
-%patch -p1 -P 1006
-%patch -p1 -P 1007
-
-# fix CVE's in python-pygments
-pushd %{googlecloudsdk_dir}/lib/third_party
-%patch -p1 -P 1008 -F2
-%patch -p1 -P 1009 -F2
-popd
-%endif
-
-%build
-if [ !
-f configure ]; then - ./autogen.sh -fi - -%if 0%{?fedora} >= 11 || 0%{?centos_version} > 5 || 0%{?rhel} > 5 -CFLAGS="$(echo '%{optflags}')" -%global conf_opt_fatal "--enable-fatal-warnings=no" -%else -CFLAGS="${CFLAGS} ${RPM_OPT_FLAGS}" -%global conf_opt_fatal "--enable-fatal-warnings=yes" -%endif - -%if %{with rgmanager} -%global rasset rgmanager -%endif -%if %{with linuxha} -%global rasset linux-ha -%endif -%if %{with rgmanager} && %{with linuxha} -%global rasset all -%endif - -export CFLAGS - -%configure BASH_SHELL="/bin/bash" \ - PYTHON="%{__python3}" \ - %{conf_opt_fatal} \ -%if %{defined _unitdir} - SYSTEMD_UNIT_DIR=%{_unitdir} \ -%endif -%if %{defined _tmpfilesdir} - SYSTEMD_TMPFILES_DIR=%{_tmpfilesdir} \ - --with-rsctmpdir=/run/resource-agents \ -%endif - --with-pkg-name=%{name} \ - --with-ras-set=%{rasset} - -%if %{defined jobs} -JFLAGS="$(echo '-j%{jobs}')" -%else -JFLAGS="$(echo '%{_smp_mflags}')" -%endif - -make $JFLAGS - -%ifarch x86_64 -# python-pyroute2 bundle -pushd %{pyroute2_dir} -%{__python3} setup.py build -popd - -# python-colorama bundle -pushd %{colorama_dir} -%{__python3} setup.py build -popd - -# python-pycryptodome bundle -pushd %{pycryptodome_dir} -%{__python3} setup.py build -popd - -# python-aliyun-sdk-core bundle -pushd %{aliyunsdkcore_dir} -%{__python3} setup.py build -popd - -# python-aliyun-sdk-ecs bundle -pushd %{aliyunsdkecs_dir} -%{__python3} setup.py build -popd - -# python-aliyun-sdk-vpc bundle -pushd %{aliyunsdkvpc_dir} -%{__python3} setup.py build -popd - -# aliyuncli bundle -pushd %{aliyuncli_dir} -%{__python3} setup.py build -popd -%endif - -%install -rm -rf %{buildroot} -make install DESTDIR=%{buildroot} - -# byte compile ocf.py -%py_byte_compile %{__python3} %{buildroot}%{_usr}/lib/ocf/lib/heartbeat - -# google-cloud-sdk bundle -%ifarch x86_64 -pushd %{googlecloudsdk_dir} -# fix urllib3 CVEs -rm -rf lib/third_party/urllib3 -%{__python3} -m pip install --target lib/third_party --no-index --find-links %{_sourcedir} urllib3 -mkdir -p %{buildroot}/usr/lib/%{name}/%{googlecloudsdk_dir} -cp -a bin data lib %{buildroot}/usr/lib/%{name}/%{googlecloudsdk_dir} -mkdir %{buildroot}/%{_bindir} -ln -s /usr/lib/%{name}/%{googlecloudsdk_dir}/bin/gcloud-ra %{buildroot}/%{_bindir} -popd - -# python-pyroute2 bundle -pushd %{pyroute2_dir} -%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/gcp -popd - -# python-httplib2 bundle -%{__python3} -m pip install --user --no-index --find-links %{_sourcedir} pyparsing -%{__python3} -m pip install --target %{buildroot}/usr/lib/%{name}/%{bundled_lib_dir}/gcp --no-index --find-links %{_sourcedir} %{httplib2} - -# python-colorama bundle -pushd %{colorama_dir} -%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun -popd - -# python-pycryptodome bundle -pushd %{pycryptodome_dir} -%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun -popd - -# python-aliyun-sdk-core bundle -pushd %{aliyunsdkcore_dir} -%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun -# fix urllib3 CVEs -rm -rf %{buildroot}/usr/lib/%{name}/%{bundled_lib_dir}/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3 -%{__python3} -m pip install --target %{buildroot}/usr/lib/%{name}/%{bundled_lib_dir}/aliyun/aliyunsdkcore/vendored/requests/packages --no-index --find-links %{_sourcedir} 
urllib3 -popd - -# python-aliyun-sdk-ecs bundle -pushd %{aliyunsdkecs_dir} -%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun -popd - -# python-aliyun-sdk-vpc bundle -pushd %{aliyunsdkvpc_dir} -%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun -popd - -# aliyuncli bundle -pushd %{aliyuncli_dir} -%{__python3} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}/aliyun -sed -i -e "/^import sys/asys.path.insert(0, '/usr/lib/%{name}/%{bundled_lib_dir}/aliyun')\nsys.path.insert(1, '/usr/lib/%{name}/%{bundled_lib_dir}/aliyun/aliyuncli')" %{buildroot}/%{_bindir}/aliyuncli -mv %{buildroot}/%{_bindir}/aliyuncli %{buildroot}/%{_bindir}/aliyuncli-ra -# aliyun_completer / aliyun_zsh_complete.sh -rm %{buildroot}/%{_bindir}/aliyun_* -popd - -# regular patch doesnt work in build-section -pushd %{buildroot}/usr/lib/%{name}/%{bundled_lib_dir} -/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=2 < %{PATCH1010} -popd -pushd %{buildroot}/usr/lib/%{name}/%{bundled_lib_dir}/gcp/google-cloud-sdk/lib/third_party -/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH1011} -popd -%endif - -## tree fixup -# remove docs (there is only one and they should come from doc sections in files) -rm -rf %{buildroot}/usr/share/doc/resource-agents - -## -# Create symbolic link between IPAddr and IPAddr2 -## -rm -f %{buildroot}/usr/lib/ocf/resource.d/heartbeat/IPaddr -ln -s /usr/lib/ocf/resource.d/heartbeat/IPaddr2 %{buildroot}/usr/lib/ocf/resource.d/heartbeat/IPaddr - -%clean -rm -rf %{buildroot} - -%files -%defattr(-,root,root) -%doc AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog -%if %{with linuxha} -%doc heartbeat/README.galera -%doc doc/README.webapps -%doc %{_datadir}/%{name}/ra-api-1.dtd -%doc %{_datadir}/%{name}/metadata.rng -%endif - -%if %{with rgmanager} -%{_datadir}/cluster -%{_sbindir}/rhev-check.sh -%endif - -%if %{with linuxha} -%dir %{_usr}/lib/ocf -%dir %{_usr}/lib/ocf/resource.d -%dir %{_usr}/lib/ocf/lib - -%{_usr}/lib/ocf/lib/heartbeat - -%{_usr}/lib/ocf/resource.d/heartbeat -%{_usr}/lib/ocf/resource.d/openstack -%if %{with rgmanager} -%{_usr}/lib/ocf/resource.d/redhat -%endif - -%{_datadir}/pkgconfig/%{name}.pc - -%if %{defined _unitdir} -%{_unitdir}/resource-agents-deps.target -%endif -%if %{defined _tmpfilesdir} -%{_tmpfilesdir}/%{name}.conf -%endif - -%dir %{_datadir}/%{name} -%dir %{_datadir}/%{name}/ocft -%{_datadir}/%{name}/ocft/configs -%{_datadir}/%{name}/ocft/caselib -%{_datadir}/%{name}/ocft/README -%{_datadir}/%{name}/ocft/README.zh_CN -%{_datadir}/%{name}/ocft/helpers.sh -%exclude %{_datadir}/%{name}/ocft/runocft -%exclude %{_datadir}/%{name}/ocft/runocft.prereq - -%{_sbindir}/ocf-tester -%{_sbindir}/ocft - -%{_includedir}/heartbeat - -%if %{defined _tmpfilesdir} -%dir %attr (1755, root, root) /run/resource-agents -%else -%dir %attr (1755, root, root) %{_var}/run/resource-agents -%endif - -%{_mandir}/man7/*.7* -%{_mandir}/man8/ocf-tester.8* - -### -# Supported, but in another sub package -### -%exclude /usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip* -%exclude %{_mandir}/man7/*aliyun-vpc-move-ip* -%exclude /usr/lib/ocf/resource.d/heartbeat/gcp* -%exclude %{_mandir}/man7/*gcp* -%exclude /usr/lib/%{name}/%{bundled_lib_dir} -%exclude /usr/lib/ocf/resource.d/heartbeat/pgsqlms -%exclude %{_mandir}/man7/*pgsqlms* -%exclude %{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm - -### -# Moved to separate 
packages -### -%exclude /usr/lib/ocf/resource.d/heartbeat/SAP* -%exclude /usr/lib/ocf/lib/heartbeat/sap* -%exclude %{_mandir}/man7/*SAP* - -### -# Unsupported -### -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AoEtarget -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AudibleAlarm -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ClusterMon -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/EvmsSCC -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Evmsd -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ICP -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LVM -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LinuxSCSI -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageRAID -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageVE -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Pure-FTPd -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Raid1 -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ServeRAID -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SphinxSearchDaemon -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Stateful -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SysInfo -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/VIPArip -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS6 -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WinPopup -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Xen -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ZFS -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/anything -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/asterisk -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/clvm -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dnsupdate -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/docker-compose -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dovecot -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dummypy -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/eDir88 -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/fio -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ids -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iface-bridge -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ipsec -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iscsi -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jboss -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jira -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/kamailio -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ldirectord -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxc -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxd-info -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/machine-info -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mariadb -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mdraid -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/minio -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mpathpersist -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mysql-proxy -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/nvmet-* -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ovsmonitor -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pgagent -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pingd -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pound -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/proftpd -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rkt -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rsyslog -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/scsi2reservation -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sfex -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sg_persist -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/smb-share -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/syslog-ng -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/varnish -%exclude 
%{_usr}/lib/ocf/resource.d/heartbeat/vmware -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vsftpd -%exclude %{_usr}/lib/ocf/resource.d/heartbeat/zabbixserver -%exclude %{_mandir}/man7/ocf_heartbeat_AoEtarget.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_AudibleAlarm.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_ClusterMon.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_EvmsSCC.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_Evmsd.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_ICP.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_IPaddr.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_LVM.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_LinuxSCSI.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_ManageRAID.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_ManageVE.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_Pure-FTPd.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_Raid1.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_ServeRAID.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_SphinxSearchDaemon.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_Stateful.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_SysInfo.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_VIPArip.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_WAS.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_WAS6.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_WinPopup.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_Xen.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_ZFS.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_anything.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_asterisk.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_clvm.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_dnsupdate.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_docker-compose.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_dovecot.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_dummypy.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_eDir88.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_fio.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_ids.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_iface-bridge.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_ipsec.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_iscsi.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_jboss.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_jira.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_kamailio.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_lxc.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_lxd-info.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_machine-info.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_mariadb.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_mdraid.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_minio.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_mpathpersist.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_mysql-proxy.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_nvmet-*.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_ovsmonitor.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_pgagent.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_pingd.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_pound.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_proftpd.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_rkt.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_rsyslog.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_scsi2reservation.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_sfex.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_sg_persist.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_smb-share.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_syslog-ng.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_varnish.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_vmware.7.gz -%exclude %{_mandir}/man7/ocf_heartbeat_vsftpd.7.gz -%exclude 
%{_mandir}/man7/ocf_heartbeat_zabbixserver.7.gz - -### -# Other excluded files. -### -# ldirectord is not supported -%exclude /etc/ha.d/resource.d/ldirectord -%exclude /etc/init.d/ldirectord -%exclude %{_unitdir}/ldirectord.service -%exclude /etc/logrotate.d/ldirectord -%exclude /usr/sbin/ldirectord -%exclude %{_mandir}/man8/ldirectord.8.gz - -# For compatability with pre-existing agents -%dir %{_sysconfdir}/ha.d -%{_sysconfdir}/ha.d/shellfuncs - -%{_libexecdir}/heartbeat -%endif - -%if %{with rgmanager} -%post -n resource-agents -ccs_update_schema > /dev/null 2>&1 ||: -%endif - -%ifarch x86_64 -%files aliyun -%doc aliyun*_README* %{colorama}_README.rst %{pycryptodome}_README.rst -%license %{aliyuncli}_LICENSE %{colorama}_LICENSE.txt %{pycryptodome}_LICENSE.rst -%defattr(-,root,root) -/usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip* -%{_mandir}/man7/*aliyun-vpc-move-ip* -# bundle -%{_bindir}/aliyuncli-ra -%dir /usr/lib/%{name} -/usr/lib/%{name}/%{bundled_lib_dir}/aliyun -%endif - -%ifarch x86_64 -%files gcp -%doc %{googlecloudsdk}_*README* -%license %{googlecloudsdk}_*LICENSE* -%doc %{pyroute2}_README* -%license %{pyroute2}_LICENSE* -%defattr(-,root,root) -/usr/lib/ocf/resource.d/heartbeat/gcp-ilb -%{_mandir}/man7/*gcp-ilb* -/usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-vip* -%{_mandir}/man7/*gcp-vpc-move-vip* -/usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-route* -%{_mandir}/man7/*gcp-vpc-move-route* -/usr/lib/ocf/resource.d/heartbeat/gcp-pd-move* -%{_mandir}/man7/*gcp-pd-move* -# bundle -%{_bindir}/gcloud-ra -%dir /usr/lib/%{name} -/usr/lib/%{name}/%{bundled_lib_dir}/gcp -%endif - -%files paf -%doc paf_README.md -%license paf_LICENSE -%defattr(-,root,root) -%{_usr}/lib/ocf/resource.d/heartbeat/pgsqlms -%{_mandir}/man7/*pgsqlms* -%{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm - -%changelog -* Thu Feb 20 2025 Oyvind Albrigtsen - 4.9.0-54.10 -- portblock: fix iptables version detection - - Resolves: RHEL-79823 - -* Fri Jan 10 2025 Oyvind Albrigtsen - 4.9.0-54.8 -- openstack-cinder-volume: wait for volume to be available - - Resolves: RHEL-72956 - -* Wed Nov 27 2024 Oyvind Albrigtsen - 4.9.0-54.6 -- Filesystem: dont kill unrelated processes during stop-action - - Resolves: RHEL-69297 - -* Tue Oct 1 2024 Oyvind Albrigtsen - 4.9.0-54.5 -- nfsserver: also stop rpc-statd for nfsv4_only to avoid stop failing - in some cases - - Resolves: RHEL-61138 - -* Thu Jul 25 2024 Oyvind Albrigtsen - 4.9.0-54.4 -- bundled setuptools: fix CVE-2024-6345 - - Resolves: RHEL-50360 - -* Tue Jul 23 2024 Oyvind Albrigtsen - 4.9.0-54.3 -- gcp-pd-move: fix TLS_VERSION_1 issue - - Resolves: RHEL-50041 - -* Wed Jun 26 2024 Oyvind Albrigtsen - 4.9.0-54.2 -- bundled urllib3: fix CVE-2024-37891 - - Resolves: RHEL-44923 - -* Thu May 30 2024 Oyvind Albrigtsen - 4.9.0-54.1 -- AWS agents: retry failed metadata requests to avoid instantly - failing when there is a hiccup in the network or metadata service -- db2: fix OCF_SUCESS typo - - Resolves: RHEL-34137, RHEL-32828 - -* Thu Feb 8 2024 Oyvind Albrigtsen - 4.9.0-54 -- findif.sh: fix loopback IP handling - - Resolves: RHEL-15305 - -* Wed Jan 24 2024 Oyvind Albrigtsen - 4.9.0-53 -- bundled urllib3: fix CVE-2023-45803 -- bundled pycryptodome: fix CVE-2023-52323 - - Resolves: RHEL-22431, RHEL-20916 - -* Tue Nov 21 2023 Oyvind Albrigtsen - 4.9.0-52 -- findif: also check that netmaskbits != EOS - - Resolves: RHEL-17083 - -* Fri Nov 17 2023 Oyvind Albrigtsen - 4.9.0-51 -- aws-vpc-move-ip/aws-vpc-route53/awseip/awsvip: add auth_type parameter - and AWS Policy based authentication type 
- - Resolves: RHEL-16248 - -* Thu Nov 2 2023 Oyvind Albrigtsen - 4.9.0-49 -- exportfs: make "fsid" parameter optional - - Resolves: RHEL-15302 - -* Wed Sep 6 2023 Oyvind Albrigtsen - 4.9.0-48 -- mysql-common: improve error message - - Resolves: rhbz#1904465 - -* Thu Jul 20 2023 Oyvind Albrigtsen - 4.9.0-47 -- Filesystem: improve stop-action and allow setting term/kill signals - and signal_delay for large filesystems - - Resolves: rhbz#2189243 - -* Wed Jun 21 2023 Oyvind Albrigtsen - 4.9.0-44 -- IPaddr2/IPsrcaddr: support policy-based routing - - Resolves: rhbz#2040110 - -* Wed Jun 14 2023 Oyvind Albrigtsen - 4.9.0-43 -- mysql: fix replication issues - - Resolves: rhbz#2039692 - -* Mon May 1 2023 Oyvind Albrigtsen - 4.9.0-42 -- azure-events*: fix for no "Transition Summary" for Pacemaker 2.1+ -- Filesystem: fail if AWS efs-utils not installed when fstype=efs - - Resolves: rhbz#2181019 - Resolves: rhbz#2183152 - -* Wed Mar 22 2023 Oyvind Albrigtsen - 4.9.0-41 -- lvmlockd: add "use_lvmlockd = 1" if it's commented out or missing -- ethmonitor: dont log "Interface does not exist" for monitor-action - - Resolves: rhbz#2149970 - Resolves: rhbz#2154727 - -* Tue Jan 17 2023 Oyvind Albrigtsen - 4.9.0-40 -- all agents: dont check notify/promotable settings during - validate-action - - Resolves: rhbz#2157873 - -* Thu Nov 24 2022 Oyvind Albrigtsen - 4.9.0-35 -- mysql-common: return error in stop-action if kill fails to stop - the process, so the node can get fenced - - Resolves: rhbz#2139131 - -* Tue Nov 22 2022 Oyvind Albrigtsen - 4.9.0-34 -- nfsserver: add nfsv4_only parameter to make it run without - rpc-statd/rpcbind services - - Resolves: rhbz#2127117 - -* Mon Nov 14 2022 Oyvind Albrigtsen - 4.9.0-33 -- Filesystem: add support for Amazon EFS (Elastic File System) -- vdo-vol: dont fail probe action when the underlying device doesnt - exist - - Resolves: rhbz#2049319 - Resolves: rhbz#2141836 - -* Fri Oct 14 2022 Oyvind Albrigtsen - 4.9.0-31 -- IPsrcaddr: proto, metric, scope and default route fixes - - Resolves: rhbz#2133682 - -* Thu Sep 8 2022 Oyvind Albrigtsen - 4.9.0-30 -- storage_mon: fix specified scores count and possible false negatives -- LVM-activate: use correct return codes to fix unexpected behaviour -- azure-events-az: new resource agent - - Resolves: rhbz#2109159 - Resolves: rhbz#1905820 - Resolves: rhbz#1977012 - -* Wed Aug 10 2022 Oyvind Albrigtsen - 4.9.0-29 -- ethmonitor/pgsql: remove attrd_updater "-q" parameter to solve issue - with Pacemaker 2.1.3+ not ignoring it - - Resolves: rhbz#2116941 - -* Thu Aug 4 2022 Oyvind Albrigtsen - 4.9.0-28 -- CTDB: move process to root cgroup if realtime scheduling is enabled - - Resolves: rhbz#2090370 - -* Thu Jul 14 2022 Oyvind Albrigtsen - 4.9.0-27 -- ocf-tester: add testing tool - - Resolves: rhbz#2103370 - -* Thu Jul 14 2022 Oyvind Albrigtsen - 4.9.0-26 -- openstack-cinder-volume/openstack-floating-ip/openstack-info/ - openstack-virtual-ip: new resource agents - - Resolves: rhbz#1908146, rhbz#1908147, rhbz#1908148, rhbz#1949114 - -* Thu Jun 16 2022 Oyvind Albrigtsen - 4.9.0-22 -- gcp-vpc-move-route/gcp-vpc-move-vip: upgrade bundled - python-httplib2 to fix SSL issue - - Resolves: rhbz#2097462 - -* Thu Jun 9 2022 Oyvind Albrigtsen - 4.9.0-21 -- aws-vpc-move-ip: add interface label support - - Resolves: rhbz#2093214 - -* Wed Jun 8 2022 Oyvind Albrigtsen - 4.9.0-20 -- lvmlockd: fail when use_lvmlockd has not been set - - Resolves: rhbz#2086889 - -* Thu Apr 21 2022 Oyvind Albrigtsen - 4.9.0-19 -- Filesystem: fix UUID/label device support when 
there's whitespace - between parameter and UUID/label - - Resolves: rhbz#2049414 - -* Thu Apr 7 2022 Oyvind Albrigtsen - 4.9.0-18 -- LVM-activate: use correct return code to fence failed node - - Resolves: rhbz#2072043 - -* Thu Mar 3 2022 Oyvind Albrigtsen - 4.9.0-16 -- IPsrcaddr: add warning about possible issues when used with DHCP, - and add error message when matching route not found - - Resolves: rhbz#2064342 - -* Thu Feb 24 2022 Oyvind Albrigtsen - 4.9.0-15 -- db2: use -l forever to fix crm_attribute issue - - Resolves: rhbz#2029706 - -* Thu Jan 20 2022 Oyvind Albrigtsen - 4.9.0-13 -- mysql: add support for local SSL connection - - Resolves: rhbz#1992661 - -* Tue Dec 7 2021 Oyvind Albrigtsen - 4.9.0-12 -- Route: return OCF_NOT_RUNNING for probe action when interface - or route doesnt exist - - Resolves: rhbz#2012057 - -* Tue Nov 30 2021 Oyvind Albrigtsen - 4.9.0-11 -- nfsnotify: fix default value for "notify_args" - - Resolves: rhbz#2027591 - -* Tue Nov 9 2021 Oyvind Albrigtsen - 4.9.0-10 -- gcp-ilb: new resource agent - - Resolves: rhbz#2015789 - -* Thu Oct 28 2021 Oyvind Albrigtsen - 4.9.0-6 -- Route: return NOT_RUNNING if interface doesnt exist - - Resolves: rhbz#2002764 - -* Tue Oct 19 2021 Oyvind Albrigtsen - 4.9.0-5 -- All agents: set correct agent and OCF version in metadata -- nfsserver: add nfs_server_scope parameter - - Resolves: rhbz#2003117 - Resolves: rhbz#2014415 - -* Thu Oct 14 2021 Oyvind Albrigtsen - 4.9.0-4 -- gcp-vpc-move-route/gcp-vpc-move-vip: add serviceaccount JSON file - support - - Resolves: rhbz#1704348 - -* Thu Sep 30 2021 Oyvind Albrigtsen - 4.9.0-3 -- Rebase to resource-agents 4.9.0 upstream release. -- gcp-pd-move/gcp-vpc-move-route: dont fail with configuration rc - when it might be a network hickup - - Resolves: rhbz#1995178, rhbz#2008333 - -* Mon Aug 30 2021 Oyvind Albrigtsen - 4.1.1-98 -- storage-mon: new resource agent - - Resolves: rhbz#1509319 - -* Thu Jun 17 2021 Oyvind Albrigtsen - 4.1.1-97 -- podman: fix possible race during container creation - - Resolves: rhbz#1972743 - -* Tue Jun 15 2021 Oyvind Albrigtsen - 4.1.1-96 -- LVM-activate: fix drop-in check to avoid re-creating drop-in - - Resolves: rhbz#1972035 - -* Fri Jun 11 2021 Oyvind Albrigtsen - 4.1.1-95 -- lvmlockd: remove cmirrord support, as cmirrord is incompatible w/lvmlockd - - Resolves: rhbz#1969968 - -* Wed May 12 2021 Oyvind Albrigtsen - 4.1.1-94 -- gcp-vpc-move-vip: add retry logic - - Resolves: rhbz#1957765 - -* Wed Apr 28 2021 Oyvind Albrigtsen - 4.1.1-93 -- db2: add PRIMARY/REMOTE_CATCHUP_PENDING/CONNECTED status to promote-check -- pgsqlms: new resource agent -- python-pygments: fix CVE-2021-27291 and CVE-2021-20270 - - Resolves: rhbz#1872754, rhbz#1934651, rhbz#1935422, rhbz#1943464 - -* Thu Apr 8 2021 Oyvind Albrigtsen - 4.1.1-91 -- ethmonitor: fix vlan regex -- iface-vlan: make vlan parameter not unique -- nfsserver: error-check unmount -- VirtualDomain: fix pid status regex -- podman: return NOT_RUNNING when monitor cmd fails -- awsvip: dont partially match similar IPs during -- aws agents: dont spam log files -- aws-vpc-move-ip: add ENI lookup - - Resolves: rhbz#1891883, rhbz#1902045, rhbz#1924363, rhbz#1932863 - Resolves: rhbz#1920698, rhbz#1939992, rhbz#1940094, rhbz#1939281 - -* Mon Mar 22 2021 Oyvind Albrigtsen - 4.1.1-90 -- galera/rabbitmq-cluster/redis: run crm_mon without validation when - running in bundle (1940363) - -* Thu Mar 11 2021 Oyvind Albrigtsen - 4.1.1-89 -- azure-lb: redirect to avoid nc dying with EPIPE error (1937142) - -* Thu Feb 25 2021 Oyvind 
Albrigtsen - 4.1.1-87 -- gcp-vpc-move-route, gcp-vpc-move-vip: add project parameter and - make vpc_network parameter optional - - Resolves: rhbz#1913932 - -* Thu Dec 3 2020 Oyvind Albrigtsen - 4.1.1-81 -- ocf-shellfuncs: fix traceback redirection for Bash 5+ - - Resolves: rhbz#1903677 - -* Tue Dec 1 2020 Oyvind Albrigtsen - 4.1.1-80 -- crypt: support symlink devices, and dont run sanity checks for probes - - Resolves: rhbz#1901357 - -* Mon Nov 30 2020 Oyvind Albrigtsen - 4.1.1-79 -- LVM-activate: add drop-in during start-action to avoid getting - fenced during reboot - - Resolves: rhbz#1902208 - -* Wed Nov 25 2020 Oyvind Albrigtsen - 4.1.1-77 -- NovaEvacuate: set delay_evacuate to 0 when it's not set - - Resolves: rhbz#1899551 - -* Tue Nov 24 2020 Oyvind Albrigtsen - 4.1.1-76 -- podman: recover from killed conmon process -- podman: recover from podman's storage being out of sync -- crypt: make key_file and crypt_type parameters not unique - - Resolves: rhbz#1886262 - Resolves: rhbz#1900015 - Resolves: rhbz#1898690 - -* Fri Nov 13 2020 Oyvind Albrigtsen - 4.1.1-75 -- AWS agents: add support for IMDSv2 - - Resolves: rhbz#1897570 - -* Wed Nov 11 2020 Oyvind Albrigtsen - 4.1.1-74 -- aws-vpc-move-ip: don't warn for expected scenarios - - Resolves: rhbz#1895811 - -* Mon Nov 2 2020 Oyvind Albrigtsen - 4.1.1-73 -- crypt: new resource agent - - Resolves: rhbz#1471182 - -* Wed Oct 28 2020 Oyvind Albrigtsen - 4.1.1-72 -- sybaseASE: Run verify_all() for start operation only -- sybaseASE: add logfile parameter -- galera: set bootstrap attribute before promote -- galera: recover after network split in a 2-node cluster - - Resolves: rhbz#1848025 - Resolves: rhbz#1861001 - Resolves: rhbz#1891835 - Resolves: rhbz#1891855 - -* Tue Oct 27 2020 Oyvind Albrigtsen - 4.1.1-71 -- redis: parse password correctly based on version -- all agents: fix pcs syntax in manpage for pcs 0.10+ -- gcp-pd-move: dont stop partially matched "disk_name" - - Resolves: rhbz#1815013 - Resolves: rhbz#1763249 - Resolves: rhbz#1890068 - -* Wed Oct 7 2020 Oyvind Albrigtsen - 4.1.1-70 -- galera: recover from joining a non existing cluster - - Resolves: rhbz#1881114 - -* Wed Sep 23 2020 Oyvind Albrigtsen - 4.1.1-69 -- pgsql: ignore masters re-promote -- pgsql: add PostgreSQL 12 support -- Make Samba/CIFS dependency weak -- Filesystem: Support whitespace in device or directory name -- aws-vpc-move-ip: add region parameter - - Resolves: rhbz#1640587 - Resolves: rhbz#1795535 - Resolves: rhbz#1828600 - Resolves: rhbz#1858752 - Resolves: rhbz#1872999 - -* Thu Aug 20 2020 Oyvind Albrigtsen - 4.1.1-68 -- azure-lb: fix redirect issue - - Resolves: rhbz#1850778 - -* Wed Aug 19 2020 Oyvind Albrigtsen - 4.1.1-67 -- gcp-vpc-move-vip: add support for multiple alias IPs - - Resolves: rhbz#1846733 - -* Thu Jul 30 2020 Oyvind Albrigtsen - 4.1.1-65 -- azure-events: handle exceptions in urlopen - - Resolves: rhbz#1845574 - -* Mon Jul 27 2020 Oyvind Albrigtsen - 4.1.1-64 -- nfsserver: fix NFSv4-only support -- azure-events: new resource agent for Azure - - Resolves: rhbz#1818997 - Resolves: rhbz#1819965 - -* Thu Jun 25 2020 Oyvind Albrigtsen - 4.1.1-60 -- Upgrade bundled python-httplib2 to fix CVE-2020-11078 - - Resolves: rhbz#1850990 - -* Wed Jun 17 2020 Oyvind Albrigtsen - 4.1.1-59 -- pgsql: support Pacemaker v2.03+ output - - Resolves: rhbz#1836186 - -* Thu Jun 11 2020 Oyvind Albrigtsen - 4.1.1-56 -- Filesystem: set "fast_stop" default to "no" for GFS2 filesystems - - Resolves: rhbz#1814896 - -* Wed Jun 10 2020 Oyvind Albrigtsen - 4.1.1-55 -- nfsserver: 
dont log error message when /etc/sysconfig/nfs does not exist -- exportfs: describe clientspec format in metadata - - Resolves: rhbz#1845581 - Resolves: rhbz#1845583 - -* Tue Jun 9 2020 Oyvind Albrigtsen - 4.1.1-54 -- exportfs: add symlink support -- aliyun-vpc-move-ip: log output when failing - - Resolves: rhbz#1820523 - Resolves: rhbz#1843999 - -* Tue Jun 2 2020 Oyvind Albrigtsen - 4.1.1-53 -- podman: force remove container if remove fails - - Resolves: rhbz#1839721 - -* Thu May 28 2020 Oyvind Albrigtsen - 4.1.1-52 -- gcp-pd-move: new resource agent for Google Cloud - - Resolves: rhbz#1633251 - -* Wed May 27 2020 Oyvind Albrigtsen - 4.1.1-51 -- NovaEvacuate: suppress expected initial error message -- db2 (HADR): promote standby node when master node disappears - - Resolves: rhbz#1830716 - Resolves: rhbz#1836945 - -* Thu May 7 2020 Oyvind Albrigtsen - 4.1.1-50 -- rabbitmq-cluster: increase rabbitmqctl wait timeout during start - - Resolves: rhbz#1832321 - -* Tue Apr 28 2020 Oyvind Albrigtsen - 4.1.1-49 -- aws-vpc-route53: new resource agent for AWS -- pgsql: improve checks to prevent incorrect status, and set initial - score for primary and hot standby - - Resolves: rhbz#1759115 - Resolves: rhbz#1744190 - -* Mon Apr 6 2020 Oyvind Albrigtsen - 4.1.1-47 -- aws-vpc-move-ip: delete remaining route entries - - Resolves: rhbz#1819021 - -* Fri Mar 27 2020 Oyvind Albrigtsen - 4.1.1-46 -- use safe temp file location -- ocf-shellfuncs: ocf_is_clone(): fix to return true when clone-max - is set to 0 - - Resolves: rhbz#1817432 - Resolves: rhbz#1817598 - -* Wed Mar 18 2020 Oyvind Albrigtsen - 4.1.1-45 -- azure-lb: support using socat instead of nc -- aws-vpc-move-ip: add "routing_table_role" parameter -- redis: fix validate-all action and run it during start - - Resolves: rhbz#1804658 - Resolves: rhbz#1810466 - Resolves: rhbz#1792237 - -* Fri Mar 6 2020 Oyvind Albrigtsen - 4.1.1-44 -- lvmlockd: automatically remove locking_type from lvm.conf for LVM - v2.03+ - - Resolves: rhbz#1808468 - -* Tue Jan 28 2020 Oyvind Albrigtsen - 4.1.1-43 -- rabbitmq-cluster: delete nodename when stop fails - - Resolves: rhbz#1792196 - -* Thu Jan 23 2020 Oyvind Albrigtsen - 4.1.1-42 -- IPsrcaddr: add destination and table parameters - - Resolves: rhbz#1744224 - -* Mon Jan 13 2020 Oyvind Albrigtsen - 4.1.1-40 -- podman: improve image exist check -- IPaddr2: add CLUSTERIP not supported info to metadata/manpage -- Filesystem: refresh UUID if block device doesnt exist - - Resolves: rhbz#1788889 - Resolves: rhbz#1767916 - Resolves: rhbz#1777381 - -* Wed Nov 27 2019 Oyvind Albrigtsen - 4.1.1-38 -- IPaddr2: add noprefixroute parameter - - Resolves: rhbz#1741042 - -* Wed Nov 13 2019 Oyvind Albrigtsen - 4.1.1-36 -- exportfs: allow multiple exports with same fsid -- mysql/galera: fix incorrect rc - - Resolves: rhbz#1764888 - Resolves: rhbz#1765128 - -* Mon Oct 14 2019 Oyvind Albrigtsen - 4.1.1-35 -- Route: dont fence when parameters not set -- LVM-activate: add partial-activation support - - Resolves: rhbz#1750261 - Resolves: rhbz#1741843 - -* Wed Oct 2 2019 Oyvind Albrigtsen - 4.1.1-34 -- LVM/clvm: remove manpages for excluded agents -- LVM-activate: return NOT_RUNNING when node rejoins cluster -- LVM-activate: detect systemid volume without reboot -- Filesystem: add symlink support -- Filesystem: avoid corrupt mount-list and dont kill incorrect processes - for bind-mounts -- IPsrcaddr: make proto optional to fix regression when used without - NetworkManager -- docker: fix stop issues -- rabbitmq-cluster: also restore users in 
single node mode -- IPaddr2: sanitize compressed IPv6 IPs -- nfsserver: systemd performance improvements -- NovaEvacuate: add "evacuate_delay" parameter - - Resolves: rhbz#1694392 - Resolves: rhbz#1695039 - Resolves: rhbz#1738428 - Resolves: rhbz#1744103 - Resolves: rhbz#1744140 - Resolves: rhbz#1757837 - Resolves: rhbz#1748768 - Resolves: rhbz#1750352 - Resolves: rhbz#1751700 - Resolves: rhbz#1751962 - Resolves: rhbz#1755760 - -* Tue Aug 27 2019 Oyvind Albrigtsen - 4.1.1-33 -- rabbitmq-cluster: fail monitor when node is in minority partition, - fix stop regression, retry start when cluster join fails, ensure - node attributes are removed - - Resolves: rhbz#1745713 - -* Mon Aug 12 2019 Oyvind Albrigtsen - 4.1.1-32 -- mysql/galera: use runuser/su to avoid using DAC_OVERRIDE - - Resolves: rhbz#1692960 - -* Wed Aug 7 2019 Oyvind Albrigtsen - 4.1.1-31 -- podman: add drop-in dependency support - - Resolves: rhbz#1736746 - -* Wed Jul 31 2019 Oyvind Albrigtsen - 4.1.1-30 -- iSCSITarget/iSCSILogicalUnit: only create iqn/acls when it doesnt - exist - - Resolves: rhbz#1692413 - -* Tue Jul 30 2019 Oyvind Albrigtsen - 4.1.1-29 -- CTDB: add support for v4.9+ - - Resolves: rhbz#1732867 - -* Tue Jul 23 2019 Oyvind Albrigtsen - 4.1.1-28 -- podman: fixes to avoid bundle resources restarting when probing - takes too long -- LVM-activate: fix monitor to avoid hang caused by validate-all call - - Resolves: rhbz#1718219 - Resolves: rhbz#1730455 - -* Wed Jun 19 2019 Oyvind Albrigtsen - 4.1.1-27 -- ocf_log: do not log debug messages when HA_debug unset -- Filesystem: remove notify-action from metadata -- dhcpd keep SELinux context in chroot - - Resolves: rhbz#1707969 - Resolves: rhbz#1717759 - Resolves: rhbz#1719684 - -* Tue Jun 11 2019 Oyvind Albrigtsen - 4.1.1-26 -- sap/sap-hana: split subpackages into separate packages - - Resolves: rhbz#1705767 - -* Wed May 29 2019 Oyvind Albrigtsen - 4.1.1-24 -- Squid: fix PID file issue - - Resolves: rhbz#1689184 - -* Tue May 28 2019 Oyvind Albrigtsen - 4.1.1-23 -- Route: make family parameter optional -- redis: mute password warning - - Resolves: rhbz#1669140 - Resolves: rhbz#1683548 - -* Thu May 23 2019 Oyvind Albrigtsen - 4.1.1-22 -- aws-vpc-move-ip: add multi route-table support and fix issue - w/multiple NICs - - Resolves: rhbz#1697559 - -* Fri Apr 5 2019 Oyvind Albrigtsen - 4.1.1-21 -- gcp-vpc-move-route/gcp-vpc-move-vip: fix Python 3 encoding issue - - Resolves: rhbz#1695656 - -* Mon Apr 1 2019 Oyvind Albrigtsen - 4.1.1-20 -- aws-vpc-move-ip: use "--query" to avoid a possible race condition -- gcloud-ra: fix Python 3 issue and remove Python 2 detection - - Resolves: rhbz#1693662 - Resolves: rhbz#1691456 - -* Thu Mar 21 2019 Oyvind Albrigtsen - 4.1.1-19 -- Add CI gating tests -- LVM-activate: support LVs from same VG -- tomcat: use systemd when catalina.sh is unavailable -- Fixed python-devel/perl build dependencies - - Resolves: rhbz#1682136 - Resolves: rhbz#1667414 - Resolves: rhbz#1666691 - Resolves: rhbz#1595854 - -* Thu Mar 7 2019 Oyvind Albrigtsen - 4.1.1-18 -- aliyun-vpc-move-ip: exclude from main package -- aliyuncli-ra: upgrade bundled python-aliyun-sdk-core and fix Python 3 issues -- ocf.py: byte compile - - Resolves: rhbz#1677204 - Resolves: rhbz#1677981 - Resolves: rhbz#1678874 - -* Tue Feb 5 2019 Oyvind Albrigtsen - 4.1.1-17 -- LVM-activate: dont require locking_type - - Resolves: rhbz#1658664 - -* Fri Jan 11 2019 Oyvind Albrigtsen - 4.1.1-16 -- vdo-vol: fix monitor-action -- LVM-activate: dont fail initial probe - - Resolves: rhbz#1662466 - Resolves: 
rhbz#1643307 - -* Tue Oct 23 2018 Oyvind Albrigtsen - 4.1.1-15 -- nfsserver: fix start-issues when nfs_shared_infodir parameter is - changed - - Resolves: rhbz#1642027 - -* Mon Oct 8 2018 Oyvind Albrigtsen - 4.1.1-14 -- redis: use basename in pidof to avoid issues in containers - - Resolves: rhbz#1635785 - -* Wed Sep 26 2018 Oyvind Albrigtsen - 4.1.1-11 -- Remove grpc from bundle - - Resolves: rhbz#1630627 - -* Fri Sep 21 2018 Oyvind Albrigtsen - 4.1.1-10 -- systemd-tmpfiles: change path to /run/resource-agents - - Resolves: rhbz#1631291 - -* Fri Aug 24 2018 Oyvind Albrigtsen - 4.1.1-9 -- podman: new resource agent - - Resolves: rhbz#1607607 - -* Wed Aug 22 2018 Oyvind Albrigtsen - 4.1.1-8 -- LVM: fix missing dash in activate_options -- LVM-activate: warn about incorrect vg_access_mode -- lvmlockd: add cmirrord support - -* Wed Aug 1 2018 Oyvind Albrigtsen - 4.1.1-7 -- findif: only match lines containing netmasks - -* Mon Jul 30 2018 Florian Weimer - 4.1.1-6 -- Rebuild with fixed binutils - -* Fri Jul 27 2018 Oyvind Albrigtsen - 4.1.1-5 -- vdo-vol: new resource agent - - Resolves: rhbz#1552330 - -* Wed Jul 4 2018 Oyvind Albrigtsen - 4.1.1-4 -- VirtualDomain: add stateless support -- Exclude unsupported agents - -* Thu Jun 28 2018 Oyvind Albrigtsen - 4.1.1-3 -- Added SAPHana and OpenStack agents - -* Fri May 25 2018 Oyvind Albrigtsen - 4.1.1-2 -- Remove unsupported clvm and LVM agents - -* Tue Mar 13 2018 Oyvind Albrigtsen - 4.1.1-1 -- Rebase to resource-agents 4.1.1 upstream release. - -* Mon Feb 19 2018 Oyvind Albrigtsen - 4.1.0-2 -- Add gcc to BuildRequires - -* Fri Feb 09 2018 Igor Gnatenko - 4.1.0-1.1 -- Escape macros in %%changelog - -* Wed Jan 10 2018 Oyvind Albrigtsen - 4.1.0-1 -- Rebase to resource-agents 4.1.0 upstream release. - -* Thu Aug 03 2017 Fedora Release Engineering - 4.0.1-1.3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild - -* Thu Jul 27 2017 Fedora Release Engineering - 4.0.1-1.2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild - -* Sat Feb 11 2017 Fedora Release Engineering - 4.0.1-1.1 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild - -* Thu Feb 2 2017 Oyvind Albrigtsen - 4.0.1-1 -- Rebase to resource-agents 4.0.1 upstream release. - -* Wed Feb 1 2017 Oyvind Albrigtsen - 4.0.0-2 -- galera: remove "long SST monitoring" support due to corner-case issues - -* Tue Jan 31 2017 Oyvind Albrigtsen - 4.0.0-1 -- Rebase to resource-agents 4.0.0 upstream release. - -* Thu Dec 15 2016 Oyvind Albrigtsen - 3.9.7-6 -- Add netstat dependency - -* Tue Feb 9 2016 Oyvind Albrigtsen - 3.9.7-4 -- Rebase to resource-agents 3.9.7 upstream release. - -* Thu Feb 04 2016 Fedora Release Engineering - 3.9.6-2.2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild - -* Thu Jun 18 2015 Fedora Release Engineering - 3.9.6-2.1 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild - -* Mon Apr 20 2015 David Vossel - 3.9.6-2 -- Rebase to latest upstream code in order to pull in rabbitmq-cluster agent - -* Fri Feb 13 2015 David Vossel - 3.9.6-1 -- Rebase to resource-agents 3.9.6 upstream release. - -* Sun Aug 17 2014 Fedora Release Engineering - 3.9.5-12.2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild - -* Sun Jun 08 2014 Fedora Release Engineering - 3.9.5-12.1 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild - -* Wed Apr 30 2014 David Vossel - 3.9.5-12 -- Sync with latest upstream. - -* Thu Jan 2 2014 David Vossel - 3.9.5-11 -- Sync with latest upstream. 
- -* Sun Oct 20 2013 David Vossel - 3.9.5-10 -- Fix build system for rawhide. - -* Wed Oct 16 2013 David Vossel - 3.9.5-9 -- Remove rgmanager agents from build. - -* Sun Aug 04 2013 Fedora Release Engineering - 3.9.5-8 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild - -* Wed Jul 17 2013 Petr Pisar - 3.9.5-7 -- Perl 5.18 rebuild - -* Tue Jun 18 2013 David Vossel - 3.9.5-6 -- Restores rsctmp directory to upstream default. - -* Tue Jun 18 2013 David Vossel - 3.9.5-5 -- Merges redhat provider into heartbeat provider. Remove - rgmanager's redhat provider. - - Resolves: rhbz#917681 - Resolves: rhbz#928890 - Resolves: rhbz#952716 - Resolves: rhbz#960555 - -* Tue Mar 12 2013 David Vossel - 3.9.5-3 -- Fixes build system error with conditional logic involving - IPv6addr and updates spec file to build against rhel 7 as - well as fedora 19. - -* Mon Mar 11 2013 David Vossel - 3.9.5-2 -- Resolves rhbz#915050 - -* Mon Mar 11 2013 David Vossel - 3.9.5-1 -- New upstream release. - -* Fri Jan 25 2013 Kevin Fenzi - 3.9.2-5 -- Fix cifs mount requires - -* Mon Nov 12 2012 Chris Feist - 3.9.2-4 -- Removed version number after dist - -* Mon Oct 29 2012 Chris Feist - 3.9.2-3.8 -- Remove cluster-glue-libs-devel -- Disable IPv6addr & sfex to fix deps on libplumgpl & libplum (due to - disappearance of cluster-glue in F18) - -* Sat Jul 21 2012 Fedora Release Engineering - 3.9.2-3.5 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild - -* Thu Jul 05 2012 Chris Feist - 3.9.2-3.4 -- Fix location of lvm (change from /sbin to /usr/sbin) - -* Wed Apr 04 2012 Jon Ciesla - 3.9.2-3.3 -- Rebuilt to fix rawhide dependency issues (caused by move of fsck from - /sbin to /usr/sbin). - -* Fri Mar 30 2012 Jon Ciesla - 3.9.2-3.1 -- libnet rebuild. - -* Sat Jan 14 2012 Fedora Release Engineering - 3.9.2-2.1 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild - -* Fri Jul 8 2011 Fabio M. Di Nitto - 3.9.2-2 -- add post call to resource-agents to integrate with cluster 3.1.4 - -* Thu Jun 30 2011 Fabio M. Di Nitto - 3.9.2-1 -- new upstream release -- fix 2 regressions from 3.9.1 - -* Mon Jun 20 2011 Fabio M. Di Nitto - 3.9.1-1 -- new upstream release -- import spec file from upstream - -* Tue Mar 1 2011 Fabio M. Di Nitto - 3.1.1-1 -- new upstream release 3.1.1 and 1.0.4 - -* Wed Feb 09 2011 Fedora Release Engineering - 3.1.0-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild - -* Thu Dec 2 2010 Fabio M. Di Nitto - 3.1.0-1 -- new upstream release -- spec file update: - Update upstream URL - Update source URL - use standard configure macro - use standard make invokation - -* Thu Oct 7 2010 Fabio M. Di Nitto - 3.0.17-1 -- new upstream release - Resolves: rhbz#632595, rhbz#633856, rhbz#632385, rhbz#628013 - Resolves: rhbz#621313, rhbz#595383, rhbz#580492, rhbz#605733 - Resolves: rhbz#636243, rhbz#591003, rhbz#637913, rhbz#634718 - Resolves: rhbz#617247, rhbz#617247, rhbz#617234, rhbz#631943 - Resolves: rhbz#639018 - -* Thu Oct 7 2010 Andrew Beekhof - 3.0.16-2 -- new upstream release of the Pacemaker agents: 71b1377f907c - -* Thu Sep 2 2010 Fabio M. Di Nitto - 3.0.16-1 -- new upstream release - Resolves: rhbz#619096, rhbz#614046, rhbz#620679, rhbz#619680 - Resolves: rhbz#621562, rhbz#621694, rhbz#608887, rhbz#622844 - Resolves: rhbz#623810, rhbz#617306, rhbz#623816, rhbz#624691 - Resolves: rhbz#622576 - -* Thu Jul 29 2010 Fabio M. 
Di Nitto - 3.0.14-1 -- new upstream release - Resolves: rhbz#553383, rhbz#557563, rhbz#578625, rhbz#591003 - Resolves: rhbz#593721, rhbz#593726, rhbz#595455, rhbz#595547 - Resolves: rhbz#596918, rhbz#601315, rhbz#604298, rhbz#606368 - Resolves: rhbz#606470, rhbz#606480, rhbz#606754, rhbz#606989 - Resolves: rhbz#607321, rhbz#608154, rhbz#608887, rhbz#609181 - Resolves: rhbz#609866, rhbz#609978, rhbz#612097, rhbz#612110 - Resolves: rhbz#612165, rhbz#612941, rhbz#614127, rhbz#614356 - Resolves: rhbz#614421, rhbz#614457, rhbz#614961, rhbz#615202 - Resolves: rhbz#615203, rhbz#615255, rhbz#617163, rhbz#617566 - Resolves: rhbz#618534, rhbz#618703, rhbz#618806, rhbz#618814 - -* Mon Jun 7 2010 Fabio M. Di Nitto - 3.0.13-1 -- new upstream release - Resolves: rhbz#592103, rhbz#593108, rhbz#578617, rhbz#594626 - Resolves: rhbz#594511, rhbz#596046, rhbz#594111, rhbz#597002 - Resolves: rhbz#599643 - -* Tue May 18 2010 Andrew Beekhof - 3.0.12-2 -- libnet is not available on RHEL -- Do not package ldirectord on RHEL - Resolves: rhbz#577264 - -* Mon May 10 2010 Fabio M. Di Nitto - 3.0.12-1 -- new upstream release - Resolves: rhbz#585217, rhbz#586100, rhbz#581533, rhbz#582753 - Resolves: rhbz#582754, rhbz#585083, rhbz#587079, rhbz#588890 - Resolves: rhbz#588925, rhbz#583789, rhbz#589131, rhbz#588010 - Resolves: rhbz#576871, rhbz#576871, rhbz#590000, rhbz#589823 - -* Mon May 10 2010 Andrew Beekhof - 3.0.12-1 -- New pacemaker agents upstream release: a7c0f35916bf - + High: pgsql: properly implement pghost parameter - + High: RA: mysql: fix syntax error - + High: SAPInstance RA: do not rely on op target rc when monitoring clones (lf#2371) - + High: set the HA_RSCTMP directory to /var/run/resource-agents (lf#2378) - + Medium: IPaddr/IPaddr2: add a description of the assumption in meta-data - + Medium: IPaddr: return the correct code if interface delete failed - + Medium: nfsserver: rpc.statd as the notify cmd does not work with -v (thanks to Carl Lewis) - + Medium: oracle: reduce output from sqlplus to the last line for queries (bnc#567815) - + Medium: pgsql: implement "config" parameter - + Medium: RA: iSCSITarget: follow changed IET access policy - -* Wed Apr 21 2010 Fabio M. Di Nitto - 3.0.11-1 -- new upstream release - Resolves: rhbz#583945, rhbz#581047, rhbz#576330, rhbz#583017 - Resolves: rhbz#583019, rhbz#583948, rhbz#584003, rhbz#582017 - Resolves: rhbz#555901, rhbz#582754, rhbz#582573, rhbz#581533 -- Switch to file based Requires. - Also address several other problems related to missing runtime - components in different agents. - With the current Requires: set, we guarantee all basic functionalities - out of the box for lvm/fs/clusterfs/netfs/networking. 
- Resolves: rhbz#570008 - -* Sat Apr 17 2010 Andrew Beekhof - 3.0.10-2 -- New pacemaker agents upstream release - + High: RA: vmware: fix set_environment() invocation (LF 2342) - + High: RA: vmware: update to version 0.2 - + Medium: Filesystem: prefer /proc/mounts to /etc/mtab for non-bind mounts (lf#2388) - + Medium: IPaddr2: don't bring the interface down on stop (thanks to Lars Ellenberg) - + Medium: IPsrcaddr: modify the interface route (lf#2367) - + Medium: ldirectord: Allow multiple email addresses (LF 2168) - + Medium: ldirectord: fix setting defaults for configfile and ldirectord (lf#2328) - + Medium: meta-data: improve timeouts in most resource agents - + Medium: nfsserver: use default values (lf#2321) - + Medium: ocf-shellfuncs: don't log but print to stderr if connected to a terminal - + Medium: ocf-shellfuncs: don't output to stderr if using syslog - + Medium: oracle/oralsnr: improve exit codes if the environment isn't valid - + Medium: RA: iSCSILogicalUnit: fix monitor for STGT - + Medium: RA: make sure that OCF_RESKEY_CRM_meta_interval is always defined (LF 2284) - + Medium: RA: ManageRAID: require bash - + Medium: RA: ManageRAID: require bash - + Medium: RA: VirtualDomain: bail out early if config file can't be read during probe (Novell 593988) - + Medium: RA: VirtualDomain: fix incorrect use of __OCF_ACTION - + Medium: RA: VirtualDomain: improve error messages - + Medium: RA: VirtualDomain: spin on define until we definitely have a domain name - + Medium: Route: add route table parameter (lf#2335) - + Medium: sfex: don't use pid file (lf#2363,bnc#585416) - + Medium: sfex: exit with success on stop if sfex has never been started (bnc#585416) - -* Fri Apr 9 2010 Fabio M. Di Nitto - 3.0.10-1 -- New rgmanager resource agents upstream release - Resolves: rhbz#519491, rhbz#570525, rhbz#571806, rhbz#574027 - Resolves: rhbz#574215, rhbz#574886, rhbz#576322, rhbz#576335 - Resolves: rhbz#575103, rhbz#577856, rhbz#577874, rhbz#578249 - Resolves: rhbz#578625, rhbz#578626, rhbz#578628, rhbz#578626 - Resolves: rhbz#579621, rhbz#579623, rhbz#579625, rhbz#579626 - Resolves: rhbz#579059 - -* Wed Mar 24 2010 Andrew Beekhof - 3.0.9-2 -- Resolves: rhbz#572993 - Patched build process to correctly generate ldirectord man page -- Resolves: rhbz#574732 - Add libnet-devel as a dependancy to ensure IPaddrv6 is built - -* Mon Mar 1 2010 Fabio M. Di Nitto - 3.0.9-1 -- New rgmanager resource agents upstream release - Resolves: rhbz#455300, rhbz#568446, rhbz#561862, rhbz#536902 - Resolves: rhbz#512171, rhbz#519491 - -* Mon Feb 22 2010 Fabio M. Di Nitto - 3.0.8-1 -- New rgmanager resource agents upstream release - Resolves: rhbz#548133, rhbz#565907, rhbz#545602, rhbz#555901 - Resolves: rhbz#564471, rhbz#515717, rhbz#557128, rhbz#536157 - Resolves: rhbz#455300, rhbz#561416, rhbz#562237, rhbz#537201 - Resolves: rhbz#536962, rhbz#553383, rhbz#556961, rhbz#555363 - Resolves: rhbz#557128, rhbz#455300, rhbz#557167, rhbz#459630 - Resolves: rhbz#532808, rhbz#556603, rhbz#554968, rhbz#555047 - Resolves: rhbz#554968, rhbz#555047 -- spec file update: - * update spec file copyright date - * use bz2 tarball - -* Fri Jan 15 2010 Fabio M. Di Nitto - 3.0.7-2 -- Add python as BuildRequires - -* Mon Jan 11 2010 Fabio M. 
Di Nitto - 3.0.7-1 -- New rgmanager resource agents upstream release - Resolves: rhbz#526286, rhbz#533461 - -* Mon Jan 11 2010 Andrew Beekhof - 3.0.6-2 -- Update Pacameker agents to upstream version: c76b4a6eb576 - + High: RA: VirtualDomain: fix forceful stop (LF 2283) - + High: apache: monitor operation of depth 10 for web applications (LF 2234) - + Medium: IPaddr2: CLUSTERIP/iptables rule not always inserted on failed monitor (LF 2281) - + Medium: RA: Route: improve validate (LF 2232) - + Medium: mark obsolete RAs as deprecated (LF 2244) - + Medium: mysql: escalate stop to KILL if regular shutdown doesn't work - -* Mon Dec 7 2009 Fabio M. Di Nitto - 3.0.6-1 -- New rgmanager resource agents upstream release -- spec file update: - * use global instead of define - * use new Source0 url - * use %%name macro more aggressively - -* Mon Dec 7 2009 Andrew Beekhof - 3.0.5-2 -- Update Pacameker agents to upstream version: bc00c0b065d9 - + High: RA: introduce OCF_FUNCTIONS_DIR, allow it to be overridden (LF2239) - + High: doc: add man pages for all RAs (LF2237) - + High: syslog-ng: new RA - + High: vmware: make meta-data work and several cleanups (LF 2212) - + Medium: .ocf-shellfuncs: add ocf_is_probe function - + Medium: Dev: make RAs executable (LF2239) - + Medium: IPv6addr: ifdef out the ip offset hack for libnet v1.1.4 (LF 2034) - + Medium: add mercurial repository version information to .ocf-shellfuncs - + Medium: build: add perl-MailTools runtime dependency to ldirectord package (LF 1469) - + Medium: iSCSITarget, iSCSILogicalUnit: support LIO - + Medium: nfsserver: use check_binary properly in validate (LF 2211) - + Medium: nfsserver: validate should not check if nfs_shared_infodir exists (thanks to eelco@procolix.com) (LF 2219) - + Medium: oracle/oralsnr: export variables properly - + Medium: pgsql: remove the previous backup_label if it exists - + Medium: postfix: fix double stop (thanks to Dinh N. Quoc) - + RA: LVM: Make monitor operation quiet in logs (bnc#546353) - + RA: Xen: Remove instance_attribute "allow_migrate" (bnc#539968) - + ldirectord: OCF agent: overhaul - -* Fri Nov 20 2009 Fabio M. Di Nitto - 3.0.5-1 -- New rgmanager resource agents upstream release -- Allow pacemaker to use rgmanager resource agents - -* Wed Oct 28 2009 Andrew Beekhof - 3.0.4-2 -- Update Pacameker agents to upstream version: e2338892f59f - + High: send_arp - turn on unsolicited mode for compatibilty with the libnet version's exit codes - + High: Trap sigterm for compatibility with the libnet version of send_arp - + Medium: Bug - lf#2147: IPaddr2: behave if the interface is down - + Medium: IPv6addr: recognize network masks properly - + Medium: RA: VirtualDomain: avoid needlessly invoking "virsh define" - -* Wed Oct 21 2009 Fabio M. Di Nitto - 3.0.4-1 -- New rgmanager resource agents upstream release - -* Mon Oct 12 2009 Andrew Beekhof - 3.0.3-3 -- Update Pacameker agents to upstream version: 099c0e5d80db - + Add the ha_parameter function back into .ocf-shellfuncs. 
- + Bug bnc#534803 - Provide a default for MAILCMD - + Fix use of undefined macro @HA_NOARCHDATAHBDIR@ - + High (LF 2138): IPsrcaddr: replace 0/0 with proper ip prefix (thanks to Michael Ricordeau and Michael Schwartzkopff) - + Import shellfuncs from heartbeat as badly written RAs use it - + Medium (LF 2173): nfsserver: exit properly in nfsserver_validate - + Medium: RA: Filesystem: implement monitor operation - + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable - + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable (addendum) - + Medium: RA: iSCSILogicalUnit: use a 16-byte default SCSI ID - + Medium: RA: iSCSITarget: be more persistent deleting targets on stop - + Medium: RA: portblock: add per-IP filtering capability - + Medium: mysql-proxy: log_level and keepalive parameters - + Medium: oracle: drop spurious output from sqlplus - + RA: Filesystem: allow configuring smbfs mounts as clones - -* Wed Sep 23 2009 Fabio M. Di Nitto - 3.0.3-1 -- New rgmanager resource agents upstream release - -* Thu Aug 20 2009 Fabio M. Di Nitto - 3.0.1-1 -- New rgmanager resource agents upstream release - -* Tue Aug 18 2009 Andrew Beekhof - 3.0.0-16 -- Create an ldirectord package -- Update Pacameker agents to upstream version: 2198dc90bec4 - + Build: Import ldirectord. - + Ensure HA_VARRUNDIR has a value to substitute - + High: Add findif tool (mandatory for IPaddr/IPaddr2) - + High: IPv6addr: new nic and cidr_netmask parameters - + High: postfix: new resource agent - + Include license information - + Low (LF 2159): Squid: make the regexp match more precisely output of netstat - + Low: configure: Fix package name. - + Low: ldirectord: add dependency on $remote_fs. - + Low: ldirectord: add mandatory required header to init script. - + Medium (LF 2165): IPaddr2: remove all colons from the mac address before passing it to send_arp - + Medium: VirtualDomain: destroy domain shortly before timeout expiry - + Medium: shellfuncs: Make the mktemp wrappers work. - + Remove references to Echo function - + Remove references to heartbeat shellfuncs. - + Remove useless path lookups - + findif: actually include the right header. Simplify configure. - + ldirectord: Remove superfluous configure artifact. - + ocf-tester: Fix package reference and path to DTD. - -* Tue Aug 11 2009 Ville Skyttä - 3.0.0-15 -- Use bzipped upstream hg tarball. - -* Wed Jul 29 2009 Fabio M. Di Nitto - 3.0.0-14 -- Merge Pacemaker cluster resource agents: - * Add Source1. - * Drop noarch. We have real binaries now. - * Update BuildRequires. - * Update all relevant prep/build/install/files/description sections. - -* Sun Jul 26 2009 Fedora Release Engineering - 3.0.0-13 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild - -* Wed Jul 8 2009 Fabio M. Di Nitto - 3.0.0-12 -- spec file updates: - * Update copyright header - * final release.. undefine alphatag - -* Thu Jul 2 2009 Fabio M. Di Nitto - 3.0.0-11.rc4 -- New upstream release. - -* Sat Jun 20 2009 Fabio M. Di Nitto - 3.0.0-10.rc3 -- New upstream release. - -* Wed Jun 10 2009 Fabio M. Di Nitto - 3.0.0-9.rc2 -- New upstream release + git94df30ca63e49afb1e8aeede65df8a3e5bcd0970 - -* Tue Mar 24 2009 Fabio M. Di Nitto - 3.0.0-8.rc1 -- New upstream release. -- Update BuildRoot usage to preferred versions/names - -* Mon Mar 9 2009 Fabio M. Di Nitto - 3.0.0-7.beta1 -- New upstream release. - -* Fri Mar 6 2009 Fabio M. Di Nitto - 3.0.0-6.alpha7 -- New upstream release. - -* Tue Mar 3 2009 Fabio M. Di Nitto - 3.0.0-5.alpha6 -- New upstream release. 
- -* Tue Feb 24 2009 Fabio M. Di Nitto - 3.0.0-4.alpha5 -- Drop Conflicts with rgmanager. - -* Mon Feb 23 2009 Fabio M. Di Nitto - 3.0.0-3.alpha5 -- New upstream release. - -* Thu Feb 19 2009 Fabio M. Di Nitto - 3.0.0-2.alpha4 -- Add comments on how to build this package. - -* Thu Feb 5 2009 Fabio M. Di Nitto - 3.0.0-1.alpha4 -- New upstream release. -- Fix datadir/cluster directory ownership. - -* Tue Jan 27 2009 Fabio M. Di Nitto - 3.0.0-1.alpha3 - - Initial packaging diff --git a/ha-cloud-support-aliyun.patch b/ha-cloud-support-aliyun.patch new file mode 100644 index 0000000..d714729 --- /dev/null +++ b/ha-cloud-support-aliyun.patch @@ -0,0 +1,12 @@ +diff --color -uNr a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip +--- a/heartbeat/aliyun-vpc-move-ip 2021-08-19 09:37:57.000000000 +0200 ++++ b/heartbeat/aliyun-vpc-move-ip 2021-08-25 13:38:26.786626079 +0200 +@@ -17,7 +17,7 @@ + OCF_RESKEY_interface_default="eth0" + OCF_RESKEY_profile_default="default" + OCF_RESKEY_endpoint_default="vpc.aliyuncs.com" +-OCF_RESKEY_aliyuncli_default="detect" ++OCF_RESKEY_aliyuncli_default="/usr/lib/fence-agents/support/aliyun/aliyun-cli/aliyun" + + + : ${OCF_RESKEY_address=${OCF_RESKEY_address_default}} diff --git a/ha-cloud-support-gcloud.patch b/ha-cloud-support-gcloud.patch new file mode 100644 index 0000000..8054aa1 --- /dev/null +++ b/ha-cloud-support-gcloud.patch @@ -0,0 +1,33 @@ +diff --color -uNr a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +--- a/heartbeat/gcp-pd-move.in 2021-08-19 09:37:57.000000000 +0200 ++++ b/heartbeat/gcp-pd-move.in 2021-08-25 13:50:54.461732967 +0200 +@@ -32,6 +32,7 @@ + from ocf import logger + + try: ++ sys.path.insert(0, "/usr/lib/fence-agents/support/google/lib/python#PYTHON3_VERSION#/site-packages") + import googleapiclient.discovery + except ImportError: + pass +diff --color -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +--- a/heartbeat/gcp-vpc-move-route.in 2021-08-19 09:37:57.000000000 +0200 ++++ b/heartbeat/gcp-vpc-move-route.in 2021-08-25 13:51:17.489797999 +0200 +@@ -45,6 +45,7 @@ + from ocf import * + + try: ++ sys.path.insert(0, "/usr/lib/fence-agents/support/google/lib/python#PYTHON3_VERSION#/site-packages") + import googleapiclient.discovery + import pyroute2 + try: +diff --color -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +--- a/heartbeat/gcp-vpc-move-vip.in 2021-08-19 09:37:57.000000000 +0200 ++++ b/heartbeat/gcp-vpc-move-vip.in 2021-08-25 13:51:35.012847487 +0200 +@@ -29,6 +29,7 @@ + from ocf import * + + try: ++ sys.path.insert(0, "/usr/lib/fence-agents/support/google/lib/python#PYTHON3_VERSION#/site-packages") + import googleapiclient.discovery + try: + from google.oauth2.service_account import Credentials as ServiceAccountCredentials diff --git a/SOURCES/bz1872754-pgsqlms-new-ra.patch b/pgsqlms-ra.patch similarity index 92% rename from SOURCES/bz1872754-pgsqlms-new-ra.patch rename to pgsqlms-ra.patch index b3b314e..9644a2b 100644 --- a/SOURCES/bz1872754-pgsqlms-new-ra.patch +++ b/pgsqlms-ra.patch @@ -1,7 +1,7 @@ diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am ---- a/doc/man/Makefile.am 2021-04-12 12:51:56.831835953 +0200 -+++ b/doc/man/Makefile.am 2021-04-13 13:38:14.198361848 +0200 -@@ -154,6 +154,7 @@ +--- a/doc/man/Makefile.am 2023-10-11 09:03:53.000000000 +0200 ++++ b/doc/man/Makefile.am 2024-06-12 09:14:42.898393461 +0200 +@@ -184,6 +184,7 @@ ocf_heartbeat_ovsmonitor.7 \ ocf_heartbeat_pgagent.7 \ ocf_heartbeat_pgsql.7 \ @@ -10,9 +10,9 @@ diff --color -uNr 
a/doc/man/Makefile.am b/doc/man/Makefile.am ocf_heartbeat_podman.7 \ ocf_heartbeat_portblock.7 \ diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am ---- a/heartbeat/Makefile.am 2021-04-12 12:51:56.831835953 +0200 -+++ b/heartbeat/Makefile.am 2021-04-13 13:37:45.741292178 +0200 -@@ -149,6 +149,7 @@ +--- a/heartbeat/Makefile.am 2023-10-11 09:03:53.000000000 +0200 ++++ b/heartbeat/Makefile.am 2024-06-12 09:14:42.898393461 +0200 +@@ -156,6 +156,7 @@ ovsmonitor \ pgagent \ pgsql \ @@ -20,7 +20,7 @@ diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am pingd \ podman \ portblock \ -@@ -209,7 +210,10 @@ +@@ -224,7 +225,10 @@ mysql-common.sh \ nfsserver-redhat.sh \ findif.sh \ @@ -34,7 +34,7 @@ diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am hbdir = $(sysconfdir)/ha.d diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm --- a/heartbeat/OCF_Directories.pm 1970-01-01 01:00:00.000000000 +0100 -+++ b/heartbeat/OCF_Directories.pm 2021-04-13 13:37:35.621267404 +0200 ++++ b/heartbeat/OCF_Directories.pm 2024-06-12 09:23:45.434638170 +0200 @@ -0,0 +1,139 @@ +#!/usr/bin/perl +# This program is open source, licensed under the PostgreSQL License. @@ -146,7 +146,7 @@ diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm + our @EXPORT_OK = ( @EXPORT ); +} + -+our $INITDIR = ( $ENV{'INITDIR'} || '/etc/init.d' ); ++our $INITDIR = ( $ENV{'INITDIR'} || '/etc/rc.d/init.d' ); +our $HA_DIR = ( $ENV{'HA_DIR'} || '/etc/ha.d' ); +our $HA_RCDIR = ( $ENV{'HA_RCDIR'} || '/etc/ha.d/rc.d' ); +our $HA_CONFDIR = ( $ENV{'HA_CONFDIR'} || '/etc/ha.d/conf' ); @@ -177,7 +177,7 @@ diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm + diff --color -uNr a/heartbeat/OCF_Functions.pm b/heartbeat/OCF_Functions.pm --- a/heartbeat/OCF_Functions.pm 1970-01-01 01:00:00.000000000 +0100 -+++ b/heartbeat/OCF_Functions.pm 2021-04-13 13:37:35.621267404 +0200 ++++ b/heartbeat/OCF_Functions.pm 2023-01-04 12:25:21.724889658 +0100 @@ -0,0 +1,631 @@ +#!/usr/bin/perl +# This program is open source, licensed under the PostgreSQL License. @@ -812,7 +812,7 @@ diff --color -uNr a/heartbeat/OCF_Functions.pm b/heartbeat/OCF_Functions.pm +Licensed under the PostgreSQL License. diff --color -uNr a/heartbeat/OCF_ReturnCodes.pm b/heartbeat/OCF_ReturnCodes.pm --- a/heartbeat/OCF_ReturnCodes.pm 1970-01-01 01:00:00.000000000 +0100 -+++ b/heartbeat/OCF_ReturnCodes.pm 2021-04-13 13:37:35.621267404 +0200 ++++ b/heartbeat/OCF_ReturnCodes.pm 2023-01-04 12:25:21.724889658 +0100 @@ -0,0 +1,97 @@ +#!/usr/bin/perl +# This program is open source, licensed under the PostgreSQL License. @@ -913,8 +913,8 @@ diff --color -uNr a/heartbeat/OCF_ReturnCodes.pm b/heartbeat/OCF_ReturnCodes.pm +Licensed under the PostgreSQL License. diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms --- a/heartbeat/pgsqlms 1970-01-01 01:00:00.000000000 +0100 -+++ b/heartbeat/pgsqlms 2021-04-13 13:37:40.934280411 +0200 -@@ -0,0 +1,2308 @@ ++++ b/heartbeat/pgsqlms 2024-06-12 10:48:57.220019549 +0200 +@@ -0,0 +1,2337 @@ +#!/usr/bin/perl +# This program is open source, licensed under the PostgreSQL License. +# For license terms, see the LICENSE file. @@ -945,17 +945,15 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms +use File::Temp; +use Data::Dumper; + -+my $OCF_FUNCTIONS_DIR; -+BEGIN { -+ $OCF_FUNCTIONS_DIR = defined $ENV{'OCF_FUNCTIONS_DIR'} ? 
"$ENV{'OCF_FUNCTIONS_DIR'}" : "$ENV{'OCF_ROOT'}/lib/heartbeat"; -+} -+use lib "$OCF_FUNCTIONS_DIR"; ++use FindBin; ++use lib "$FindBin::RealBin/../heartbeat/"; ++use lib "$FindBin::RealBin/../../lib/heartbeat/"; + +use OCF_ReturnCodes; +use OCF_Directories; +use OCF_Functions; + -+our $VERSION = 'v2.3.0'; ++our $VERSION = '2.3.0'; +our $PROGRAM = 'pgsqlms'; + +# OCF environment @@ -1145,11 +1143,14 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + # We check locations of connected standbies by querying the + # "pg_stat_replication" view. -+ # The row_number applies on the result set ordered on write_location ASC so -+ # the highest row_number should be given to the closest node from the -+ # master, then the lowest node name (alphanumeric sort) in case of equality. -+ # The result set itself is order by priority DESC to process best known -+ # candidate first. ++ # The row_number applies on the result set ordered on write_location DESC so ++ # the smallest row_number should be given to the closest node from the ++ # primary (1), then the lowest node name (alphanumeric sort) in case of ++ # equality. This row_number - 1 is then used to decrease the priority (score) by ++ # step of 10 units starting from 1000. ++ # E.g. row_number = 1 and maxlag = 0, ( 1000 - (row_number - 1) * 10 ) * 1 = 1000 ++ # The result set itself is order by priority DESC to process best ++ # known candidate first. + $query = qq{ + SELECT application_name, priority, location, state, current_lag + FROM ( @@ -1157,7 +1158,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + (1000 - ( + row_number() OVER ( + PARTITION BY state IN ('startup', 'backup') -+ ORDER BY location ASC, application_name ASC ++ ORDER BY location DESC, application_name ASC + ) - 1 + ) * 10 + ) * CASE WHEN ( $maxlag > 0 @@ -1326,8 +1327,8 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + return 0; +} + -+# Check if the current transiation is a recover of a master clone on given node. -+sub _is_master_recover { ++# Check if the current transition is a recover of the primary on given node. ++sub _is_primary_recover { + my ( $n ) = @_; + + return ( @@ -1336,8 +1337,8 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + ); +} + -+# Check if the current transition is a recover of a slave clone on given node. -+sub _is_slave_recover { ++# Check if the current transition is a recover of a standby clone on given node. ++sub _is_standby_recover { + my ( $n ) = @_; + + return ( @@ -1346,7 +1347,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + ); +} + -+# check if th current transition is a switchover to the given node. ++# check if the current transition is a switchover to the given node. +sub _is_switchover { + my ( $n ) = @_; + my $old = $OCF_NOTIFY_ENV{'master'}[0]{'uname'}; @@ -1626,18 +1627,18 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms +} + +# Check the write_location of all secondaries, and adapt their master score so -+# that the instance closest to the master will be the selected candidate should -+# a promotion be triggered. ++# that the instance closest to the primary will be the selected candidate ++# should a promotion be triggered. +# NOTE: This is only a hint to pacemaker! The selected candidate to promotion +# actually re-check it is the best candidate and force a re-election by failing +# if a better one exists. 
This avoid a race condition between the call of the -+# monitor action and the promotion where another slave might have catchup faster -+# with the master. ++# monitor action and the promotion where another standby might have catchup ++# faster with the primary. +# NOTE: we cannot directly use the write_location, neither a lsn_diff value as +# promotion score as Pacemaker considers any value greater than 1,000,000 as +# INFINITY. +# -+# This sub must be executed from a master monitor action. ++# This sub must be executed from a Master-role monitor action. +# +sub _check_locations { + my $partition_nodes; @@ -1659,7 +1660,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + # If no lag are reported at this point, it means that there is no + # secondary instance connected. -+ ocf_log( 'warning', 'No secondary connected to the master' ) ++ ocf_log( 'warning', 'No secondary connected to the primary' ) + if $row_num == 0; + + # For each standby connected, set their master score based on the following @@ -1739,10 +1740,10 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + +# _check_switchover +# check if the pgsql switchover to the localnode is safe. -+# This is supposed to be called **after** the master has been stopped or demoted. -+# This sub checks if the local standby received the shutdown checkpoint from the -+# old master to make sure it can take over the master role and the old master -+# will be able to catchup as a standby after. ++# This is supposed to be called **after** the primary has been stopped or ++# demoted. It checks if the local standby received the shutdown checkpoint ++# from the old primary to make sure it can promote safely and the old ++# primary will be able to catchup as a standby after. +# +# Returns 0 if switchover is safe +# Returns 1 if swithcover is not safe @@ -1762,20 +1763,20 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + .' Need to check the last record in WAL', + $OCF_NOTIFY_ENV{'demote'}[0]{'uname'}, $nodename ); + -+ # check if we received the shutdown checkpoint of the master during its ++ # check if we received the shutdown checkpoint of the primary during its + # demote process. + # We need the last local checkpoint LSN and the last received LSN from -+ # master to check in the WAL between these adresses if we have a ++ # primary to check in the WAL between these addresses if we have a + # "checkpoint shutdown" using pg_xlogdump/pg_waldump. + # + # Force a checkpoint to make sure the controldata shows the very last TL -+ # and the master's shutdown checkpoint ++ # and the primary's shutdown checkpoint + _query( q{ CHECKPOINT }, {} ); + %cdata = _get_controldata(); + $tl = $cdata{'tl'}; + $last_redo = $cdata{'redo'}; + -+ # Get the last received LSN from master ++ # Get the last received LSN from primary + $last_lsn = _get_last_received_lsn(); + + unless ( defined $last_lsn ) { @@ -1796,12 +1797,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + if ( $rc == 0 and + $ans =~ m{^rmgr: XLOG.*desc: (?i:checkpoint)(?::|_SHUTDOWN) redo [0-9A-F/]+; tli $tl;.*; shutdown$}m + ) { -+ ocf_log( 'info', 'Slave received the shutdown checkpoint' ); ++ ocf_log( 'info', 'Standby received the shutdown checkpoint' ); + return 0; + } + + ocf_exit_reason( -+ 'Did not receive the shutdown checkpoint from the old master!' ); ++ 'Did not receive the shutdown checkpoint from the old primary!' 
); + + return 1; +} @@ -1828,7 +1829,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + elsif ( $is_in_recovery eq 'f' ) { + # The instance is a primary. + ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a primary"); -+ # Check lsn diff with current slaves if any ++ # Check lsn diff with current standbys if any + _check_locations() if $__OCF_ACTION eq 'monitor'; + return $OCF_RUNNING_MASTER; + } @@ -1904,9 +1905,10 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + elsif ( $controldata_rc == $OCF_SUCCESS ) { + # The controldata has not been updated to "shutdown in recovery". + # It should mean we had a crash on a secondary instance. -+ # There is no "FAILED_SLAVE" return code, so we return a generic error. ++ # There is no "FAILED_STANDBY" return code, so we return a generic ++ # error. + ocf_exit_reason( -+ 'Instance "%s" controldata indicates a running secondary instance, the instance has probably crashed', ++ 'Instance "%s" controldata indicates a running standby instance, the instance has probably crashed', + $OCF_RESOURCE_INSTANCE ); + return $OCF_ERR_GENERIC; + } @@ -1980,9 +1982,9 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + +=item B + -+Maximum lag allowed on a standby before we set a negative master score on it. ++Maximum lag allowed on a standby before forbidding any promotion to it. +The calculation is based on the difference between the current xlog location on -+the master and the write location on the standby. ++the primary and the write location on the standby. + +(optional, integer, default "0" disables this feature) + @@ -2014,13 +2016,16 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms +sub ocf_meta_data { + print qq{ + -+ ++ + 1.0 + + -+ Resource script for PostgreSQL in replication. It manages PostgreSQL servers using streaming replication as an HA resource. ++ Resource script for PostgreSQL in replication. It manages PostgreSQL ++ servers using streaming replication as an HA resource. + -+ Manages PostgreSQL servers in replication ++ ++ Manages PostgreSQL servers in replication ++ + + + @@ -2032,7 +2037,8 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + + -+ Path to the directory storing the PostgreSQL binaries. The agent uses psql, pg_isready, pg_controldata and pg_ctl. ++ Path to the directory storing the PostgreSQL binaries. The agent ++ uses psql, pg_isready, pg_controldata and pg_ctl. + + Path to the PostgreSQL binaries + @@ -2048,17 +2054,23 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + + -+ Path to the directory set in data_directory from your postgresql.conf file. This parameter -+ has the same default than PostgreSQL itself: the pgdata parameter value. Unless you have a -+ special PostgreSQL setup and you understand this parameter, ignore it. ++ Path to the directory set in data_directory from your ++ postgresql.conf file. This parameter has the same default than ++ PostgreSQL itself: the pgdata parameter value. Unless you have a ++ special PostgreSQL setup and you understand this parameter, ++ ignore it. + -+ Path to the directory set in data_directory from your postgresql.conf file ++ ++ Path to the directory set in data_directory from your ++ postgresql.conf file ++ + + + + + -+ Host IP address or unix socket folder the instance is listening on. ++ Host IP address or unix socket folder the instance is listening ++ on. 
+ + Instance IP or unix socket folder + @@ -2074,25 +2086,31 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + + -+ Maximum lag allowed on a standby before we set a negative master score on it. The calculation -+ is based on the difference between the current LSN on the master and the LSN -+ written on the standby. -+ This parameter must be a valid positive number as described in PostgreSQL documentation. ++ Maximum lag allowed on a standby before forbidding any promotion ++ on it. The calculation is based on the difference between the ++ current LSN on the primary and the LSN written on the standby. ++ This parameter must be a valid positive number as described in ++ PostgreSQL documentation. + See: https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS-NUMERIC + -+ Maximum write lag before we mark a standby as inappropriate to promote ++ ++ Maximum write lag before we mark a standby as inappropriate to ++ promote ++ + + + + + -+ Path to the recovery.conf template. This file is simply copied to \$PGDATA -+ before starting the instance as slave. ++ Path to the recovery.conf template. This file is simply copied ++ to \$PGDATA before starting the instance as standby. + ONLY for PostgreSQL 11 and bellow. This parameter is IGNORED for -+ PostgreSQL 12 and higher. The cluster will refuse to start if a template -+ file is found. ++ PostgreSQL 12 and higher. The cluster will refuse to start if a ++ template file is found. + -+ Path to the recovery.conf template for PostgreSQL 11 and older. ++ ++ Path to the recovery.conf template for PostgreSQL 11 and older. ++ + + + @@ -2103,7 +2121,9 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + postgresql.conf file is not in the data directory (PGDATA), eg.: + "-c config_file=/etc/postgresql/9.3/main/postgresql.conf". + -+ Additionnal arguments given to the postgres process on startup. ++ ++ Additionnal arguments given to the postgres process on startup. ++ + + + @@ -2114,7 +2134,6 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + + -+ + + + @@ -2148,11 +2167,11 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + +=item B + -+Promotes the resource to the Master role. Suggested minimum timeout: 30. ++Promotes the resource to the primary role. Suggested minimum timeout: 30. + +=item B + -+Demotes the resource to the Slave role. Suggested minimum timeout: 120. ++Demotes the resource to the standby role. Suggested minimum timeout: 120. 
+ +=item B + @@ -2215,19 +2234,17 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.9' ) == 2 + ) { + ocf_exit_reason( -+ 'PAF %s is compatible with Pacemaker 1.1.13 and greater', ++ 'PAF v%s is compatible with Pacemaker 1.1.13 and greater', + $VERSION + ); + return $OCF_ERR_INSTALLED; + } + + # check notify=true -+ $ans = qx{ $CRM_RESOURCE --resource "$OCF_RESOURCE_INSTANCE" \\ -+ --meta --get-parameter notify 2>/dev/null }; -+ chomp $ans; -+ unless ( lc($ans) =~ /^true$|^on$|^yes$|^y$|^1$/ ) { ++ unless ( defined $ENV{'OCF_RESKEY_CRM_meta_notify'} ++ and lc($ENV{'OCF_RESKEY_CRM_meta_notify'}) =~ /^true$|^on$|^yes$|^y$|^1$/ ) { + ocf_exit_reason( -+ 'You must set meta parameter notify=true for your master resource' ++ 'You must set meta parameter notify=true for your "master" resource' + ); + return $OCF_ERR_INSTALLED; + } @@ -2238,7 +2255,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1' + ) { + ocf_exit_reason( -+ 'You must set meta parameter master-max=1 for your master resource' ++ 'You must set meta parameter master-max=1 for your "master" resource' + ); + return $OCF_ERR_INSTALLED; + } @@ -2399,14 +2416,14 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + # Check if a master score exists in the cluster. + # During the very first start of the cluster, no master score will -+ # exists on any of the existing slaves, unless an admin designated -+ # one of them using crm_master. If no master exists the cluster will -+ # not promote a master among the slaves. ++ # exists on any of the existing standbys, unless an admin designated ++ # one of them using crm_master. If no master score exists the ++ # cluster can not pick a standby to promote. + # To solve this situation, we check if there is at least one master + # score existing on one node in the cluster. Do nothing if at least -+ # one master score is found among the clones of the resource. If no -+ # master score exists, set a score of 1 only if the resource was a -+ # shut downed master before the start. ++ # one master score is found among the clones of the resource. ++ # If no master score exists, set a score of 1 only if the resource ++ # was a shut downed primary before the start. + if ( $prev_state eq "shut down" and not _master_score_exists() ) { + ocf_log( 'info', 'No master score around. Set mine to 1' ); + @@ -2417,7 +2434,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + } + + ocf_exit_reason( -+ 'Instance "%s" is not running as a slave (returned %d)', ++ 'Instance "%s" is not running as a standby (returned %d)', + $OCF_RESOURCE_INSTANCE, $rc ); + + return $OCF_ERR_GENERIC; @@ -2624,7 +2641,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + return $OCF_SUCCESS; + } + elsif ( $rc == $OCF_NOT_RUNNING ) { -+ # Instance is stopped. Nothing to do. ++ # Instance is stopped. Need to start as standby. + ocf_log( 'debug', 'pgsql_demote: "%s" currently shut down', + $OCF_RESOURCE_INSTANCE ); + } @@ -2638,12 +2655,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + return $OCF_ERR_GENERIC; + } + -+ # TODO we need to make sure at least one slave is connected!! ++ # TODO Do we need to make sure at least one standby is connected? 
+ -+ # WARNING if the resource state is stopped instead of master, the ocf ra dev -+ # rsc advises to return OCF_ERR_GENERIC, misleading the CRM in a loop where -+ # it computes transitions of demote(failing)->stop->start->promote actions -+ # until failcount == migration-threshold. ++ # WARNING if the resource state is stopped instead of primary, the ocf ra ++ # dev rsc advises to return OCF_ERR_GENERIC, misleading the CRM in a loop ++ # where it computes transitions of demote(failing)->stop->start->promote ++ # actions until failcount == migration-threshold. + # This is a really ugly trick to keep going with the demode action if the + # rsc is already stopped gracefully. + # See discussion "CRM trying to demote a stopped resource" on @@ -2711,12 +2728,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + $rc = pgsql_monitor(); + + if ( $rc == $OCF_SUCCESS ) { -+ # Running as slave. Normal, expected behavior. ++ # Running as standby. Normal, expected behavior. + ocf_log( 'debug', 'pgsql_promote: "%s" currently running as a standby', + $OCF_RESOURCE_INSTANCE ); + } + elsif ( $rc == $OCF_RUNNING_MASTER ) { -+ # Already a master. Unexpected, but not a problem. ++ # Already a primary. Unexpected, but not a problem. + ocf_log( 'info', '"%s" already running as a primary', + $OCF_RESOURCE_INSTANCE ); + return $OCF_SUCCESS; @@ -2756,19 +2773,20 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + # internal error during _check_switchover + } + -+ # Do not check for a better candidate if we try to recover the master -+ # Recover of a master is detected during the pre-promote action. It sets the -+ # private attribute 'recover_master' to '1' if this is a master recover. -+ if ( _get_priv_attr( 'recover_master' ) eq '1' ) { -+ ocf_log( 'info', 'Recovering old master, no election needed'); ++ # Do not check for a better candidate if we try to recover the primary. ++ # Recover of a primary is detected during the pre-promote action. It sets ++ # the private attribute 'recover_primary' to '1' if this is a primary ++ # recover. ++ if ( _get_priv_attr( 'recover_primary' ) eq '1' ) { ++ ocf_log( 'info', 'Recovering old primary, no election needed') + } + else { + + # The promotion is occurring on the best known candidate (highest + # master score), as chosen by pacemaker during the last working monitor -+ # on previous master (see pgsql_monitor/_check_locations subs). ++ # on previous primary (see pgsql_monitor/_check_locations subs). + # To avoid any race condition between the last monitor action on the -+ # previous master and the **real** most up-to-date standby, we ++ # previous primary and the **real** most up-to-date standby, we + # set each standby location during the "pre-promote" action, and stored + # them using the "lsn_location" resource attribute. + # @@ -2891,8 +2909,8 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + return $OCF_SUCCESS; +} + -+# This action is called **before** the actual promotion when a failing master is -+# considered unreclaimable, recoverable or a new master must be promoted ++# This action is called **before** the actual promotion when a failing primary ++# is considered unreclaimable, recoverable or a new primary must be promoted +# (switchover or first start). +# As every "notify" action, it is executed almost simultaneously on all +# available nodes. 
@@ -2907,11 +2925,11 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + ocf_log( 'info', 'Promoting instance on node "%s"', + $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ); + -+ # No need to do an election between slaves if this is recovery of the master -+ if ( _is_master_recover( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ) ) { -+ ocf_log( 'warning', 'This is a master recovery!' ); ++ # No need to do an election if this is a recovery of the primary ++ if ( _is_primary_recover( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ) ) { ++ ocf_log( 'warning', 'This is a primary recovery!' ); + -+ _set_priv_attr( 'recover_master', '1' ) ++ _set_priv_attr( 'recover_primary', '1' ) + if $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename; + + return $OCF_SUCCESS; @@ -2919,7 +2937,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + # Environment cleanup! + _delete_priv_attr( 'lsn_location' ); -+ _delete_priv_attr( 'recover_master' ); ++ _delete_priv_attr( 'recover_primary' ); + _delete_priv_attr( 'nodes' ); + _delete_priv_attr( 'cancel_switchover' ); + @@ -2943,19 +2961,19 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + # FIXME: should we allow a switchover to a lagging slave? + } + -+ # We need to trigger an election between existing slaves to promote the best -+ # one based on its current LSN location. Each node set a private attribute -+ # "lsn_location" with its TL and LSN location. ++ # We need to trigger an election between existing standbys to promote the ++ # best one based on its current LSN location. Each node set a private ++ # attribute "lsn_location" with its TL and LSN location. + # + # During the following promote action, The designated standby for + # promotion use these attributes to check if the instance to be promoted + # is the best one, so we can avoid a race condition between the last -+ # successful monitor on the previous master and the current promotion. ++ # successful monitor on the previous primary and the current promotion. + + # As we can not break the transition from a notification action, we check + # during the promotion if each node TL and LSN are valid. + -+ # Force a checpoint to make sure the controldata shows the very last TL ++ # Force a checkpoint to make sure the controldata shows the very last TL + _query( q{ CHECKPOINT }, {} ); + %cdata = _get_controldata(); + $node_lsn = _get_last_received_lsn( 'in decimal' ); @@ -2977,12 +2995,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + ocf_log( 'warning', 'Could not set the current node LSN' ) + if $? != 0 ; + -+ # If this node is the future master, keep track of the slaves that ++ # If this node is the future primary, keep track of the standbys that + # received the same notification to compare our LSN with them during + # promotion + if ( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename ) { + # Build the list of active nodes: -+ # master + slave + start - stop ++ # primary + standby + start - stop + # FIXME: Deal with rsc started during the same transaction but **after** + # the promotion ? + $active_nodes{ $_->{'uname'} }++ foreach @{ $OCF_NOTIFY_ENV{'active'} }, @@ -2995,26 +3013,27 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + _set_priv_attr( 'nodes', $attr_nodes ); + } + ++ # whatever the result, it is ignored by pacemaker. + return $OCF_SUCCESS; +} + +# This action is called after a promote action. +sub pgsql_notify_post_promote { + -+ # We have a new master (or the previous one recovered). 
++ # We have a new primary (or the previous one recovered). + # Environment cleanup! + _delete_priv_attr( 'lsn_location' ); -+ _delete_priv_attr( 'recover_master' ); ++ _delete_priv_attr( 'recover_primary' ); + _delete_priv_attr( 'nodes' ); + _delete_priv_attr( 'cancel_switchover' ); + ++ # whatever the result, it is ignored by pacemaker. + return $OCF_SUCCESS; +} + +# This is called before a demote occurs. +sub pgsql_notify_pre_demote { + my $rc; -+ my %cdata; + + # do nothing if the local node will not be demoted + return $OCF_SUCCESS unless scalar @@ -3022,12 +3041,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + $rc = pgsql_monitor(); + -+ # do nothing if this is not a master recovery -+ return $OCF_SUCCESS unless _is_master_recover( $nodename ) ++ # do nothing if this is not a primary recovery ++ return $OCF_SUCCESS unless _is_primary_recover( $nodename ) + and $rc == $OCF_FAILED_MASTER; + -+ # in case of master crash, we need to detect if the CRM tries to recover -+ # the master clone. The usual transition is to do: ++ # in case of primary crash, we need to detect if the CRM tries to recover ++ # the primary. The usual transition is to do: + # demote->stop->start->promote + # + # There are multiple flaws with this transition: @@ -3040,18 +3059,26 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + # If it success, at least it will be demoted correctly with a normal + # status. If it fails, it will be catched up in next steps. + -+ ocf_log( 'info', 'Trying to start failing master "%s"...', ++ ocf_log( 'info', 'Trying to start failing primary "%s"', + $OCF_RESOURCE_INSTANCE ); + + # Either the instance managed to start or it couldn't. -+ # We rely on the pg_ctk '-w' switch to take care of this. If it couldn't ++ # We rely on the pg_ctl '-w' switch to take care of this. If it couldn't + # start, this error will be catched up later during the various checks -+ _pg_ctl_start(); ++ if( _pg_ctl_start() == 0 ) { ++ my %cdata = _get_controldata(); + -+ %cdata = _get_controldata(); ++ ocf_log( 'info', 'Recovery of %s succeed', $OCF_RESOURCE_INSTANCE ); ++ ocf_log( 'info', 'State is "%s" after recovery attempt', ++ $cdata{'state'} ); ++ } ++ else { ++ ocf_log( 'err', 'Could not recover failing primary %s', ++ $OCF_RESOURCE_INSTANCE ); ++ } + -+ ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} ); + ++ # whatever the result, it is ignored by pacemaker. + return $OCF_SUCCESS; +} + @@ -3066,14 +3093,14 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + $rc = _controldata_to_ocf(); + -+ # do nothing if this is not a slave recovery -+ return $OCF_SUCCESS unless _is_slave_recover( $nodename ) ++ # do nothing if this is not a standby recovery ++ return $OCF_SUCCESS unless _is_standby_recover( $nodename ) + and $rc == $OCF_RUNNING_SLAVE; + -+ # in case of slave crash, we need to detect if the CRM tries to recover -+ # the slaveclone. The usual transition is to do: stop->start ++ # in case of standby crash, we need to detect if the CRM tries to recover ++ # it. The usual transition is to do: stop->start + # -+ # This transition can no twork because the instance is in ++ # This transition can not work because the instance is in + # OCF_ERR_GENERIC step. So the stop action will fail, leading most + # probably to fencing action. + # @@ -3081,7 +3108,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + # If it success, at least it will be stopped correctly with a normal + # status. 
If it fails, it will be catched up in next steps. + -+ ocf_log( 'info', 'Trying to start failing slave "%s"...', ++ ocf_log( 'info', 'Trying to start failing standby "%s"...', + $OCF_RESOURCE_INSTANCE ); + + # Either the instance managed to start or it couldn't. @@ -3093,6 +3120,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + + ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} ); + ++ # whatever the result, it is ignored by pacemaker. + return $OCF_SUCCESS; +} + @@ -3116,6 +3144,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms + elsif ( /^pre-stop$/ ) { return pgsql_notify_pre_stop() } + } + ++ # whatever the result, it is ignored by pacemaker. + return $OCF_SUCCESS; +} + @@ -3225,7 +3254,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms +=cut diff --color -uNr a/paf_LICENSE b/paf_LICENSE --- a/paf_LICENSE 1970-01-01 01:00:00.000000000 +0100 -+++ b/paf_LICENSE 2021-04-14 09:16:39.083555835 +0200 ++++ b/paf_LICENSE 2023-01-04 12:25:21.721889640 +0100 @@ -0,0 +1,19 @@ +Copyright (c) 2016-2020, Jehan-Guillaume de Rorthais, Mael Rimbault. + @@ -3248,7 +3277,7 @@ diff --color -uNr a/paf_LICENSE b/paf_LICENSE + diff --color -uNr a/paf_README.md b/paf_README.md --- a/paf_README.md 1970-01-01 01:00:00.000000000 +0100 -+++ b/paf_README.md 2021-04-14 09:18:57.450968048 +0200 ++++ b/paf_README.md 2023-01-04 12:25:21.721889640 +0100 @@ -0,0 +1,86 @@ +# PostgreSQL Automatic Failover + diff --git a/resource-agents.spec b/resource-agents.spec new file mode 100644 index 0000000..5691fd9 --- /dev/null +++ b/resource-agents.spec @@ -0,0 +1,644 @@ +# +# All modifications and additions to the file contributed by third parties +# remain the property of their copyright owners, unless otherwise agreed +# upon. The license for this file, and modifications and additions to the +# file, is the same license as for the pristine package itself (unless the +# license for the pristine package is not an Open Source License, in which +# case the license is the MIT License). An "Open Source License" is a +# license that conforms to the Open Source Definition (Version 1.9) +# published by the Open Source Initiative. +# + +# Below is the script used to generate a new source file +# from the resource-agent upstream git repo. +# +# TAG=$(git log --pretty="format:%h" -n 1) +# distdir="ClusterLabs-resource-agents-${TAG}" +# TARFILE="${distdir}.tar.gz" +# rm -rf $TARFILE $distdir +# git archive --prefix=$distdir/ HEAD | gzip > $TARFILE +# + +%global upstream_prefix ClusterLabs-resource-agents +%global upstream_version 56e76b01 + +# Whether this platform defaults to using systemd as an init system +# (needs to be evaluated prior to BuildRequires being enumerated and +# installed as it's intended to conditionally select some of these, and +# for that there are only few indicators with varying reliability: +# - presence of systemd-defined macros (when building in a full-fledged +# environment, which is not the case with ordinary mock-based builds) +# - systemd-aware rpm as manifested with the presence of particular +# macro (rpm itself will trivially always be present when building) +# - existence of /usr/lib/os-release file, which is something heavily +# propagated by systemd project +# - when not good enough, there's always a possibility to check +# particular distro-specific macros (incl. 
version comparison) +%define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \ + } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \ + } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?)) + +# determine the ras-set to process based on configure invokation +%bcond_with rgmanager +%bcond_without linuxha + +Name: resource-agents +Summary: Open Source HA Reusable Cluster Resource Scripts +Version: 4.16.0 +Release: 9%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist} +License: GPL-2.0-or-later AND LGPL-2.1-or-later +URL: https://github.com/ClusterLabs/resource-agents +Source0: %{upstream_prefix}-%{upstream_version}.tar.gz +Patch0: pgsqlms-ra.patch +Patch1: RHEL-66293-1-aws-agents-reuse-imds-token-until-it-expires.patch +Patch2: RHEL-66293-2-aws-agents-reuse-imds-token-improvements.patch +Patch3: RHEL-68740-awsvip-add-interface-parameter.patch +Patch4: RHEL-72954-1-openstack-cinder-volume-wait-for-volume-to-be-available.patch +Patch5: RHEL-72954-2-openstack-cinder-volume-fix-detach-not-working-during-start-action.patch +Patch6: RHEL-75574-1-all-agents-use-grep-E-F.patch +Patch7: RHEL-75574-2-ocf-binaries-add-FGREP.patch +Patch8: RHEL-76007-Filesystem-dont-report-warnings-when-creating-resource.patch +Patch9: RHEL-76037-1-storage-mon-remove-unused-variables.patch +Patch10: RHEL-76037-2-storage-mon-fix-daemon-mode-bug-that-caused-delayed-initial-score.patch +Patch11: RHEL-76037-3-storage-mon-only-use-underscores-in-functions.patch +Patch12: RHEL-76037-4-storage-mon-check-if-daemon-is-already-running.patch +Patch13: RHEL-76037-5-storage-mon-log-storage_mon-is-already-running-in-start-action.patch +Patch14: RHEL-73689-1-ocf-shellfuncs-fix-syntax-error-in-crm_mon_no_validation.patch +Patch15: RHEL-73689-2-ocf-shellfuncs-add-missing-variable-in-crm_mon_no_validation.patch +Patch16: RHEL-79822-1-portblock-fix-version-detection.patch +Patch17: RHEL-79822-2-portblock-use-ocf_log-for-logging.patch + +# bundled ha-cloud-support libs +Patch500: ha-cloud-support-aliyun.patch +Patch501: ha-cloud-support-gcloud.patch + +Obsoletes: heartbeat-resources <= %{version} +Provides: heartbeat-resources = %{version} + +# Build dependencies +BuildRequires: make +BuildRequires: automake autoconf pkgconfig gcc +BuildRequires: perl +BuildRequires: libxslt glib2-devel libqb-devel +BuildRequires: systemd +BuildRequires: which + +%if 0%{?fedora} || 0%{?centos} > 7 || 0%{?rhel} > 7 || 0%{?suse_version} +BuildRequires: python3-devel +%else +BuildRequires: python-devel +%endif + +# for pgsqlms +BuildRequires: perl-devel perl-English perl-FindBin + +%ifarch x86_64 +BuildRequires: ha-cloud-support +%endif + +%if 0%{?fedora} || 0%{?centos} || 0%{?rhel} +BuildRequires: docbook-style-xsl docbook-dtds +%if 0%{?rhel} == 0 +BuildRequires: libnet-devel +%endif +%endif + +%if 0%{?suse_version} +BuildRequires: libnet-devel +%if 0%{?suse_version} > 1500 +BuildRequires: cluster-glue-devel +%else +BuildRequires: libglue-devel +%endif +BuildRequires: libxslt docbook_4 docbook-xsl-stylesheets +%endif + +# dependencies for powervs-subnet +BuildRequires: python3-requests python3-urllib3 + +## Runtime deps +# system tools shared by several agents +Requires: /bin/bash /usr/bin/grep /bin/sed /bin/gawk +Requires: /bin/ps /usr/bin/pkill /usr/bin/hostname /usr/bin/netstat +Requires: /bin/mount +%if 0%{?suse_version} +Requires: /usr/bin/fuser +%else +Requires: /usr/sbin/fuser +%endif + +# Filesystem / fs.sh / netfs.sh +%if 0%{?fedora} > 39 || 0%{?rhel} > 9 || 
0%{?suse_version}
+Requires: /usr/sbin/fsck
+%else
+Requires: /sbin/fsck
+%endif
+Requires: /usr/sbin/fsck.ext2 /usr/sbin/fsck.ext3 /usr/sbin/fsck.ext4
+Requires: /usr/sbin/fsck.xfs
+%if 0%{?fedora} > 40 || 0%{?rhel} > 9 || 0%{?suse_version}
+Requires: /usr/sbin/mount.nfs /usr/sbin/mount.nfs4
+%else
+Requires: /sbin/mount.nfs /sbin/mount.nfs4
+%endif
+%if (0%{?fedora} && 0%{?fedora} < 33) || (0%{?rhel} && 0%{?rhel} < 9) || (0%{?centos} && 0%{?centos} < 9) || 0%{?suse_version}
+%if (0%{?rhel} && 0%{?rhel} < 8) || (0%{?centos} && 0%{?centos} < 8)
+Requires: /usr/sbin/mount.cifs
+%else
+Recommends: /usr/sbin/mount.cifs
+%endif
+%endif
+
+# IPaddr2
+Requires: /sbin/ip
+
+# LVM / lvm.sh
+Requires: /usr/sbin/lvm
+
+# nfsserver / netfs.sh
+%if 0%{?fedora} > 40 || 0%{?rhel} > 9 || 0%{?suse_version}
+Requires: /usr/sbin/rpc.statd
+%else
+Requires: /sbin/rpc.statd
+%endif
+Requires: /usr/sbin/rpc.nfsd /usr/sbin/rpc.mountd
+
+# ocf.py
+Requires: python3
+
+# ethmonitor
+Requires: bc
+
+# rgmanager
+%if %{with rgmanager}
+# ip.sh
+Requires: /usr/sbin/ethtool
+Requires: /sbin/rdisc /usr/sbin/arping /bin/ping /bin/ping6
+
+# nfsexport.sh
+%if 0%{?fedora} > 39 || 0%{?rhel} > 9
+Requires: /usr/sbin/findfs
+Requires: /usr/sbin/quotaon /usr/sbin/quotacheck
+%else
+Requires: /sbin/findfs
+Requires: /sbin/quotaon /sbin/quotacheck
+%endif
+%endif
+
+%description
+A set of scripts to interface with several services to operate in a
+High Availability environment for both Pacemaker and rgmanager
+service managers.
+
+%ifarch x86_64
+%package cloud
+License: GPLv2+ and LGPLv2+
+Summary: Cloud resource agents
+Requires: %{name} = %{version}-%{release}
+Requires: ha-cloud-support
+Requires: python3-requests python3-urllib3
+Requires: socat
+Provides: resource-agents-aliyun
+Obsoletes: resource-agents-aliyun <= %{version}
+Provides: resource-agents-gcp
+Obsoletes: resource-agents-gcp <= %{version}
+
+%description cloud
+Cloud resource agents allow cloud instances to be managed
+in a cluster environment.
+%endif
+
+%package paf
+License: PostgreSQL
+Summary: PostgreSQL Automatic Failover (PAF) resource agent
+Requires: %{name} = %{version}-%{release}
+Requires: perl-interpreter perl-lib perl-English perl-FindBin
+
+%description paf
+PostgreSQL Automatic Failover (PAF) resource agents allow PostgreSQL
+databases to be managed in a cluster environment.
+
+%prep
+%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos} == 0 && 0%{?rhel} == 0
+%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. Please install the correct build packages or define the required macros manually.}
+exit 1
+%endif
+%setup -q -n %{upstream_prefix}-%{upstream_version}
+%patch -p1 -P 0
+%patch -p1 -P 1
+%patch -p1 -P 2
+%patch -p1 -P 3
+%patch -p1 -P 4
+%patch -p1 -P 5
+%patch -p1 -P 6
+%patch -p1 -P 7
+%patch -p1 -P 8
+%patch -p1 -P 9
+%patch -p1 -P 10
+%patch -p1 -P 11
+%patch -p1 -P 12
+%patch -p1 -P 13
+%patch -p1 -P 14
+%patch -p1 -P 15
+%patch -p1 -P 16
+%patch -p1 -P 17
+
+# bundled ha-cloud-support libs
+%patch -p1 -P 500
+%patch -p1 -P 501
+
+chmod 755 heartbeat/pgsqlms
+
+%build
+sed -i -e "s/#PYTHON3_VERSION#/%{python3_version}/" heartbeat/gcp*
+
+if [ ! -f configure ]; then
+	./autogen.sh
+fi
+
+%if 0%{?fedora} >= 11 || 0%{?centos} > 5 || 0%{?rhel} > 5
+CFLAGS="$(echo '%{optflags}')"
+%global conf_opt_fatal "--enable-fatal-warnings=no"
+%else
+CFLAGS="${CFLAGS} ${RPM_OPT_FLAGS}"
+%global conf_opt_fatal "--enable-fatal-warnings=yes"
+%endif
+
+%if %{with rgmanager}
+%global rasset rgmanager
+%endif
+%if %{with linuxha}
+%global rasset linux-ha
+%endif
+%if %{with rgmanager} && %{with linuxha}
+%global rasset all
+%endif
+
+export CFLAGS
+
+%configure \
+%if 0%{?fedora} || 0%{?centos} > 7 || 0%{?rhel} > 7 || 0%{?suse_version}
+	PYTHON="%{__python3}" \
+%endif
+%ifarch x86_64
+	PYTHONPATH="%{_usr}/lib/fence-agents/support/google/lib/python%{python3_version}/site-packages" \
+%endif
+	%{conf_opt_fatal} \
+%if %{defined _unitdir}
+	SYSTEMD_UNIT_DIR=%{_unitdir} \
+%endif
+%if %{defined _tmpfilesdir}
+	SYSTEMD_TMPFILES_DIR=%{_tmpfilesdir} \
+	--with-rsctmpdir=/run/resource-agents \
+%endif
+	--with-pkg-name=%{name} \
+	--with-ras-set=%{rasset}
+
+make %{_smp_mflags}
+
+%install
+rm -rf %{buildroot}
+make install DESTDIR=%{buildroot}
+
+## tree fixup
+# remove docs (there is only one and they should come from doc sections in files)
+rm -rf %{buildroot}/usr/share/doc/resource-agents
+
+%files
+%doc AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog
+%if %{with linuxha}
+%doc heartbeat/README.galera
+%doc doc/README.webapps
+%doc %{_datadir}/%{name}/ra-api-1.dtd
+%doc %{_datadir}/%{name}/metadata.rng
+%endif
+
+%if %{with rgmanager}
+%{_datadir}/cluster
+%{_sbindir}/rhev-check.sh
+%endif
+
+%if %{with linuxha}
+%dir %{_usr}/lib/ocf
+%dir %{_usr}/lib/ocf/resource.d
+%dir %{_usr}/lib/ocf/lib
+
+%{_usr}/lib/ocf/lib/heartbeat
+
+%{_usr}/lib/ocf/resource.d/heartbeat
+
+%{_datadir}/pkgconfig/%{name}.pc
+
+%if %{defined _unitdir}
+%{_unitdir}/resource-agents-deps.target
+%endif
+%if %{defined _tmpfilesdir}
+%{_tmpfilesdir}/%{name}.conf
+%endif
+
+%dir %{_datadir}/%{name}
+%dir %{_datadir}/%{name}/ocft
+%{_datadir}/%{name}/ocft/configs
+%{_datadir}/%{name}/ocft/caselib
+%{_datadir}/%{name}/ocft/README
+%{_datadir}/%{name}/ocft/README.zh_CN
+%{_datadir}/%{name}/ocft/helpers.sh
+%exclude %{_datadir}/%{name}/ocft/runocft
+%exclude %{_datadir}/%{name}/ocft/runocft.prereq
+
+%{_sbindir}/ocf-tester
+%{_sbindir}/ocft
+
+%{_includedir}/heartbeat
+
+%if %{defined _tmpfilesdir}
+%dir %attr (1755, root, root) /run/resource-agents
+%else
+%dir %attr (1755, root, root) %{_var}/run/resource-agents
+%endif
+
+%{_mandir}/man7/*.7*
+%{_mandir}/man8/ocf-tester.8*
+
+###
+# Supported, but in another sub package
+###
+%exclude /usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip*
+%exclude /usr/lib/ocf/resource.d/heartbeat/aws*
+%exclude /usr/lib/ocf/resource.d/heartbeat/azure-*
+%exclude %{_mandir}/man7/*aliyun-vpc-move-ip*
+%exclude /usr/lib/ocf/resource.d/heartbeat/gcp*
+%exclude %{_mandir}/man7/*gcp*
+%exclude /usr/lib/ocf/resource.d/heartbeat/powervs-subnet
+%exclude %{_mandir}/man7/*powervs-subnet*
+%exclude /usr/lib/ocf/resource.d/heartbeat/pgsqlms
+%exclude %{_mandir}/man7/*pgsqlms*
+%exclude %{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
+
+###
+# Moved to separate packages
+###
+%exclude /usr/lib/ocf/resource.d/heartbeat/SAP*
+%exclude /usr/lib/ocf/lib/heartbeat/sap*
+%exclude %{_mandir}/man7/*SAP*
+
+###
+# Unsupported
+###
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AoEtarget
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AudibleAlarm
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ClusterMon
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/EvmsSCC
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Evmsd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ICP
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/IPaddr
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LVM
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LinuxSCSI
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageRAID
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageVE
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Pure-FTPd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Raid1
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ServeRAID
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SphinxSearchDaemon
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Stateful
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SysInfo
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/VIPArip
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS6
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WinPopup
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Xen
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ZFS
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/anything
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/asterisk
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/clvm
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dnsupdate
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/docker*
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dovecot
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dummypy
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/eDir88
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/fio
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ids
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iface-bridge
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iface-macvlan
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ipsec
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iscsi
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jboss
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jira
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/kamailio
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ldirectord
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxc
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxd-info
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/machine-info
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mariadb
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mdraid
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/minio
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mpathpersist
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mysql-proxy
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/nvmet-*
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ocivip
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/osceip
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ovsmonitor
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pgagent
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pingd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pound
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/proftpd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rabbitmq-server-ha
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rkt
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rsyslog
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/scsi2reservation
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sfex
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sg_persist
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/smb-share
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/syslog-ng
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/varnish
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vmware
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vsftpd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/zabbixserver
+%exclude %{_mandir}/man7/ocf_heartbeat_AoEtarget.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_AudibleAlarm.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ClusterMon.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_EvmsSCC.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Evmsd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ICP.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_IPaddr.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_LVM.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_LinuxSCSI.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ManageRAID.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ManageVE.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Pure-FTPd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Raid1.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ServeRAID.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_SphinxSearchDaemon.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Stateful.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_SysInfo.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_VIPArip.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WAS.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WAS6.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WinPopup.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Xen.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ZFS.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_anything.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_asterisk.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_clvm.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_dnsupdate.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_docker*.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_dovecot.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_dummypy.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_eDir88.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_fio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ids.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_iface-bridge.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_iface-macvlan.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ipsec.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_iscsi.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_jboss.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_jira.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_kamailio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_lxc.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_lxd-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_machine-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mariadb.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mdraid.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_minio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mpathpersist.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mysql-proxy.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_nvmet-*.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ocivip.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_osceip.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ovsmonitor.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pgagent.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pingd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pound.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_proftpd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_rabbitmq-server-ha.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_rkt.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_rsyslog.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_scsi2reservation.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_sfex.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_sg_persist.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_smb-share.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_syslog-ng.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_varnish.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_vmware.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_vsftpd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_zabbixserver.7.gz
+
+###
+# Other excluded files.
+###
+# ldirectord is not supported
+%exclude /etc/ha.d/resource.d/ldirectord
+%exclude %{_sysconfdir}/rc.d/init.d/ldirectord
+%exclude %{_unitdir}/ldirectord.service
+%exclude /etc/logrotate.d/ldirectord
+%exclude /usr/sbin/ldirectord
+%exclude %{_mandir}/man8/ldirectord.8.gz
+
+# For compatibility with pre-existing agents
+%dir %{_sysconfdir}/ha.d
+%{_sysconfdir}/ha.d/shellfuncs
+
+%{_libexecdir}/heartbeat
+%endif
+
+%ifarch x86_64
+%files cloud
+/usr/lib/ocf/resource.d/heartbeat/aliyun-*
+%{_mandir}/man7/*aliyun-*
+/usr/lib/ocf/resource.d/heartbeat/aws*
+%{_mandir}/man7/*aws*
+/usr/lib/ocf/resource.d/heartbeat/azure-*
+%{_mandir}/man7/*azure-*
+/usr/lib/ocf/resource.d/heartbeat/gcp-*
+%{_mandir}/man7/*gcp-*
+/usr/lib/ocf/resource.d/heartbeat/powervs-subnet
+%{_mandir}/man7/*powervs-subnet*
+%exclude /usr/lib/ocf/resource.d/heartbeat/azure-events
+%exclude %{_mandir}/man7/*azure-events.7*
+%exclude /usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-ip
+%exclude %{_mandir}/man7/*gcp-vpc-move-ip*
+%endif
+
+%files paf
+%doc paf_README.md
+%license paf_LICENSE
+%defattr(-,root,root)
+%{_usr}/lib/ocf/resource.d/heartbeat/pgsqlms
+%{_mandir}/man7/*pgsqlms*
+%{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
+
+%changelog
+* Thu Feb 20 2025 Oyvind Albrigtsen - 4.16.0-9
+- portblock: fix iptables version detection
+- Remove unsupported agents
+
+  Resolves: RHEL-79822, RHEL-80293
+
+
+* Tue Feb 11 2025 Oyvind Albrigtsen - 4.16.0-8
+- ocf-shellfuncs: fix syntax error in crm_mon_no_validation()
+
+  Resolves: RHEL-73689
+
+* Mon Jan 27 2025 Oyvind Albrigtsen - 4.16.0-7
+- storage-mon: fix daemon mode bug that caused delayed initial score
+
+  Resolves: RHEL-76037
+
+* Thu Jan 23 2025 Oyvind Albrigtsen - 4.16.0-3
+- openstack-cinder-volume: wait for volume to be available
+- All agents: use grep -E/-F
+- Filesystem: dont report warnings when creating resource
+
+  Resolves: RHEL-72954, RHEL-75574, RHEL-76007
+
+* Tue Nov 26 2024 Oyvind Albrigtsen - 4.16.0-1
+- Rebase to resource-agents 4.16.0 upstream release
+- AWS agents: reuse IMDS token until it expires
+- awsvip: add interface parameter
+- ethmonitor: add bc dependency
+- build: use /usr/sbin path for nfs-utils dependencies
+
+  Resolves: RHEL-65331, RHEL-66293, RHEL-68740, RHEL-53615, RHEL-68840
+
+* Tue Oct 29 2024 Troy Dawson - 4.15.1-1.1
+- Bump release for October 2024 mass rebuild:
+  Resolves: RHEL-64018
+
+* Fri Jul 26 2024 Oyvind Albrigtsen - 4.15.1-1
+- Rebase to resource-agents 4.15.1 upstream release
+- IPaddr2: change default for lvs_ipv6_addrlabel to true to avoid
+  last added IP becoming src IP
+- powervs-subnet: new resource agent
+
+  Resolves: RHEL-50378, RHEL-46557, RHEL-50380
+
+* Thu Jun 27 2024 Oyvind Albrigtsen - 4.13.0-6
+- apache: prefer curl due to wget2 issues, and dont use -L for wget2
+
+  Resolves: RHEL-40720
+
+* Mon Jun 24 2024 Troy Dawson - 4.13.0-4.1
+- Bump release for June 2024 mass rebuild
+
+* Wed Jun 12 2024 Oyvind Albrigtsen - 4.13.0-4
+- cloud agents: set support library path
+- pgsqlms: add to -paf subpackage
+
+* Tue Jan 30 2024 Zbigniew Jedrzejewski-Szmek - 4.13.0-2.3
+- Replace /sbin by /usr/sbin in some paths so that the package remains
+  installable without full filepath metadata (rhbz#2229951)
+
+* Fri Jan 26 2024 Fedora Release Engineering - 4.13.0-2.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild
+
+* Mon Jan 22 2024 Fedora Release Engineering - 4.13.0-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild
+
+* Wed Jan 10 2024 Oyvind Albrigtsen - 4.13.0-2
+- configure: fix "C preprocessor "gcc -E" fails sanity check" error
+  with autoconf 2.72+
+
+  Resolves: rhbz#2256836
+
+* Wed Oct 11 2023 Oyvind Albrigtsen - 4.13.0-1
+- Rebase to resource-agents 4.13.0 upstream release.
diff --git a/sources b/sources
new file mode 100644
index 0000000..cc83394
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+SHA512 (ClusterLabs-resource-agents-56e76b01.tar.gz) = 14fded6bebcc9ada33c0c3db4fee45d6efc5d33865c52c1cf76b4485180cf9a76bae3456d84ba32635a2214ed1ee4010ca8704c844cebf4093ebc0843081919e