Compare commits
No commits in common. "c10s" and "c8" have entirely different histories.
.gitignore (14 lines changed)
@@ -1,2 +1,12 @@
-/*
-!/*.patch
+SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz
+SOURCES/aliyun-cli-2.1.10.tar.gz
+SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz
+SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz
+SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz
+SOURCES/colorama-0.3.3.tar.gz
+SOURCES/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz
+SOURCES/httplib2-0.20.4.tar.gz
+SOURCES/pycryptodome-3.20.0.tar.gz
+SOURCES/pyparsing-2.4.7-py2.py3-none-any.whl
+SOURCES/pyroute2-0.4.13.tar.gz
+SOURCES/urllib3-1.26.18.tar.gz
.resource-agents.metadata (new file, 12 lines)
@@ -0,0 +1,12 @@
+dfc65f4cac3f95026b2f5674019814a527333004 SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz
+306e131d8908ca794276bfe3a0b55ccc3bbd482f SOURCES/aliyun-cli-2.1.10.tar.gz
+0a56f6d9ed2014a363486d33b63eca094379be06 SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz
+c2a98b9a1562d223a76514f05028488ca000c395 SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz
+f14647a4d37a9a254c4e711b95a7654fc418e41e SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz
+0fe5bd8bca54dd71223778a1e0bcca9af324abb1 SOURCES/colorama-0.3.3.tar.gz
+81f039cf075e9c8b70d5af99c189296a9e031de3 SOURCES/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz
+7caf4412d9473bf17352316249a8133fa70b7e37 SOURCES/httplib2-0.20.4.tar.gz
+c55d177e9484d974c95078d4ae945f89ba2c7251 SOURCES/pycryptodome-3.20.0.tar.gz
+c8307f47e3b75a2d02af72982a2dfefa3f56e407 SOURCES/pyparsing-2.4.7-py2.py3-none-any.whl
+147149db11104c06d405fd077dcd2aa1c345f109 SOURCES/pyroute2-0.4.13.tar.gz
+84e2852d8da1655373f7ce5e7d5d3e256b62b4e4 SOURCES/urllib3-1.26.18.tar.gz
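Each metadata entry pairs a SHA-1 checksum with a tarball that lives in the dist-git lookaside cache rather than in the repository. As a minimal illustration (assuming the tarballs have already been downloaded into SOURCES/; this is not part of the compare itself), the entries can be spot-checked by reshaping them into the two-space format sha1sum expects:

    # hypothetical local verification of the lookaside sources
    while read -r sum file; do
        printf '%s  %s\n' "$sum" "$file"
    done < .resource-agents.metadata | sha1sum -c -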
@@ -1,455 +0,0 @@
From 61cec34a754017537c61e79cd1212f2688c32429 Mon Sep 17 00:00:00 2001
From: harshkiprofile <83770157+harshkiprofile@users.noreply.github.com>
Date: Mon, 4 Nov 2024 12:19:10 +0530
Subject: [PATCH 1/7] Introduce a new shell function to reuse IMDS token

---
 heartbeat/ocf-shellfuncs.in | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index 5c4bb3264..0c4632cf9 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -1111,3 +1111,34 @@ ocf_is_true "$OCF_TRACE_RA" && ocf_start_trace
 if ocf_is_true "$HA_use_logd"; then
 	: ${HA_LOGD:=yes}
 fi
+
+# File to store the token and timestamp
+TOKEN_FILE="/tmp/.imds_token"
+TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
+TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining
+
+# Function to fetch a new token
+fetch_new_token() {
+ TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME")
+ echo "$TOKEN $(date +%s)" > "$TOKEN_FILE"
+ echo "$TOKEN"
+}
+
+# Function to retrieve or renew the token
+get_token() {
+ if [[ -f "$TOKEN_FILE" ]]; then
+ read -r STORED_TOKEN STORED_TIMESTAMP < "$TOKEN_FILE"
+ CURRENT_TIME=$(date +%s)
+ ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP))
+
+ if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then
+ # Token is still valid
+ echo "$STORED_TOKEN"
+ return
+ fi
+ fi
+ # Fetch a new token if not valid
+ fetch_new_token
+}
+
+

From 00629fa44cb7a8dd1045fc8cad755e1d0c808476 Mon Sep 17 00:00:00 2001
From: harshkiprofile <83770157+harshkiprofile@users.noreply.github.com>
Date: Mon, 4 Nov 2024 12:21:18 +0530
Subject: [PATCH 2/7] Utilize the get_token function to reuse the token

---
 heartbeat/aws-vpc-move-ip | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index 6115e5ba8..fbeb2ee64 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -270,7 +270,7 @@ ec2ip_validate() {
 		fi
 	fi

-	TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+	TOKEN=$(get_token)
 	[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
 	EC2_INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
 	[ $? -ne 0 ] && exit $OCF_ERR_GENERIC

From 36126cdcb90ad617ecfce03d986550907732aa4f Mon Sep 17 00:00:00 2001
From: harshkiprofile <83770157+harshkiprofile@users.noreply.github.com>
Date: Mon, 4 Nov 2024 12:22:16 +0530
Subject: [PATCH 3/7] Utilize to get_token function to reuse the token

---
 heartbeat/awsvip | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index f2b238a0f..ca19ac086 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -266,7 +266,7 @@ if [ -n "${OCF_RESKEY_region}" ]; then
 	AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}"
 fi
 SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
-TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+TOKEN=$(get_token)
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
 INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC

From dcd0050df5ba94905bc71d38b05cbb93f5687b61 Mon Sep 17 00:00:00 2001
From: harshkiprofile <beer18317@gmail.com>
Date: Mon, 4 Nov 2024 20:05:33 +0530
Subject: [PATCH 4/7] Move token renewal function to aws.sh for reuse in AWS
 agent scripts

---
 heartbeat/Makefile.am        |  1 +
 heartbeat/aws-vpc-move-ip    |  1 +
 heartbeat/aws-vpc-route53.in |  3 ++-
 heartbeat/aws.sh             | 46 ++++++++++++++++++++++++++++++++++++
 heartbeat/awseip             |  3 ++-
 heartbeat/awsvip             |  1 +
 heartbeat/ocf-shellfuncs.in  | 33 +-------------------------
 7 files changed, 54 insertions(+), 34 deletions(-)
 create mode 100644 heartbeat/aws.sh

diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 409847970..655740f14 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -218,6 +218,7 @@ ocfcommon_DATA = ocf-shellfuncs \
 			ocf-rarun \
 			ocf-distro \
 			apache-conf.sh \
+			aws.sh \
 			http-mon.sh \
 			sapdb-nosha.sh \
 			sapdb.sh \
diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index fbeb2ee64..f4b0492f2 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -33,6 +33,7 @@

 : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
 . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/aws.sh

 # Defaults
 OCF_RESKEY_awscli_default="/usr/bin/aws"
diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
index eba2ed95c..f7e756782 100644
--- a/heartbeat/aws-vpc-route53.in
+++ b/heartbeat/aws-vpc-route53.in
@@ -43,6 +43,7 @@

 : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
 . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/aws.sh

 # Defaults
 OCF_RESKEY_awscli_default="/usr/bin/aws"
@@ -377,7 +378,7 @@ r53_monitor() {
 _get_ip() {
 	case $OCF_RESKEY_ip in
 		local|public)
-			TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+			TOKEN=$(get_token)
 			[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
 			IPADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4")
 			[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
new file mode 100644
index 000000000..fc557109c
--- /dev/null
+++ b/heartbeat/aws.sh
@@ -0,0 +1,46 @@
+#!/bin/sh
+#
+#
+# AWS Helper Scripts
+#
+#
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+# Defaults
+OCF_RESKEY_curl_retries_default="3"
+OCF_RESKEY_curl_sleep_default="1"
+
+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
+
+# Function to enable reusable IMDS token retrieval for efficient repeated access
+# File to store the token and timestamp
+TOKEN_FILE="/tmp/.imds_token"
+TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
+TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining
+
+# Function to fetch a new token
+fetch_new_token() {
+ TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME'" "http://169.254.169.254/latest/api/token")
+ echo "$TOKEN $(date +%s)" > "$TOKEN_FILE"
+ echo "$TOKEN"
+}
+
+# Function to retrieve or renew the token
+get_token() {
+ if [ -f "$TOKEN_FILE" ]; then
+ read -r STORED_TOKEN STORED_TIMESTAMP < "$TOKEN_FILE"
+ CURRENT_TIME=$(date +%s)
+ ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP))
+
+ if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then
+ # Token is still valid
+ echo "$STORED_TOKEN"
+ return
+ fi
+ fi
+ # Fetch a new token if not valid
+ fetch_new_token
+}
\ No newline at end of file
diff --git a/heartbeat/awseip b/heartbeat/awseip
index ffb6223a1..049c2e566 100755
--- a/heartbeat/awseip
+++ b/heartbeat/awseip
@@ -38,6 +38,7 @@

 : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
 . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/aws.sh

 #######################################################################

@@ -306,7 +307,7 @@ fi
 ELASTIC_IP="${OCF_RESKEY_elastic_ip}"
 ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
 PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}"
-TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+TOKEN=$(get_token)
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
 INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index ca19ac086..de67981d8 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -37,6 +37,7 @@

 : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
 . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/aws.sh

 #######################################################################

diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index 0c4632cf9..922c6ea45 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -1110,35 +1110,4 @@ ocf_is_true "$OCF_TRACE_RA" && ocf_start_trace
 # pacemaker sets HA_use_logd, some others use HA_LOGD :/
 if ocf_is_true "$HA_use_logd"; then
 	: ${HA_LOGD:=yes}
-fi
-
-# File to store the token and timestamp
-TOKEN_FILE="/tmp/.imds_token"
-TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
-TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining
-
-# Function to fetch a new token
-fetch_new_token() {
- TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME")
- echo "$TOKEN $(date +%s)" > "$TOKEN_FILE"
- echo "$TOKEN"
-}
-
-# Function to retrieve or renew the token
-get_token() {
- if [[ -f "$TOKEN_FILE" ]]; then
- read -r STORED_TOKEN STORED_TIMESTAMP < "$TOKEN_FILE"
- CURRENT_TIME=$(date +%s)
- ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP))
-
- if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then
- # Token is still valid
- echo "$STORED_TOKEN"
- return
- fi
- fi
- # Fetch a new token if not valid
- fetch_new_token
-}
-
-
+fi
\ No newline at end of file

From 9f7be201923c8eab1b121f2067ed74a69841cf8a Mon Sep 17 00:00:00 2001
From: harshkiprofile <beer18317@gmail.com>
Date: Tue, 5 Nov 2024 19:12:34 +0530
Subject: [PATCH 5/7] Refactor to use common temp path and update shell syntax

---
 heartbeat/Makefile.am | 2 +-
 heartbeat/aws.sh      | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 655740f14..8352f3a3d 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -218,7 +218,7 @@ ocfcommon_DATA = ocf-shellfuncs \
 			ocf-rarun \
 			ocf-distro \
 			apache-conf.sh \
-	        aws.sh \
+			aws.sh \
 			http-mon.sh \
 			sapdb-nosha.sh \
 			sapdb.sh \
diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
index fc557109c..c77f93b91 100644
--- a/heartbeat/aws.sh
+++ b/heartbeat/aws.sh
@@ -17,7 +17,7 @@ OCF_RESKEY_curl_sleep_default="1"

 # Function to enable reusable IMDS token retrieval for efficient repeated access
 # File to store the token and timestamp
-TOKEN_FILE="/tmp/.imds_token"
+TOKEN_FILE="${HA_RSCTMP}/.aws_imds_token"
 TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
 TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining

@@ -35,7 +35,7 @@ get_token() {
 CURRENT_TIME=$(date +%s)
 ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP))

- if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then
+ if [ "$ELAPSED_TIME" -lt "$((TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD))" ]; then
 # Token is still valid
 echo "$STORED_TOKEN"
 return

From 4f61048064d1df3bebdb5c1441cf0020f213c01b Mon Sep 17 00:00:00 2001
From: harshkiprofile <beer18317@gmail.com>
Date: Tue, 5 Nov 2024 19:30:15 +0530
Subject: [PATCH 6/7] Consolidate curl_retry and curl_sleep variable to a
 single location in aws.sh

---
 heartbeat/aws-vpc-move-ip    | 4 ----
 heartbeat/aws-vpc-route53.in | 4 ----
 heartbeat/awseip             | 4 ----
 heartbeat/awsvip             | 4 ----
 4 files changed, 16 deletions(-)

diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index f4b0492f2..3aa9ceb02 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -48,8 +48,6 @@ OCF_RESKEY_interface_default="eth0"
 OCF_RESKEY_iflabel_default=""
 OCF_RESKEY_monapi_default="false"
 OCF_RESKEY_lookup_type_default="InstanceId"
-OCF_RESKEY_curl_retries_default="3"
-OCF_RESKEY_curl_sleep_default="1"

 : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
 : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
@@ -63,8 +61,6 @@ OCF_RESKEY_curl_sleep_default="1"
 : ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}}
 : ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}}
 : ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}}
-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
 #######################################################################


diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
index f7e756782..85c8de3c1 100644
--- a/heartbeat/aws-vpc-route53.in
+++ b/heartbeat/aws-vpc-route53.in
@@ -54,8 +54,6 @@ OCF_RESKEY_hostedzoneid_default=""
 OCF_RESKEY_fullname_default=""
 OCF_RESKEY_ip_default="local"
 OCF_RESKEY_ttl_default=10
-OCF_RESKEY_curl_retries_default="3"
-OCF_RESKEY_curl_sleep_default="1"

 : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
 : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
@@ -65,8 +63,6 @@ OCF_RESKEY_curl_sleep_default="1"
 : ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}}
 : ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}}
 : ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}}
-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}

 usage() {
 	cat <<-EOT
diff --git a/heartbeat/awseip b/heartbeat/awseip
index 049c2e566..4b1c3bc6a 100755
--- a/heartbeat/awseip
+++ b/heartbeat/awseip
@@ -50,16 +50,12 @@ OCF_RESKEY_auth_type_default="key"
 OCF_RESKEY_profile_default="default"
 OCF_RESKEY_region_default=""
 OCF_RESKEY_api_delay_default="3"
-OCF_RESKEY_curl_retries_default="3"
-OCF_RESKEY_curl_sleep_default="1"

 : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
 : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
 : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
 : ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
 : ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}

 meta_data() {
     cat <<END
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index de67981d8..8c71e7fac 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -49,16 +49,12 @@ OCF_RESKEY_auth_type_default="key"
 OCF_RESKEY_profile_default="default"
 OCF_RESKEY_region_default=""
 OCF_RESKEY_api_delay_default="3"
-OCF_RESKEY_curl_retries_default="3"
-OCF_RESKEY_curl_sleep_default="1"

 : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
 : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
 : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
 : ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
 : ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}

 meta_data() {
     cat <<END

From d451c5c595b08685f84ec85da96ae9cb4fc076fe Mon Sep 17 00:00:00 2001
From: harshkiprofile <beer18317@gmail.com>
Date: Tue, 5 Nov 2024 20:50:24 +0530
Subject: [PATCH 7/7] aws.sh needs to added to be symlinkstargets in
 doc/man/Makefile.am

---
 doc/man/Makefile.am | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index ef7639bff..447f5cba3 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -42,7 +42,7 @@ radir = $(abs_top_builddir)/heartbeat
 # required for out-of-tree build
 symlinkstargets = \
 	ocf-distro ocf.py ocf-rarun ocf-returncodes \
-	findif.sh apache-conf.sh http-mon.sh mysql-common.sh \
+	findif.sh apache-conf.sh aws.sh http-mon.sh mysql-common.sh \
 	nfsserver-redhat.sh openstack-common.sh ora-common.sh

 preptree:
@@ -1,161 +0,0 @@
From cc5ffa5e599c974c426e93faa821b342e96b916d Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 11 Nov 2024 12:46:27 +0100
Subject: [PATCH 1/2] aws.sh: chmod 600 $TOKEN_FILE, add get_instance_id() with
 DMI support, and use get_instance_id() in AWS agents

---
 heartbeat/aws-vpc-move-ip |  2 +-
 heartbeat/aws.sh          | 30 +++++++++++++++++++++++++++---
 heartbeat/awseip          |  2 +-
 heartbeat/awsvip          |  2 +-
 4 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index 3aa9ceb02..09ae68b57 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -269,7 +269,7 @@ ec2ip_validate() {

 	TOKEN=$(get_token)
 	[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
-	EC2_INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+	EC2_INSTANCE_ID=$(get_instance_id)
 	[ $? -ne 0 ] && exit $OCF_ERR_GENERIC

 	if [ -z "${EC2_INSTANCE_ID}" ]; then
diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
index c77f93b91..9cd343c16 100644
--- a/heartbeat/aws.sh
+++ b/heartbeat/aws.sh
@@ -9,8 +9,8 @@
 . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

 # Defaults
-OCF_RESKEY_curl_retries_default="3"
-OCF_RESKEY_curl_sleep_default="1"
+OCF_RESKEY_curl_retries_default="4"
+OCF_RESKEY_curl_sleep_default="3"

 : ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
 : ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
@@ -20,11 +20,13 @@ OCF_RESKEY_curl_sleep_default="1"
 TOKEN_FILE="${HA_RSCTMP}/.aws_imds_token"
 TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
 TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining
+DMI_FILE="/sys/devices/virtual/dmi/id/board_asset_tag" # Only supported on nitro-based instances.

 # Function to fetch a new token
 fetch_new_token() {
 TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME'" "http://169.254.169.254/latest/api/token")
 echo "$TOKEN $(date +%s)" > "$TOKEN_FILE"
+	chmod 600 "$TOKEN_FILE"
 echo "$TOKEN"
 }

@@ -43,4 +45,26 @@ get_token() {
 fi
 # Fetch a new token if not valid
 fetch_new_token
-}
\ No newline at end of file
+}
+
+get_instance_id() {
+	local INSTANCE_ID
+
+	# Try to get the EC2 instance ID from DMI first before falling back to IMDS.
+	ocf_log debug "EC2: Attempt to get EC2 Instance ID from local file."
+	if [ -r "$DMI_FILE" ] && [ -s "$DMI_FILE" ]; then
+		INSTANCE_ID="$(cat "$DMI_FILE")"
+		case "$INSTANCE_ID" in
+			i-0*) echo "$INSTANCE_ID"; return "$OCF_SUCCESS" ;;
+		esac
+	fi
+
+	INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+	if [ $? -ne 0 ]; then
+		ocf_exit_reason "Failed to get EC2 Instance ID"
+		exit $OCF_ERR_GENERIC
+	fi
+
+	echo "$INSTANCE_ID"
+	return "$OCF_SUCCESS"
+}
diff --git a/heartbeat/awseip b/heartbeat/awseip
index 4b1c3bc6a..7f38376dc 100755
--- a/heartbeat/awseip
+++ b/heartbeat/awseip
@@ -305,7 +305,7 @@ ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
 PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}"
 TOKEN=$(get_token)
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
-INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+INSTANCE_ID=$(get_instance_id)
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC

 case $__OCF_ACTION in
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index 8c71e7fac..0856ac5e4 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -265,7 +265,7 @@ fi
 SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
 TOKEN=$(get_token)
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
-INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+INSTANCE_ID=$(get_instance_id)
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
 MAC_ADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/mac")
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC

From b8d3ecc6a8ce4baf4b28d02978dd573728ccf5fa Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 18 Nov 2024 11:10:42 +0100
Subject: [PATCH 2/2] aws.sh/ocf-shellfuncs: add ability to fresh token if it's
 invalid

---
 heartbeat/aws.sh            |  1 +
 heartbeat/ocf-shellfuncs.in | 11 ++++++++++-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
index 9cd343c16..64f2e13a7 100644
--- a/heartbeat/aws.sh
+++ b/heartbeat/aws.sh
@@ -18,6 +18,7 @@ OCF_RESKEY_curl_sleep_default="3"
 # Function to enable reusable IMDS token retrieval for efficient repeated access
 # File to store the token and timestamp
 TOKEN_FILE="${HA_RSCTMP}/.aws_imds_token"
+TOKEN_FUNC="fetch_new_token" # Used by curl_retry() if saved token is invalid
 TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
 TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining
 DMI_FILE="/sys/devices/virtual/dmi/id/board_asset_tag" # Only supported on nitro-based instances.
diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index 922c6ea45..8e51fa3c8 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -697,6 +697,15 @@ curl_retry()

 		ocf_log debug "result: $result"
 		[ $rc -eq 0 ] && break
+		if [ -n "$TOKEN" ] && [ -n "$TOKEN_FILE" ] && \
+		   [ -f "$TOKEN_FILE" ] && [ -n "$TOKEN_FUNC" ] && \
+		   echo "$result" | grep -q "The requested URL returned error: 401$"; then
+			local OLD_TOKEN="$TOKEN"
+			ocf_log err "Token invalid. Getting new token."
+			TOKEN=$($TOKEN_FUNC)
+			[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
+			args=$(echo "$args" | sed "s/$OLD_TOKEN/$TOKEN/")
+		fi
 		sleep $sleep
 	done

@@ -1110,4 +1119,4 @@ ocf_is_true "$OCF_TRACE_RA" && ocf_start_trace
 # pacemaker sets HA_use_logd, some others use HA_LOGD :/
 if ocf_is_true "$HA_use_logd"; then
 	: ${HA_LOGD:=yes}
-fi
\ No newline at end of file
+fi
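Taken together, the two patch sets above let every AWS agent share one cached IMDSv2 token and one instance-ID lookup instead of repeating raw curl calls. A rough usage sketch of how an agent consumes the helper (not part of the patches; the availability-zone query and error handling are illustrative only):

    : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
    . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
    . ${OCF_FUNCTIONS_DIR}/aws.sh    # provides get_token() and get_instance_id()

    TOKEN=$(get_token)               # reuses ${HA_RSCTMP}/.aws_imds_token while still fresh
    [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
    INSTANCE_ID=$(get_instance_id)   # DMI board_asset_tag first, IMDS as fallback
    [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
    # any further metadata query presents the same token
    AZ=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
        http://169.254.169.254/latest/meta-data/placement/availability-zone)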
@@ -1,184 +0,0 @@
From 392d40048a25d7cb73ec5b5e9f7a5862f7a3fd48 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 11 Nov 2024 12:22:27 +0100
Subject: [PATCH 1/2] aws.sh: add get_interface_mac()

---
 heartbeat/aws.sh | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
index 64f2e13a7..ebb4eb1f4 100644
--- a/heartbeat/aws.sh
+++ b/heartbeat/aws.sh
@@ -69,3 +69,24 @@ get_instance_id() {
 	echo "$INSTANCE_ID"
 	return "$OCF_SUCCESS"
 }
+
+get_interface_mac() {
+	local MAC_FILE MAC_ADDR rc
+	MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address"
+	if [ -f "$MAC_FILE" ]; then
+		cmd="cat ${MAC_FILE}"
+	else
+		cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3"
+	fi
+	ocf_log debug "executing command: $cmd"
+	MAC_ADDR="$(eval $cmd)"
+	rc=$?
+	if [ $rc != 0 ]; then
+		ocf_log warn "command failed, rc: $rc"
+		return $OCF_ERR_GENERIC
+	fi
+	ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
+
+	echo $MAC_ADDR
+	return $OCF_SUCCESS
+}

From 87337ac4da931d5a53c83d53d4bab17ee123ba9f Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 11 Nov 2024 12:26:38 +0100
Subject: [PATCH 2/2] awsvip: let user specify which interface to use, and make
 the parameter optional in aws-vpc-move-ip

---
 heartbeat/aws-vpc-move-ip | 20 ++++----------------
 heartbeat/aws.sh          |  4 +++-
 heartbeat/awsvip          | 24 +++++++++++++++++-------
 3 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index 09ae68b57..2afc0ba53 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -157,7 +157,7 @@ Role to use to query/update the route table
 <content type="string" default="${OCF_RESKEY_routing_table_role_default}" />
 </parameter>

-<parameter name="interface" required="1">
+<parameter name="interface" required="0">
 <longdesc lang="en">
 Name of the network interface, i.e. eth0
 </longdesc>
@@ -321,7 +321,7 @@ ec2ip_monitor() {
 		ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call"
 	fi

-	cmd="ip addr show to $OCF_RESKEY_ip up"
+	cmd="ip addr show dev $OCF_RESKEY_interface to $OCF_RESKEY_ip up"
 	ocf_log debug "executing command: $cmd"
 	RESULT=$($cmd | grep "$OCF_RESKEY_ip")
 	if [ -z "$RESULT" ]; then
@@ -331,7 +331,7 @@ ec2ip_monitor() {
 			level="info"
 		fi

-		ocf_log "$level" "IP $OCF_RESKEY_ip not assigned to running interface"
+		ocf_log "$level" "IP $OCF_RESKEY_ip not assigned to interface $OCF_RESKEY_interface"
 		return $OCF_NOT_RUNNING
 	fi

@@ -369,19 +369,7 @@ ec2ip_drop() {
 }

 ec2ip_get_instance_eni() {
-	MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address"
-	if [ -f $MAC_FILE ]; then
-		cmd="cat ${MAC_FILE}"
-	else
-		cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3"
-	fi
-	ocf_log debug "executing command: $cmd"
-	MAC_ADDR="$(eval $cmd)"
-	rc=$?
-	if [ $rc != 0 ]; then
-		ocf_log warn "command failed, rc: $rc"
-		return $OCF_ERR_GENERIC
-	fi
+	MAC_ADDR=$(get_interface_mac)
 	ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"

 	cmd="curl_retry \"$OCF_RESKEY_curl_retries\" \"$OCF_RESKEY_curl_sleep\" \"--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'\" \"http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id\""
diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
index ebb4eb1f4..216033afe 100644
--- a/heartbeat/aws.sh
+++ b/heartbeat/aws.sh
@@ -73,7 +73,9 @@ get_instance_id() {
 get_interface_mac() {
 	local MAC_FILE MAC_ADDR rc
 	MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address"
-	if [ -f "$MAC_FILE" ]; then
+	if [ -z "$OCF_RESKEY_interface" ]; then
+		cmd="curl_retry \"$OCF_RESKEY_curl_retries\" \"$OCF_RESKEY_curl_sleep\" \"--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'\" \"http://169.254.169.254/latest/meta-data/mac\""
+	elif [ -f "$MAC_FILE" ]; then
 		cmd="cat ${MAC_FILE}"
 	else
 		cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3"
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index 0856ac5e4..015180d5a 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -49,12 +49,14 @@ OCF_RESKEY_auth_type_default="key"
 OCF_RESKEY_profile_default="default"
 OCF_RESKEY_region_default=""
 OCF_RESKEY_api_delay_default="3"
+OCF_RESKEY_interface_default=""

 : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
 : ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
 : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
 : ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
 : ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
+: ${OCF_RESKEY_interface=${OCF_RESKEY_interface_default}}

 meta_data() {
     cat <<END
@@ -125,6 +127,14 @@ a short delay between API calls, to avoid sending API too quick
 <content type="integer" default="${OCF_RESKEY_api_delay_default}" />
 </parameter>

+<parameter name="interface" required="0">
+<longdesc lang="en">
+Name of the network interface, i.e. eth0
+</longdesc>
+<shortdesc lang="en">network interface name</shortdesc>
+<content type="string" default="${OCF_RESKEY_interface_default}" />
+</parameter>
+
 <parameter name="curl_retries" unique="0">
 <longdesc lang="en">
 curl retries before failing
@@ -207,16 +217,16 @@ awsvip_stop() {
 }

 awsvip_monitor() {
-    $AWSCLI_CMD ec2 describe-instances \
-        --instance-id "${INSTANCE_ID}" \
-        --query 'Reservations[].Instances[].NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \
+    $AWSCLI_CMD ec2 describe-network-interfaces \
+        --network-interface-ids "${NETWORK_ID}" \
+        --query 'NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \
         --output text | \
         grep -qE "(^|\s)${SECONDARY_PRIVATE_IP}(\s|$)"
-    RET=$?
-
-    if [ $RET -ne 0 ]; then
+    if [ $? -ne 0 ]; then
+        [ "$__OCF_ACTION" = "monitor" ] && ! ocf_is_probe && ocf_log error "IP $SECONDARY_PRIVATE_IP not assigned to interface ${NETWORK_ID}"
         return $OCF_NOT_RUNNING
     fi
+
     return $OCF_SUCCESS
 }

@@ -267,7 +277,7 @@ TOKEN=$(get_token)
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
 INSTANCE_ID=$(get_instance_id)
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
-MAC_ADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/mac")
+MAC_ADDRESS=$(get_interface_mac)
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
 NETWORK_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id")
 [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
SOURCES/10-gcloud-support-info.patch (new file, 25 lines)
@@ -0,0 +1,25 @@
diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py
--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py	1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py	2019-04-04 11:59:47.592768577 +0200
@@ -900,6 +900,9 @@
   return """\
 For detailed information on this command and its flags, run:
   {command_path} --help
+
+WARNING: {command_path} is only supported for "{command_path} init" and for use
+with the agents in resource-agents.
 """.format(command_path=' '.join(command.GetPath()))


diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py
--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py	1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py	2019-04-04 12:00:23.991142694 +0200
@@ -84,7 +84,7 @@

   pkg_root = os.path.dirname(os.path.dirname(surface.__file__))
   loader = cli.CLILoader(
-      name='gcloud',
+      name='gcloud-ra',
       command_root_directory=os.path.join(pkg_root, 'surface'),
       allow_non_existing_modules=True,
       version_func=VersionFunc,
SOURCES/7-gcp-bundled.patch (new file, 45 lines)
@@ -0,0 +1,45 @@
diff --color -uNr a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
--- a/heartbeat/gcp-pd-move.in	2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-pd-move.in	2024-07-22 11:01:51.455543850 +0200
@@ -32,6 +32,7 @@
 from ocf import logger

 try:
+  sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
   import googleapiclient.discovery
 except ImportError:
   pass
diff --color -uNr a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in
--- a/heartbeat/gcp-vpc-move-ip.in	2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-vpc-move-ip.in	2024-07-22 11:01:18.010752081 +0200
@@ -36,7 +36,7 @@
 . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

 # Defaults
-OCF_RESKEY_gcloud_default="/usr/bin/gcloud"
+OCF_RESKEY_gcloud_default="/usr/bin/gcloud-ra"
 OCF_RESKEY_configuration_default="default"
 OCF_RESKEY_vpc_network_default="default"
 OCF_RESKEY_interface_default="eth0"
diff --color -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
--- a/heartbeat/gcp-vpc-move-route.in	2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-vpc-move-route.in	2024-07-22 11:01:18.011752105 +0200
@@ -45,6 +45,7 @@
 from ocf import *

 try:
+  sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
   import googleapiclient.discovery
   import pyroute2
   try:
diff --color -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
--- a/heartbeat/gcp-vpc-move-vip.in	2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-vpc-move-vip.in	2024-07-22 11:01:18.012752128 +0200
@@ -29,6 +29,7 @@
 from ocf import *

 try:
+  sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
   import googleapiclient.discovery
   try:
     from google.oauth2.service_account import Credentials as ServiceAccountCredentials
@@ -0,0 +1,129 @@
diff -uNr a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py
--- a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py	1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py	2019-04-04 11:56:00.292677044 +0200
@@ -19,8 +19,14 @@
 certificates.
 """

+from pyasn1.codec.der import decoder
 from pyasn1_modules import pem
-import rsa
+from pyasn1_modules.rfc2459 import Certificate
+from pyasn1_modules.rfc5208 import PrivateKeyInfo
+from cryptography.hazmat.primitives import serialization, hashes
+from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography import x509
+from cryptography.hazmat.backends import default_backend
 import six

 from oauth2client import _helpers
@@ -40,7 +46,7 @@
                   '-----END RSA PRIVATE KEY-----')
 _PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
                  '-----END PRIVATE KEY-----')
-_PKCS8_SPEC = None
+_PKCS8_SPEC = PrivateKeyInfo()


 def _bit_list_to_bytes(bit_list):
@@ -67,7 +73,8 @@
     """

     def __init__(self, pubkey):
-        self._pubkey = pubkey
+        self._pubkey = serialization.load_pem_public_key(pubkey,
+            backend=default_backend())

     def verify(self, message, signature):
         """Verifies a message against a signature.
@@ -84,8 +91,9 @@
         """
         message = _helpers._to_bytes(message, encoding='utf-8')
         try:
-            return rsa.pkcs1.verify(message, signature, self._pubkey)
-        except (ValueError, rsa.pkcs1.VerificationError):
+            return self._pubkey.verify(signature, message, padding.PKCS1v15(),
+                hashes.SHA256())
+        except (ValueError, TypeError, InvalidSignature):
             return False

     @classmethod
@@ -109,19 +117,18 @@
         """
         key_pem = _helpers._to_bytes(key_pem)
         if is_x509_cert:
-            from pyasn1.codec.der import decoder
-            from pyasn1_modules import rfc2459
-
-            der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
-            asn1_cert, remaining = decoder.decode(der, asn1Spec=rfc2459.Certificate())
+            der = x509.load_pem_x509_certificate(pem_data, default_backend())
+            asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
             if remaining != b'':
                 raise ValueError('Unused bytes', remaining)

             cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
             key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
-            pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
+            pubkey = serialization.load_der_public_key(decoded_key,
+                backend=default_backend())
         else:
-            pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
+            pubkey = serialization.load_pem_public_key(decoded_key,
+                backend=default_backend())
         return cls(pubkey)


@@ -134,6 +141,8 @@

     def __init__(self, pkey):
         self._key = pkey
+        self._pubkey = serialization.load_pem_private_key(pkey,
+            backend=default_backend())

     def sign(self, message):
         """Signs a message.
@@ -145,7 +154,7 @@
             string, The signature of the message for the given key.
         """
         message = _helpers._to_bytes(message, encoding='utf-8')
-        return rsa.pkcs1.sign(message, self._key, 'SHA-256')
+        return self._key.sign(message, padding.PKCS1v15(), hashes.SHA256())

     @classmethod
     def from_string(cls, key, password='notasecret'):
@@ -163,27 +172,24 @@
             ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
             PEM format.
         """
-        global _PKCS8_SPEC
         key = _helpers._from_bytes(key)  # pem expects str in Py3
         marker_id, key_bytes = pem.readPemBlocksFromFile(
             six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)

         if marker_id == 0:
-            pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
-                                                 format='DER')
-        elif marker_id == 1:
-            from pyasn1.codec.der import decoder
-            from pyasn1_modules import rfc5208
+            pkey = serialization.load_der_private_key(
+                key_bytes, password=None,
+                backend=default_backend())

-            if _PKCS8_SPEC is None:
-                _PKCS8_SPEC = rfc5208.PrivateKeyInfo()
+        elif marker_id == 1:
             key_info, remaining = decoder.decode(
                 key_bytes, asn1Spec=_PKCS8_SPEC)
             if remaining != b'':
                 raise ValueError('Unused bytes', remaining)
             pkey_info = key_info.getComponentByName('privateKey')
-            pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
-                                                 format='DER')
+            pkey = serialization.load_der_private_key(
+                pkey_info.asOctets(), password=None,
+                backend=default_backend())
         else:
             raise ValueError('No key could be detected.')
SOURCES/RHEL-15302-1-exportfs-make-fsid-optional.patch (new file, 75 lines)
@@ -0,0 +1,75 @@
From b806487ca758fce838c988767556007ecf66a6e3 Mon Sep 17 00:00:00 2001
From: Roger Zhou <zzhou@suse.com>
Date: Mon, 10 Apr 2023 18:08:56 +0800
Subject: [PATCH] exportfs: make the "fsid=" parameter optional

Based on feedback [1] from the kernel developer @neilbrown regarding the
NFS clustering use case, it has been determined that the fsid= parameter
is now considered optional and safe to omit.

[1] https://bugzilla.suse.com/show_bug.cgi?id=1201271#c49
"""
Since some time in 2007 NFS has used the UUID of a filesystem as the
primary identifier for that filesystem, rather than using the device
number. So from that time there should have been reduced need for the
"fsid=" option. Probably there are some filesystems that this didn't
work for. btrfs has been problematic at time, particularly when subvols
are exported. But for quite some years this has all "just worked" at
least for the major filesystems (ext4 xfs btrfs). [...] I would suggest
getting rid of the use of fsid= altogether. [...] I'm confident that it
was no longer an issue in SLE-12 and similarly not in SLE-15.
"""
---
 heartbeat/exportfs | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/heartbeat/exportfs b/heartbeat/exportfs
index 2307a9e67b..435a19646b 100755
--- a/heartbeat/exportfs
+++ b/heartbeat/exportfs
@@ -82,7 +82,7 @@ The directory or directories to export.
 <content type="string" />
 </parameter>

-<parameter name="fsid" unique="0" required="1">
+<parameter name="fsid" unique="0" required="0">
 <longdesc lang="en">
 The fsid option to pass to exportfs. This can be a unique positive
 integer, a UUID (assuredly sans comma characters), or the special string
@@ -185,6 +185,8 @@ exportfs_methods() {

 reset_fsid() {
 	CURRENT_FSID=$OCF_RESKEY_fsid
+	[ -z "$CURRENT_FSID" ] && CURRENT_FSID=`echo "$OCF_RESKEY_options" | sed -n 's/.*fsid=\([^,]*\).*/\1/p'`
+	echo $CURRENT_FSID
 }
 bump_fsid() {
 	CURRENT_FSID=$((CURRENT_FSID+1))
@@ -322,7 +324,7 @@ export_one() {
 	if echo "$opts" | grep fsid >/dev/null; then
 		#replace fsid in options list
 		opts=`echo "$opts" | sed "s,fsid=[^,]*,fsid=$(get_fsid),g"`
-	else
+	elif [ -n "$OCF_RESKEY_fsid" ]; then
 		#tack the fsid option onto our options list.
 		opts="${opts}${sep}fsid=$(get_fsid)"
 	fi
@@ -448,8 +450,8 @@ exportfs_validate_all ()
 		ocf_exit_reason "$OCF_RESKEY_fsid cannot contain a comma"
 		return $OCF_ERR_CONFIGURED
 	fi
-	if [ $NUMDIRS -gt 1 ] &&
-			! ocf_is_decimal "$OCF_RESKEY_fsid"; then
+	if [ $NUMDIRS -gt 1 ] && [ -n "$(reset_fsid)" ] &&
+			! ocf_is_decimal "$(reset_fsid)"; then
 		ocf_exit_reason "use integer fsid when exporting multiple directories"
 		return $OCF_ERR_CONFIGURED
 	fi
@@ -485,6 +487,6 @@ done
 OCF_RESKEY_directory="${directories%% }"

 NUMDIRS=`echo "$OCF_RESKEY_directory" | wc -w`
-OCF_REQUIRED_PARAMS="directory fsid clientspec"
+OCF_REQUIRED_PARAMS="directory clientspec"
 OCF_REQUIRED_BINARIES="exportfs"
 ocf_rarun $*
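With fsid now optional, an exportfs resource can be defined without it. A hypothetical pcs invocation (resource name, path, and network are placeholders, not taken from this compare) would look like:

    pcs resource create nfs-export ocf:heartbeat:exportfs \
        directory=/srv/nfs/export clientspec=192.0.2.0/24 \
        options=rw,sync,no_root_squash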
@@ -0,0 +1,43 @@
From 1d1481aa6d848efab4d398ad6e74d80b5b32549f Mon Sep 17 00:00:00 2001
From: Valentin Vidic <vvidic@debian.org>
Date: Wed, 1 Nov 2023 18:25:45 +0100
Subject: [PATCH] exportfs: remove test for "fsid=" parameter

fsid parameter is now considered optional.
---
 tools/ocft/exportfs          | 5 -----
 tools/ocft/exportfs-multidir | 5 -----
 2 files changed, 10 deletions(-)

diff --git a/tools/ocft/exportfs b/tools/ocft/exportfs
index 285a4b8ea0..1ec3d4c364 100644
--- a/tools/ocft/exportfs
+++ b/tools/ocft/exportfs
@@ -28,11 +28,6 @@ CASE "check base env"
 	Include prepare
 	AgentRun start OCF_SUCCESS

-CASE "check base env: no 'OCF_RESKEY_fsid'"
-	Include prepare
-	Env OCF_RESKEY_fsid=
-	AgentRun start OCF_ERR_CONFIGURED
-
 CASE "check base env: invalid 'OCF_RESKEY_directory'"
 	Include prepare
 	Env OCF_RESKEY_directory=/no_such
diff --git a/tools/ocft/exportfs-multidir b/tools/ocft/exportfs-multidir
index 00e41f0859..ac6d5c7f6a 100644
--- a/tools/ocft/exportfs-multidir
+++ b/tools/ocft/exportfs-multidir
@@ -28,11 +28,6 @@ CASE "check base env"
 	Include prepare
 	AgentRun start OCF_SUCCESS

-CASE "check base env: no 'OCF_RESKEY_fsid'"
-	Include prepare
-	Env OCF_RESKEY_fsid=
-	AgentRun start OCF_ERR_CONFIGURED
-
 CASE "check base env: invalid 'OCF_RESKEY_directory'"
 	Include prepare
 	Env OCF_RESKEY_directory=/no_such
SOURCES/RHEL-15305-1-findif.sh-fix-loopback-handling.patch (new file, 45 lines)
@@ -0,0 +1,45 @@
From e4f84ae185b6943d1ff461d53c7f1b5295783086 Mon Sep 17 00:00:00 2001
From: Valentin Vidic <vvidic@valentin-vidic.from.hr>
Date: Wed, 1 Nov 2023 19:35:21 +0100
Subject: [PATCH] findif.sh: fix loopback handling

tools/ocft/IPaddr2 fails the loopback test because of the missing
table local parameter:

$ ip -o -f inet route list match 127.0.0.3 scope host

$ ip -o -f inet route list match 127.0.0.3 table local scope host
local 127.0.0.0/8 dev lo proto kernel src 127.0.0.1

Also rename the function because it is called only in for the special
loopback address case.
---
 heartbeat/findif.sh | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh
index 5f1c19ec3..7c766e6e0 100644
--- a/heartbeat/findif.sh
+++ b/heartbeat/findif.sh
@@ -29,10 +29,10 @@ prefixcheck() {
   fi
   return 0
 }
-getnetworkinfo()
+getloopbackinfo()
 {
   local line netinfo
-  ip -o -f inet route list match $OCF_RESKEY_ip table "${OCF_RESKEY_table:=main}" scope host | (while read line;
+  ip -o -f inet route list match $OCF_RESKEY_ip table local scope host | (while read line;
   do
     netinfo=`echo $line | awk '{print $2}'`
     case $netinfo in
@@ -222,7 +222,7 @@ findif()
   if [ $# = 0 ] ; then
     case $OCF_RESKEY_ip in
     127.*)
-      set -- `getnetworkinfo`
+      set -- `getloopbackinfo`
       shift;;
     esac
   fi
@ -0,0 +1,20 @@
|
|||||||
|
--- a/heartbeat/findif.sh 2024-02-08 11:31:53.414257686 +0100
|
||||||
|
+++ b/heartbeat/findif.sh 2023-11-02 10:20:12.150853167 +0100
|
||||||
|
@@ -210,14 +210,14 @@
|
||||||
|
fi
|
||||||
|
findif_check_params $family || return $?
|
||||||
|
|
||||||
|
- if [ -n "$netmask" ] ; then
|
||||||
|
+ if [ -n "$netmask" ]; then
|
||||||
|
match=$match/$netmask
|
||||||
|
fi
|
||||||
|
if [ -n "$nic" ] ; then
|
||||||
|
# NIC supports more than two.
|
||||||
|
- set -- $(ip -o -f $family route list match $match $scope table "${OCF_RESKEY_table:=main}" | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
|
||||||
|
+ set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
|
||||||
|
else
|
||||||
|
- set -- $(ip -o -f $family route list match $match $scope table "${OCF_RESKEY_table:=main}" | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
|
||||||
|
+ set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
|
||||||
|
fi
|
||||||
|
if [ $# = 0 ] ; then
|
||||||
|
case $OCF_RESKEY_ip in
|
@ -0,0 +1,555 @@
|
|||||||
|
From f45f76600a7e02c860566db7d1350dc3b09449c2 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Mon, 6 Nov 2023 15:49:44 +0100
|
||||||
|
Subject: [PATCH] aws-vpc-move-ip/aws-vpc-route53/awseip/awsvip: add auth_type
|
||||||
|
parameter and AWS Policy based authentication type
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/aws-vpc-move-ip | 43 +++++++++++++++++++----
|
||||||
|
heartbeat/aws-vpc-route53.in | 47 ++++++++++++++++++++-----
|
||||||
|
heartbeat/awseip | 68 +++++++++++++++++++++++++++---------
|
||||||
|
heartbeat/awsvip | 60 ++++++++++++++++++++++++-------
|
||||||
|
4 files changed, 173 insertions(+), 45 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
|
||||||
|
index dee040300f..54806f6eaa 100755
|
||||||
|
--- a/heartbeat/aws-vpc-move-ip
|
||||||
|
+++ b/heartbeat/aws-vpc-move-ip
|
||||||
|
@@ -36,6 +36,7 @@
|
||||||
|
|
||||||
|
# Defaults
|
||||||
|
OCF_RESKEY_awscli_default="/usr/bin/aws"
|
||||||
|
+OCF_RESKEY_auth_type_default="key"
|
||||||
|
OCF_RESKEY_profile_default="default"
|
||||||
|
OCF_RESKEY_region_default=""
|
||||||
|
OCF_RESKEY_ip_default=""
|
||||||
|
@@ -48,6 +49,7 @@ OCF_RESKEY_monapi_default="false"
|
||||||
|
OCF_RESKEY_lookup_type_default="InstanceId"
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
|
||||||
|
+: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
|
||||||
|
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
|
||||||
|
: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
|
||||||
|
: ${OCF_RESKEY_ip=${OCF_RESKEY_ip_default}}
|
||||||
|
@@ -58,8 +60,6 @@ OCF_RESKEY_lookup_type_default="InstanceId"
|
||||||
|
: ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}}
|
||||||
|
: ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}}
|
||||||
|
: ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}}
|
||||||
|
-
|
||||||
|
-[ -n "$OCF_RESKEY_region" ] && region_opt="--region $OCF_RESKEY_region"
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
|
||||||
|
@@ -83,6 +83,10 @@ cat <<END
|
||||||
|
<longdesc lang="en">
|
||||||
|
Resource Agent to move IP addresses within a VPC of the Amazon Webservices EC2
|
||||||
|
by changing an entry in an specific routing table
|
||||||
|
+
|
||||||
|
+Credentials needs to be setup by running "aws configure", or by using AWS Policies.
|
||||||
|
+
|
||||||
|
+See https://aws.amazon.com/cli/ for more information about awscli.
|
||||||
|
</longdesc>
|
||||||
|
<shortdesc lang="en">Move IP within a VPC of the AWS EC2</shortdesc>
|
||||||
|
|
||||||
|
@@ -95,6 +99,15 @@ Path to command line tools for AWS
|
||||||
|
<content type="string" default="${OCF_RESKEY_awscli_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="auth_type">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Authentication type "key" for AccessKey and SecretAccessKey set via "aws configure",
|
||||||
|
+or "role" to use AWS Policies.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Authentication type</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_auth_type_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="profile">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Valid AWS CLI profile name (see ~/.aws/config and 'aws configure')
|
||||||
|
@@ -198,7 +211,7 @@ END
|
||||||
|
execute_cmd_as_role(){
|
||||||
|
cmd=$1
|
||||||
|
role=$2
|
||||||
|
- output="$($OCF_RESKEY_awscli sts assume-role --role-arn $role --role-session-name AWSCLI-RouteTableUpdate --profile $OCF_RESKEY_profile $region_opt --output=text)"
|
||||||
|
+ output="$($AWSCLI_CMD sts assume-role --role-arn $role --role-session-name AWSCLI-RouteTableUpdate --output=text)"
|
||||||
|
export AWS_ACCESS_KEY_ID="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $5}')"
|
||||||
|
export AWS_SECRET_ACCESS_KEY="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $7}')"
|
||||||
|
export AWS_SESSION_TOKEN="$(echo $output | awk -F" " '$4=="CREDENTIALS" {print $8}')"
|
||||||
|
@@ -220,11 +233,11 @@ ec2ip_set_address_param_compat(){
|
||||||
|
}
|
||||||
|
|
||||||
|
ec2ip_validate() {
|
||||||
|
- for cmd in $OCF_RESKEY_awscli ip curl; do
|
||||||
|
+ for cmd in "$OCF_RESKEY_awscli" ip curl; do
|
||||||
|
check_binary "$cmd"
|
||||||
|
done
|
||||||
|
|
||||||
|
- if [ -z "$OCF_RESKEY_profile" ]; then
|
||||||
|
+ if [ "x${OCF_RESKEY_auth_type}" = "xkey" ] && [ -z "$OCF_RESKEY_profile" ]; then
|
||||||
|
ocf_exit_reason "profile parameter not set"
|
||||||
|
return $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
@@ -262,7 +275,7 @@ ec2ip_monitor() {
|
||||||
|
for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do
|
||||||
|
ocf_log info "monitor: check routing table (API call) - $rtb"
|
||||||
|
if [ -z "${OCF_RESKEY_routing_table_role}" ]; then
|
||||||
|
- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type"
|
||||||
|
+ cmd="$AWSCLI_CMD --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type"
|
||||||
|
ocf_log debug "executing command: $cmd"
|
||||||
|
ROUTE_TO_INSTANCE="$($cmd)"
|
||||||
|
else
|
||||||
|
@@ -368,7 +381,7 @@ ec2ip_get_and_configure() {
|
||||||
|
EC2_NETWORK_INTERFACE_ID="$(ec2ip_get_instance_eni)"
|
||||||
|
for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do
|
||||||
|
if [ -z "${OCF_RESKEY_routing_table_role}" ]; then
|
||||||
|
- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile $region_opt --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID"
|
||||||
|
+ cmd="$AWSCLI_CMD --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID"
|
||||||
|
ocf_log debug "executing command: $cmd"
|
||||||
|
$cmd
|
||||||
|
else
|
||||||
|
@@ -475,6 +488,22 @@ if ! ocf_is_root; then
|
||||||
|
exit $OCF_ERR_PERM
|
||||||
|
fi
|
||||||
|
|
||||||
|
+AWSCLI_CMD="${OCF_RESKEY_awscli}"
|
||||||
|
+if [ "x${OCF_RESKEY_auth_type}" = "xkey" ]; then
|
||||||
|
+ AWSCLI_CMD="$AWSCLI_CMD --profile ${OCF_RESKEY_profile}"
|
||||||
|
+elif [ "x${OCF_RESKEY_auth_type}" = "xrole" ]; then
|
||||||
|
+ if [ -z "${OCF_RESKEY_region}" ]; then
|
||||||
|
+ ocf_exit_reason "region needs to be set when using role-based authentication"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+else
|
||||||
|
+ ocf_exit_reason "Incorrect auth_type: ${OCF_RESKEY_auth_type}"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+fi
|
||||||
|
+if [ -n "${OCF_RESKEY_region}" ]; then
|
||||||
|
+ AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}"
|
||||||
|
+fi
|
||||||
|
+
|
||||||
|
ec2ip_set_address_param_compat
|
||||||
|
|
||||||
|
ec2ip_validate
|
||||||
|
diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
|
||||||
|
index 22cbb35833..18ab157e8a 100644
|
||||||
|
--- a/heartbeat/aws-vpc-route53.in
|
||||||
|
+++ b/heartbeat/aws-vpc-route53.in
|
||||||
|
@@ -46,24 +46,22 @@
|
||||||
|
|
||||||
|
# Defaults
|
||||||
|
OCF_RESKEY_awscli_default="/usr/bin/aws"
|
||||||
|
+OCF_RESKEY_auth_type_default="key"
|
||||||
|
OCF_RESKEY_profile_default="default"
|
||||||
|
+OCF_RESKEY_region_default=""
|
||||||
|
OCF_RESKEY_hostedzoneid_default=""
|
||||||
|
OCF_RESKEY_fullname_default=""
|
||||||
|
OCF_RESKEY_ip_default="local"
|
||||||
|
OCF_RESKEY_ttl_default=10
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
|
||||||
|
+: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
|
||||||
|
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
|
||||||
|
+: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
|
||||||
|
: ${OCF_RESKEY_hostedzoneid:=${OCF_RESKEY_hostedzoneid_default}}
|
||||||
|
: ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}}
|
||||||
|
: ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}}
|
||||||
|
: ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}}
|
||||||
|
-#######################################################################
|
||||||
|
-
|
||||||
|
-
|
||||||
|
-AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile --cli-connect-timeout 10"
|
||||||
|
-#######################################################################
|
||||||
|
-
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
cat <<-EOT
|
||||||
|
@@ -123,6 +121,15 @@ Path to command line tools for AWS
|
||||||
|
<content type="string" default="${OCF_RESKEY_awscli_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="auth_type">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Authentication type "key" for AccessKey and SecretAccessKey set via "aws configure",
|
||||||
|
+or "role" to use AWS Policies.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Authentication type</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_auth_type_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="profile">
|
||||||
|
<longdesc lang="en">
|
||||||
|
The name of the AWS CLI profile of the root account. This
|
||||||
|
@@ -196,7 +203,7 @@ r53_validate() {
|
||||||
|
|
||||||
|
# Check for required binaries
|
||||||
|
ocf_log debug "Checking for required binaries"
|
||||||
|
- for command in curl dig; do
|
||||||
|
+ for command in "${OCF_RESKEY_awscli}" curl dig; do
|
||||||
|
check_binary "$command"
|
||||||
|
done
|
||||||
|
|
||||||
|
@@ -216,7 +223,10 @@ r53_validate() {
|
||||||
|
esac
|
||||||
|
|
||||||
|
# profile
|
||||||
|
- [[ -z "$OCF_RESKEY_profile" ]] && ocf_log error "AWS CLI profile not set $OCF_RESKEY_profile!" && exit $OCF_ERR_CONFIGURED
|
||||||
|
+ if [ "x${OCF_RESKEY_auth_type}" = "xkey" ] && [ -z "$OCF_RESKEY_profile" ]; then
|
||||||
|
+ ocf_exit_reason "profile parameter not set"
|
||||||
|
+ return $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
|
||||||
|
# TTL
|
||||||
|
[[ -z "$OCF_RESKEY_ttl" ]] && ocf_log error "TTL not set $OCF_RESKEY_ttl!" && exit $OCF_ERR_CONFIGURED
|
||||||
|
@@ -417,7 +427,6 @@ _update_record() {
|
||||||
|
}
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
-
|
||||||
|
case $__OCF_ACTION in
|
||||||
|
usage|help)
|
||||||
|
usage
|
||||||
|
@@ -427,6 +436,26 @@ case $__OCF_ACTION in
|
||||||
|
metadata
|
||||||
|
exit $OCF_SUCCESS
|
||||||
|
;;
|
||||||
|
+esac
|
||||||
|
+
|
||||||
|
+AWSCLI_CMD="${OCF_RESKEY_awscli}"
|
||||||
|
+if [ "x${OCF_RESKEY_auth_type}" = "xkey" ]; then
|
||||||
|
+ AWSCLI_CMD="$AWSCLI_CMD --profile ${OCF_RESKEY_profile}"
|
||||||
|
+elif [ "x${OCF_RESKEY_auth_type}" = "xrole" ]; then
|
||||||
|
+ if [ -z "${OCF_RESKEY_region}" ]; then
|
||||||
|
+ ocf_exit_reason "region needs to be set when using role-based authentication"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+else
|
||||||
|
+ ocf_exit_reason "Incorrect auth_type: ${OCF_RESKEY_auth_type}"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+fi
|
||||||
|
+if [ -n "${OCF_RESKEY_region}" ]; then
|
||||||
|
+ AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}"
|
||||||
|
+fi
|
||||||
|
+AWSCLI_CMD="$AWSCLI_CMD --cli-connect-timeout 10"
|
||||||
|
+
|
||||||
|
+case $__OCF_ACTION in
|
||||||
|
start)
|
||||||
|
r53_validate || exit $?
|
||||||
|
r53_start
|
||||||
|
diff --git a/heartbeat/awseip b/heartbeat/awseip
|
||||||
|
index dc48460c85..49b0ca6155 100755
|
||||||
|
--- a/heartbeat/awseip
|
||||||
|
+++ b/heartbeat/awseip
|
||||||
|
@@ -23,7 +23,8 @@
|
||||||
|
#
|
||||||
|
# Prerequisites:
|
||||||
|
#
|
||||||
|
-# - preconfigured AWS CLI running environment (AccessKey, SecretAccessKey, etc.)
|
||||||
|
+# - preconfigured AWS CLI running environment (AccessKey, SecretAccessKey, etc.) or
|
||||||
|
+# (AWSRole) Setup up relevant AWS Policies to allow agent related functions to be executed.
|
||||||
|
# - a reserved secondary private IP address for EC2 instances high availability
|
||||||
|
# - IAM user role with the following permissions:
|
||||||
|
# * DescribeInstances
|
||||||
|
@@ -44,11 +45,15 @@
|
||||||
|
# Defaults
|
||||||
|
#
|
||||||
|
OCF_RESKEY_awscli_default="/usr/bin/aws"
|
||||||
|
+OCF_RESKEY_auth_type_default="key"
|
||||||
|
OCF_RESKEY_profile_default="default"
|
||||||
|
+OCF_RESKEY_region_default=""
|
||||||
|
OCF_RESKEY_api_delay_default="3"
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
|
||||||
|
+: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
|
||||||
|
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
|
||||||
|
+: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
|
||||||
|
: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
|
||||||
|
|
||||||
|
meta_data() {
|
||||||
|
@@ -63,7 +68,7 @@ Resource Agent for Amazon AWS Elastic IP Addresses.
|
||||||
|
|
||||||
|
It manages AWS Elastic IP Addresses with awscli.
|
||||||
|
|
||||||
|
-Credentials needs to be setup by running "aws configure".
|
||||||
|
+Credentials needs to be setup by running "aws configure", or by using AWS Policies.
|
||||||
|
|
||||||
|
See https://aws.amazon.com/cli/ for more information about awscli.
|
||||||
|
</longdesc>
|
||||||
|
@@ -79,6 +84,15 @@ command line tools for aws services
|
||||||
|
<content type="string" default="${OCF_RESKEY_awscli_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="auth_type">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Authentication type "key" for AccessKey and SecretAccessKey set via "aws configure",
|
||||||
|
+or "role" to use AWS Policies.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Authentication type</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_auth_type_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="profile">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Valid AWS CLI profile name (see ~/.aws/config and 'aws configure')
|
||||||
|
@@ -111,6 +125,14 @@ predefined private ip address for ec2 instance
|
||||||
|
<content type="string" default="" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="region" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Region for AWS resource (required for role-based authentication)
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Region</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_region_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="api_delay" unique="0">
|
||||||
|
<longdesc lang="en">
|
||||||
|
a short delay between API calls, to avoid sending API too quick
|
||||||
|
@@ -157,13 +179,13 @@ awseip_start() {
|
||||||
|
NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN")
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
- $AWSCLI --profile $OCF_RESKEY_profile ec2 associate-address \
|
||||||
|
+ $AWSCLI_CMD ec2 associate-address \
|
||||||
|
--network-interface-id ${NETWORK_ID} \
|
||||||
|
--allocation-id ${ALLOCATION_ID} \
|
||||||
|
--private-ip-address ${PRIVATE_IP_ADDRESS}
|
||||||
|
RET=$?
|
||||||
|
else
|
||||||
|
- $AWSCLI --profile $OCF_RESKEY_profile ec2 associate-address \
|
||||||
|
+ $AWSCLI_CMD ec2 associate-address \
|
||||||
|
--instance-id ${INSTANCE_ID} \
|
||||||
|
--allocation-id ${ALLOCATION_ID}
|
||||||
|
RET=$?
|
||||||
|
@@ -183,7 +205,7 @@ awseip_start() {
|
||||||
|
awseip_stop() {
|
||||||
|
awseip_monitor || return $OCF_SUCCESS
|
||||||
|
|
||||||
|
- ASSOCIATION_ID=$($AWSCLI --profile $OCF_RESKEY_profile --output json ec2 describe-addresses \
|
||||||
|
+ ASSOCIATION_ID=$($AWSCLI_CMD --output json ec2 describe-addresses \
|
||||||
|
--allocation-id ${ALLOCATION_ID} | grep -m 1 "AssociationId" | awk -F'"' '{print$4}')
|
||||||
|
|
||||||
|
if [ -z "${ASSOCIATION_ID}" ]; then
|
||||||
|
@@ -191,9 +213,7 @@ awseip_stop() {
|
||||||
|
return $OCF_NOT_RUNNING
|
||||||
|
fi
|
||||||
|
|
||||||
|
- $AWSCLI --profile ${OCF_RESKEY_profile} \
|
||||||
|
- ec2 disassociate-address \
|
||||||
|
- --association-id ${ASSOCIATION_ID}
|
||||||
|
+ $AWSCLI_CMD ec2 disassociate-address --association-id ${ASSOCIATION_ID}
|
||||||
|
RET=$?
|
||||||
|
|
||||||
|
# delay to avoid sending request too fast
|
||||||
|
@@ -208,7 +228,7 @@ awseip_stop() {
|
||||||
|
}
|
||||||
|
|
||||||
|
awseip_monitor() {
|
||||||
|
- $AWSCLI --profile $OCF_RESKEY_profile ec2 describe-instances --instance-id "${INSTANCE_ID}" | grep -q "${ELASTIC_IP}"
|
||||||
|
+ $AWSCLI_CMD ec2 describe-instances --instance-id "${INSTANCE_ID}" | grep -q "${ELASTIC_IP}"
|
||||||
|
RET=$?
|
||||||
|
|
||||||
|
if [ $RET -ne 0 ]; then
|
||||||
|
@@ -218,9 +238,9 @@ awseip_monitor() {
|
||||||
|
}
|
||||||
|
|
||||||
|
awseip_validate() {
|
||||||
|
- check_binary ${AWSCLI}
|
||||||
|
+ check_binary "${OCF_RESKEY_awscli}"
|
||||||
|
|
||||||
|
- if [ -z "$OCF_RESKEY_profile" ]; then
|
||||||
|
+ if [ "x${OCF_RESKEY_auth_type}" = "xkey" ] && [ -z "$OCF_RESKEY_profile" ]; then
|
||||||
|
ocf_exit_reason "profile parameter not set"
|
||||||
|
return $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
@@ -238,9 +258,27 @@ case $__OCF_ACTION in
|
||||||
|
meta_data
|
||||||
|
exit $OCF_SUCCESS
|
||||||
|
;;
|
||||||
|
-esac
|
||||||
|
+ usage|help)
|
||||||
|
+ awseip_usage
|
||||||
|
+ exit $OCF_SUCCESS
|
||||||
|
+ ;;
|
||||||
|
+esac
|
||||||
|
|
||||||
|
-AWSCLI="${OCF_RESKEY_awscli}"
|
||||||
|
+AWSCLI_CMD="${OCF_RESKEY_awscli}"
|
||||||
|
+if [ "x${OCF_RESKEY_auth_type}" = "xkey" ]; then
|
||||||
|
+ AWSCLI_CMD="$AWSCLI_CMD --profile ${OCF_RESKEY_profile}"
|
||||||
|
+elif [ "x${OCF_RESKEY_auth_type}" = "xrole" ]; then
|
||||||
|
+ if [ -z "${OCF_RESKEY_region}" ]; then
|
||||||
|
+ ocf_exit_reason "region needs to be set when using role-based authentication"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+else
|
||||||
|
+ ocf_exit_reason "Incorrect auth_type: ${OCF_RESKEY_auth_type}"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+fi
|
||||||
|
+if [ -n "${OCF_RESKEY_region}" ]; then
|
||||||
|
+ AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}"
|
||||||
|
+fi
|
||||||
|
ELASTIC_IP="${OCF_RESKEY_elastic_ip}"
|
||||||
|
ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
|
||||||
|
PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}"
|
||||||
|
@@ -272,10 +310,6 @@ case $__OCF_ACTION in
|
||||||
|
validate|validate-all)
|
||||||
|
awseip_validate
|
||||||
|
;;
|
||||||
|
- usage|help)
|
||||||
|
- awseip_usage
|
||||||
|
- exit $OCF_SUCCESS
|
||||||
|
- ;;
|
||||||
|
*)
|
||||||
|
awseip_usage
|
||||||
|
exit $OCF_ERR_UNIMPLEMENTED
|
||||||
|
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
|
||||||
|
index 037278e296..bdb4d68dd0 100755
|
||||||
|
--- a/heartbeat/awsvip
|
||||||
|
+++ b/heartbeat/awsvip
|
||||||
|
@@ -23,7 +23,8 @@
|
||||||
|
#
|
||||||
|
# Prerequisites:
|
||||||
|
#
|
||||||
|
-# - preconfigured AWS CLI running environment (AccessKey, SecretAccessKey, etc.)
|
||||||
|
+# - preconfigured AWS CLI running environment (AccessKey, SecretAccessKey, etc.) or
|
||||||
|
+# (AWSRole) Setup up relevant AWS Policies to allow agent related functions to be executed.
|
||||||
|
# - a reserved secondary private IP address for EC2 instances high availablity
|
||||||
|
# - IAM user role with the following permissions:
|
||||||
|
# * DescribeInstances
|
||||||
|
@@ -43,11 +44,15 @@
|
||||||
|
# Defaults
|
||||||
|
#
|
||||||
|
OCF_RESKEY_awscli_default="/usr/bin/aws"
|
||||||
|
+OCF_RESKEY_auth_type_default="key"
|
||||||
|
OCF_RESKEY_profile_default="default"
|
||||||
|
+OCF_RESKEY_region_default=""
|
||||||
|
OCF_RESKEY_api_delay_default="3"
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
|
||||||
|
+: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
|
||||||
|
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
|
||||||
|
+: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
|
||||||
|
: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
|
||||||
|
|
||||||
|
meta_data() {
|
||||||
|
@@ -62,7 +67,7 @@ Resource Agent for Amazon AWS Secondary Private IP Addresses.
|
||||||
|
|
||||||
|
It manages AWS Secondary Private IP Addresses with awscli.
|
||||||
|
|
||||||
|
-Credentials needs to be setup by running "aws configure".
|
||||||
|
+Credentials needs to be setup by running "aws configure", or by using AWS Policies.
|
||||||
|
|
||||||
|
See https://aws.amazon.com/cli/ for more information about awscli.
|
||||||
|
</longdesc>
|
||||||
|
@@ -78,6 +83,15 @@ command line tools for aws services
|
||||||
|
<content type="string" default="${OCF_RESKEY_awscli_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="auth_type">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Authentication type "key" for AccessKey and SecretAccessKey set via "aws configure",
|
||||||
|
+or "role" to use AWS Policies.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Authentication type</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_auth_type_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="profile">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Valid AWS CLI profile name (see ~/.aws/config and 'aws configure')
|
||||||
|
@@ -94,6 +108,14 @@ reserved secondary private ip for ec2 instance
|
||||||
|
<content type="string" default="" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="region" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Region for AWS resource (required for role-based authentication)
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Region</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_region_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="api_delay" unique="0">
|
||||||
|
<longdesc lang="en">
|
||||||
|
a short delay between API calls, to avoid sending API too quick
|
||||||
|
@@ -131,7 +153,7 @@ END
|
||||||
|
awsvip_start() {
|
||||||
|
awsvip_monitor && return $OCF_SUCCESS
|
||||||
|
|
||||||
|
- $AWSCLI --profile $OCF_RESKEY_profile ec2 assign-private-ip-addresses \
|
||||||
|
+ $AWSCLI_CMD ec2 assign-private-ip-addresses \
|
||||||
|
--network-interface-id ${NETWORK_ID} \
|
||||||
|
--private-ip-addresses ${SECONDARY_PRIVATE_IP} \
|
||||||
|
--allow-reassignment
|
||||||
|
@@ -151,7 +173,7 @@ awsvip_start() {
|
||||||
|
awsvip_stop() {
|
||||||
|
awsvip_monitor || return $OCF_SUCCESS
|
||||||
|
|
||||||
|
- $AWSCLI --profile $OCF_RESKEY_profile ec2 unassign-private-ip-addresses \
|
||||||
|
+ $AWSCLI_CMD ec2 unassign-private-ip-addresses \
|
||||||
|
--network-interface-id ${NETWORK_ID} \
|
||||||
|
--private-ip-addresses ${SECONDARY_PRIVATE_IP}
|
||||||
|
RET=$?
|
||||||
|
@@ -168,7 +190,7 @@ awsvip_stop() {
|
||||||
|
}
|
||||||
|
|
||||||
|
awsvip_monitor() {
|
||||||
|
- $AWSCLI --profile ${OCF_RESKEY_profile} ec2 describe-instances \
|
||||||
|
+ $AWSCLI_CMD ec2 describe-instances \
|
||||||
|
--instance-id "${INSTANCE_ID}" \
|
||||||
|
--query 'Reservations[].Instances[].NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \
|
||||||
|
--output text | \
|
||||||
|
@@ -182,9 +204,9 @@ awsvip_monitor() {
|
||||||
|
}
|
||||||
|
|
||||||
|
awsvip_validate() {
|
||||||
|
- check_binary ${AWSCLI}
|
||||||
|
+ check_binary "${OCF_RESKEY_awscli}"
|
||||||
|
|
||||||
|
- if [ -z "$OCF_RESKEY_profile" ]; then
|
||||||
|
+ if [ "x${OCF_RESKEY_auth_type}" = "xkey" ] && [ -z "$OCF_RESKEY_profile" ]; then
|
||||||
|
ocf_exit_reason "profile parameter not set"
|
||||||
|
return $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
@@ -202,9 +224,27 @@ case $__OCF_ACTION in
|
||||||
|
meta_data
|
||||||
|
exit $OCF_SUCCESS
|
||||||
|
;;
|
||||||
|
+ usage|help)
|
||||||
|
+ awsvip_usage
|
||||||
|
+ exit $OCF_SUCCESS
|
||||||
|
+ ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
-AWSCLI="${OCF_RESKEY_awscli}"
|
||||||
|
+AWSCLI_CMD="${OCF_RESKEY_awscli}"
|
||||||
|
+if [ "x${OCF_RESKEY_auth_type}" = "xkey" ]; then
|
||||||
|
+ AWSCLI_CMD="$AWSCLI_CMD --profile ${OCF_RESKEY_profile}"
|
||||||
|
+elif [ "x${OCF_RESKEY_auth_type}" = "xrole" ]; then
|
||||||
|
+ if [ -z "${OCF_RESKEY_region}" ]; then
|
||||||
|
+ ocf_exit_reason "region needs to be set when using role-based authentication"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+else
|
||||||
|
+ ocf_exit_reason "Incorrect auth_type: ${OCF_RESKEY_auth_type}"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+fi
|
||||||
|
+if [ -n "${OCF_RESKEY_region}" ]; then
|
||||||
|
+ AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}"
|
||||||
|
+fi
|
||||||
|
SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
|
||||||
|
TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
|
||||||
|
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
|
||||||
|
@@ -236,10 +276,6 @@ case $__OCF_ACTION in
|
||||||
|
validate|validate-all)
|
||||||
|
awsvip_validate
|
||||||
|
;;
|
||||||
|
- usage|help)
|
||||||
|
- awsvip_usage
|
||||||
|
- exit $OCF_SUCCESS
|
||||||
|
- ;;
|
||||||
|
*)
|
||||||
|
awsvip_usage
|
||||||
|
exit $OCF_ERR_UNIMPLEMENTED
|
22
SOURCES/RHEL-17083-findif-EOS-fix.patch
Normal file
22
SOURCES/RHEL-17083-findif-EOS-fix.patch
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
From b23ba4eaefb500199c4845751f4c5545c81f42f1 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Mon, 20 Nov 2023 16:37:37 +0100
|
||||||
|
Subject: [PATCH 2/2] findif: also check that netmaskbits != EOS
|
||||||
|
|
||||||
|
---
|
||||||
|
tools/findif.c | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/tools/findif.c b/tools/findif.c
|
||||||
|
index a25395fec..ab108a3c4 100644
|
||||||
|
--- a/tools/findif.c
|
||||||
|
+++ b/tools/findif.c
|
||||||
|
@@ -669,7 +669,7 @@ main(int argc, char ** argv) {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
- if (netmaskbits) {
|
||||||
|
+ if (netmaskbits != NULL && *netmaskbits != EOS) {
|
||||||
|
best_netmask = netmask;
|
||||||
|
}else if (best_netmask == 0L) {
|
||||||
|
/*
|
23
SOURCES/RHEL-32828-db2-fix-OCF_SUCESS-typo.patch
Normal file
23
SOURCES/RHEL-32828-db2-fix-OCF_SUCESS-typo.patch
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
From a9c4aeb971e9f4963345d0e215b729def62dd27c Mon Sep 17 00:00:00 2001
|
||||||
|
From: pepadelic <162310096+pepadelic@users.noreply.github.com>
|
||||||
|
Date: Mon, 15 Apr 2024 13:52:54 +0200
|
||||||
|
Subject: [PATCH] Update db2: fix OCF_SUCESS name in db2_notify
|
||||||
|
|
||||||
|
fix OCF_SUCESS to OCF_SUCCESS in db2_notify
|
||||||
|
---
|
||||||
|
heartbeat/db2 | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/db2 b/heartbeat/db2
|
||||||
|
index 95447ab6cb..1cd66f15af 100755
|
||||||
|
--- a/heartbeat/db2
|
||||||
|
+++ b/heartbeat/db2
|
||||||
|
@@ -848,7 +848,7 @@ db2_notify() {
|
||||||
|
|
||||||
|
# only interested in pre-start
|
||||||
|
[ $OCF_RESKEY_CRM_meta_notify_type = pre \
|
||||||
|
- -a $OCF_RESKEY_CRM_meta_notify_operation = start ] || return $OCF_SUCESS
|
||||||
|
+ -a $OCF_RESKEY_CRM_meta_notify_operation = start ] || return $OCF_SUCCESS
|
||||||
|
|
||||||
|
# gets FIRST_ACTIVE_LOG
|
||||||
|
db2_get_cfg $dblist || return $?
|
343
SOURCES/RHEL-34137-aws-agents-use-curl_retry.patch
Normal file
343
SOURCES/RHEL-34137-aws-agents-use-curl_retry.patch
Normal file
@ -0,0 +1,343 @@
|
|||||||
|
From fc0657b936f6a58f741e33f851b22f82bc68bffa Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Tue, 6 Feb 2024 13:28:12 +0100
|
||||||
|
Subject: [PATCH 1/2] ocf-shellfuncs: add curl_retry()
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/ocf-shellfuncs.in | 34 ++++++++++++++++++++++++++++++++++
|
||||||
|
1 file changed, 34 insertions(+)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
|
||||||
|
index c5edb6f57..a69a9743d 100644
|
||||||
|
--- a/heartbeat/ocf-shellfuncs.in
|
||||||
|
+++ b/heartbeat/ocf-shellfuncs.in
|
||||||
|
@@ -672,6 +672,40 @@ EOF
|
||||||
|
systemctl daemon-reload
|
||||||
|
}
|
||||||
|
|
||||||
|
+# usage: curl_retry RETRIES SLEEP ARGS URL
|
||||||
|
+#
|
||||||
|
+# Use --show-error in ARGS to log HTTP error code
|
||||||
|
+#
|
||||||
|
+# returns:
|
||||||
|
+# 0 success
|
||||||
|
+# exit:
|
||||||
|
+# 1 fail
|
||||||
|
+curl_retry()
|
||||||
|
+{
|
||||||
|
+ local retries=$1 sleep=$2 opts=$3 url=$4
|
||||||
|
+ local tries=$(($retries + 1))
|
||||||
|
+ local args="--fail $opts $url"
|
||||||
|
+ local result rc
|
||||||
|
+
|
||||||
|
+ for try in $(seq $tries); do
|
||||||
|
+ ocf_log debug "curl $args try $try of $tries"
|
||||||
|
+ result=$(echo "$args" | xargs curl 2>&1)
|
||||||
|
+ rc=$?
|
||||||
|
+
|
||||||
|
+ ocf_log debug "result: $result"
|
||||||
|
+ [ $rc -eq 0 ] && break
|
||||||
|
+ sleep $sleep
|
||||||
|
+ done
|
||||||
|
+
|
||||||
|
+ if [ $rc -ne 0 ]; then
|
||||||
|
+ ocf_exit_reason "curl $args failed $tries tries"
|
||||||
|
+ exit $OCF_ERR_GENERIC
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ echo "$result"
|
||||||
|
+ return $rc
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
# usage: crm_mon_no_validation args...
|
||||||
|
# run crm_mon without any cib schema validation
|
||||||
|
# This is useful when an agent runs in a bundle to avoid potential
|
||||||
|
|
||||||
|
From 80d330557319bdae9e45aad1279e435fc481d4e7 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Tue, 6 Feb 2024 13:28:25 +0100
|
||||||
|
Subject: [PATCH 2/2] AWS agents: use curl_retry()
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/aws-vpc-move-ip | 35 ++++++++++++++++++++++++++---------
|
||||||
|
heartbeat/aws-vpc-route53.in | 27 +++++++++++++++++++++++++--
|
||||||
|
heartbeat/awseip | 36 +++++++++++++++++++++++++++++++-----
|
||||||
|
heartbeat/awsvip | 32 ++++++++++++++++++++++++++++----
|
||||||
|
4 files changed, 110 insertions(+), 20 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
|
||||||
|
index 54806f6ea..6115e5ba8 100755
|
||||||
|
--- a/heartbeat/aws-vpc-move-ip
|
||||||
|
+++ b/heartbeat/aws-vpc-move-ip
|
||||||
|
@@ -47,6 +47,8 @@ OCF_RESKEY_interface_default="eth0"
|
||||||
|
OCF_RESKEY_iflabel_default=""
|
||||||
|
OCF_RESKEY_monapi_default="false"
|
||||||
|
OCF_RESKEY_lookup_type_default="InstanceId"
|
||||||
|
+OCF_RESKEY_curl_retries_default="3"
|
||||||
|
+OCF_RESKEY_curl_sleep_default="1"
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
|
||||||
|
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
|
||||||
|
@@ -60,6 +62,8 @@ OCF_RESKEY_lookup_type_default="InstanceId"
|
||||||
|
: ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}}
|
||||||
|
: ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}}
|
||||||
|
: ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}}
|
||||||
|
+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
|
||||||
|
+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
|
||||||
|
@@ -194,6 +198,22 @@ Name of resource type to lookup in route table.
|
||||||
|
<content type="string" default="${OCF_RESKEY_lookup_type_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="curl_retries" unique="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+curl retries before failing
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">curl retries</shortdesc>
|
||||||
|
+<content type="integer" default="${OCF_RESKEY_curl_retries_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="curl_sleep" unique="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+curl sleep between tries
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">curl sleep</shortdesc>
|
||||||
|
+<content type="integer" default="${OCF_RESKEY_curl_sleep_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
</parameters>
|
||||||
|
|
||||||
|
<actions>
|
||||||
|
@@ -250,8 +270,10 @@ ec2ip_validate() {
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
- TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
|
||||||
|
- EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
|
||||||
|
+ TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
|
||||||
|
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
+ EC2_INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
|
||||||
|
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
|
||||||
|
if [ -z "${EC2_INSTANCE_ID}" ]; then
|
||||||
|
ocf_exit_reason "Instance ID not found. Is this a EC2 instance?"
|
||||||
|
@@ -365,14 +387,9 @@ ec2ip_get_instance_eni() {
|
||||||
|
fi
|
||||||
|
ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
|
||||||
|
|
||||||
|
- cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id -H \"X-aws-ec2-metadata-token: $TOKEN\""
|
||||||
|
- ocf_log debug "executing command: $cmd"
|
||||||
|
+ cmd="curl_retry \"$OCF_RESKEY_curl_retries\" \"$OCF_RESKEY_curl_sleep\" \"--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'\" \"http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id\""
|
||||||
|
EC2_NETWORK_INTERFACE_ID="$(eval $cmd)"
|
||||||
|
- rc=$?
|
||||||
|
- if [ $rc != 0 ]; then
|
||||||
|
- ocf_log warn "command failed, rc: $rc"
|
||||||
|
- return $OCF_ERR_GENERIC
|
||||||
|
- fi
|
||||||
|
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}"
|
||||||
|
echo $EC2_NETWORK_INTERFACE_ID
|
||||||
|
}
|
||||||
|
diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
|
||||||
|
index 18ab157e8..eba2ed95c 100644
|
||||||
|
--- a/heartbeat/aws-vpc-route53.in
|
||||||
|
+++ b/heartbeat/aws-vpc-route53.in
|
||||||
|
@@ -53,6 +53,8 @@ OCF_RESKEY_hostedzoneid_default=""
|
||||||
|
OCF_RESKEY_fullname_default=""
|
||||||
|
OCF_RESKEY_ip_default="local"
|
||||||
|
OCF_RESKEY_ttl_default=10
|
||||||
|
+OCF_RESKEY_curl_retries_default="3"
|
||||||
|
+OCF_RESKEY_curl_sleep_default="1"
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
|
||||||
|
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
|
||||||
|
@@ -62,6 +64,8 @@ OCF_RESKEY_ttl_default=10
|
||||||
|
: ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}}
|
||||||
|
: ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}}
|
||||||
|
: ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}}
|
||||||
|
+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
|
||||||
|
+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
cat <<-EOT
|
||||||
|
@@ -185,6 +189,22 @@ Time to live for Route53 ARECORD
|
||||||
|
<shortdesc lang="en">ARECORD TTL</shortdesc>
|
||||||
|
<content type="string" default="${OCF_RESKEY_ttl_default}" />
|
||||||
|
</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="curl_retries" unique="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+curl retries before failing
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">curl retries</shortdesc>
|
||||||
|
+<content type="integer" default="${OCF_RESKEY_curl_retries_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="curl_sleep" unique="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+curl sleep between tries
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">curl sleep</shortdesc>
|
||||||
|
+<content type="integer" default="${OCF_RESKEY_curl_sleep_default}" />
|
||||||
|
+</parameter>
|
||||||
|
</parameters>
|
||||||
|
|
||||||
|
<actions>
|
||||||
|
@@ -357,8 +377,11 @@ r53_monitor() {
|
||||||
|
_get_ip() {
|
||||||
|
case $OCF_RESKEY_ip in
|
||||||
|
local|public)
|
||||||
|
- TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
|
||||||
|
- IPADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4 -H "X-aws-ec2-metadata-token: $TOKEN");;
|
||||||
|
+ TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
|
||||||
|
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
+ IPADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4")
|
||||||
|
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
+ ;;
|
||||||
|
*.*.*.*)
|
||||||
|
IPADDRESS="${OCF_RESKEY_ip}";;
|
||||||
|
esac
|
||||||
|
diff --git a/heartbeat/awseip b/heartbeat/awseip
|
||||||
|
index 49b0ca615..ffb6223a1 100755
|
||||||
|
--- a/heartbeat/awseip
|
||||||
|
+++ b/heartbeat/awseip
|
||||||
|
@@ -49,12 +49,16 @@ OCF_RESKEY_auth_type_default="key"
|
||||||
|
OCF_RESKEY_profile_default="default"
|
||||||
|
OCF_RESKEY_region_default=""
|
||||||
|
OCF_RESKEY_api_delay_default="3"
|
||||||
|
+OCF_RESKEY_curl_retries_default="3"
|
||||||
|
+OCF_RESKEY_curl_sleep_default="1"
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
|
||||||
|
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
|
||||||
|
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
|
||||||
|
: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
|
||||||
|
: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
|
||||||
|
+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
|
||||||
|
+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
|
||||||
|
|
||||||
|
meta_data() {
|
||||||
|
cat <<END
|
||||||
|
@@ -141,6 +145,22 @@ a short delay between API calls, to avoid sending API too quick
|
||||||
|
<content type="integer" default="${OCF_RESKEY_api_delay_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="curl_retries" unique="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+curl retries before failing
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">curl retries</shortdesc>
|
||||||
|
+<content type="integer" default="${OCF_RESKEY_curl_retries_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="curl_sleep" unique="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+curl sleep between tries
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">curl sleep</shortdesc>
|
||||||
|
+<content type="integer" default="${OCF_RESKEY_curl_sleep_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
</parameters>
|
||||||
|
|
||||||
|
<actions>
|
||||||
|
@@ -171,14 +191,18 @@ awseip_start() {
|
||||||
|
awseip_monitor && return $OCF_SUCCESS
|
||||||
|
|
||||||
|
if [ -n "${PRIVATE_IP_ADDRESS}" ]; then
|
||||||
|
- NETWORK_INTERFACES_MACS=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/ -H "X-aws-ec2-metadata-token: $TOKEN")
|
||||||
|
+ NETWORK_INTERFACES_MACS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "-s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/")
|
||||||
|
for MAC in ${NETWORK_INTERFACES_MACS}; do
|
||||||
|
- curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/local-ipv4s -H "X-aws-ec2-metadata-token: $TOKEN" |
|
||||||
|
+ curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "-s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC%/*}/local-ipv4s" |
|
||||||
|
grep -q "^${PRIVATE_IP_ADDRESS}$"
|
||||||
|
if [ $? -eq 0 ]; then
|
||||||
|
- NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN")
|
||||||
|
+ NETWORK_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "-s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC%/*}/interface-id")
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
+ if [ -z "$NETWORK_ID" ]; then
|
||||||
|
+ ocf_exit_reason "Could not find network interface for private_ip_address: $PRIVATE_IP_ADDRESS"
|
||||||
|
+ exit $OCF_ERR_GENERIC
|
||||||
|
+ fi
|
||||||
|
$AWSCLI_CMD ec2 associate-address \
|
||||||
|
--network-interface-id ${NETWORK_ID} \
|
||||||
|
--allocation-id ${ALLOCATION_ID} \
|
||||||
|
@@ -282,8 +306,10 @@ fi
|
||||||
|
ELASTIC_IP="${OCF_RESKEY_elastic_ip}"
|
||||||
|
ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
|
||||||
|
PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}"
|
||||||
|
-TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
|
||||||
|
-INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
|
||||||
|
+TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
|
||||||
|
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
+INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
|
||||||
|
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
|
||||||
|
case $__OCF_ACTION in
|
||||||
|
start)
|
||||||
|
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
|
||||||
|
index bdb4d68dd..f2b238a0f 100755
|
||||||
|
--- a/heartbeat/awsvip
|
||||||
|
+++ b/heartbeat/awsvip
|
||||||
|
@@ -48,12 +48,16 @@ OCF_RESKEY_auth_type_default="key"
|
||||||
|
OCF_RESKEY_profile_default="default"
|
||||||
|
OCF_RESKEY_region_default=""
|
||||||
|
OCF_RESKEY_api_delay_default="3"
|
||||||
|
+OCF_RESKEY_curl_retries_default="3"
|
||||||
|
+OCF_RESKEY_curl_sleep_default="1"
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
|
||||||
|
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
|
||||||
|
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
|
||||||
|
: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
|
||||||
|
: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
|
||||||
|
+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
|
||||||
|
+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
|
||||||
|
|
||||||
|
meta_data() {
|
||||||
|
cat <<END
|
||||||
|
@@ -124,6 +128,22 @@ a short delay between API calls, to avoid sending API too quick
|
||||||
|
<content type="integer" default="${OCF_RESKEY_api_delay_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="curl_retries" unique="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+curl retries before failing
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">curl retries</shortdesc>
|
||||||
|
+<content type="integer" default="${OCF_RESKEY_curl_retries_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="curl_sleep" unique="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+curl sleep between tries
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">curl sleep</shortdesc>
|
||||||
|
+<content type="integer" default="${OCF_RESKEY_curl_sleep_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
</parameters>
|
||||||
|
|
||||||
|
<actions>
|
||||||
|
@@ -246,10 +266,14 @@ if [ -n "${OCF_RESKEY_region}" ]; then
|
||||||
|
AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}"
|
||||||
|
fi
|
||||||
|
SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
|
||||||
|
-TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
|
||||||
|
-INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
|
||||||
|
-MAC_ADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/mac -H "X-aws-ec2-metadata-token: $TOKEN")
|
||||||
|
-NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN")
|
||||||
|
+TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
|
||||||
|
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
+INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
|
||||||
|
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
+MAC_ADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/mac")
|
||||||
|
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
+NETWORK_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id")
|
||||||
|
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
|
||||||
|
|
||||||
|
case $__OCF_ACTION in
|
||||||
|
start)
|
@ -0,0 +1,48 @@
|
|||||||
|
From accff72ecc2f6cf5a76d9570198a93ac7c90270e Mon Sep 17 00:00:00 2001
|
||||||
|
From: Quentin Pradet <quentin.pradet@gmail.com>
|
||||||
|
Date: Mon, 17 Jun 2024 11:09:06 +0400
|
||||||
|
Subject: [PATCH] Merge pull request from GHSA-34jh-p97f-mpxf
|
||||||
|
|
||||||
|
* Strip Proxy-Authorization header on redirects
|
||||||
|
|
||||||
|
* Fix test_retry_default_remove_headers_on_redirect
|
||||||
|
|
||||||
|
* Set release date
|
||||||
|
---
|
||||||
|
CHANGES.rst | 5 +++++
|
||||||
|
src/urllib3/util/retry.py | 4 +++-
|
||||||
|
test/test_retry.py | 6 ++++-
|
||||||
|
test/with_dummyserver/test_poolmanager.py | 27 ++++++++++++++++++++---
|
||||||
|
4 files changed, 37 insertions(+), 5 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py
|
||||||
|
index 7a76a4a6ad..0456cceba4 100644
|
||||||
|
--- a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py
|
||||||
|
+++ b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py
|
||||||
|
@@ -189,7 +189,9 @@ class Retry:
|
||||||
|
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
|
||||||
|
|
||||||
|
#: Default headers to be used for ``remove_headers_on_redirect``
|
||||||
|
- DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Cookie", "Authorization"])
|
||||||
|
+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(
|
||||||
|
+ ["Cookie", "Authorization", "Proxy-Authorization"]
|
||||||
|
+ )
|
||||||
|
|
||||||
|
#: Default maximum backoff time.
|
||||||
|
DEFAULT_BACKOFF_MAX = 120
|
||||||
|
|
||||||
|
diff --git a/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py b/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py
|
||||||
|
index 7a76a4a6ad..0456cceba4 100644
|
||||||
|
--- a/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py
|
||||||
|
+++ b/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py
|
||||||
|
@@ -189,7 +189,9 @@ class Retry:
|
||||||
|
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
|
||||||
|
|
||||||
|
#: Default headers to be used for ``remove_headers_on_redirect``
|
||||||
|
- DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Cookie", "Authorization"])
|
||||||
|
+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(
|
||||||
|
+ ["Cookie", "Authorization", "Proxy-Authorization"]
|
||||||
|
+ )
|
||||||
|
|
||||||
|
#: Default maximum backoff time.
|
||||||
|
DEFAULT_BACKOFF_MAX = 120
|
201
SOURCES/RHEL-50360-setuptools-fix-CVE-2024-6345.patch
Normal file
201
SOURCES/RHEL-50360-setuptools-fix-CVE-2024-6345.patch
Normal file
@ -0,0 +1,201 @@
|
|||||||
|
--- a/setuptools/package_index.py 1980-01-01 09:00:00.000000000 +0100
|
||||||
|
+++ b/setuptools/package_index.py 2024-07-25 10:11:40.537307665 +0200
|
||||||
|
@@ -1,5 +1,6 @@
|
||||||
|
"""PyPI and direct package downloading"""
|
||||||
|
import sys
|
||||||
|
+import subprocess
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
@@ -563,7 +564,7 @@
|
||||||
|
scheme = URL_SCHEME(spec)
|
||||||
|
if scheme:
|
||||||
|
# It's a url, download it to tmpdir
|
||||||
|
- found = self._download_url(scheme.group(1), spec, tmpdir)
|
||||||
|
+ found = self._download_url(spec, tmpdir)
|
||||||
|
base, fragment = egg_info_for_url(spec)
|
||||||
|
if base.endswith('.py'):
|
||||||
|
found = self.gen_setup(found, fragment, tmpdir)
|
||||||
|
@@ -775,7 +776,7 @@
|
||||||
|
raise DistutilsError("Download error for %s: %s"
|
||||||
|
% (url, v))
|
||||||
|
|
||||||
|
- def _download_url(self, scheme, url, tmpdir):
|
||||||
|
+ def _download_url(self, url, tmpdir):
|
||||||
|
# Determine download filename
|
||||||
|
#
|
||||||
|
name, fragment = egg_info_for_url(url)
|
||||||
|
@@ -790,19 +791,59 @@
|
||||||
|
|
||||||
|
filename = os.path.join(tmpdir, name)
|
||||||
|
|
||||||
|
- # Download the file
|
||||||
|
- #
|
||||||
|
- if scheme == 'svn' or scheme.startswith('svn+'):
|
||||||
|
- return self._download_svn(url, filename)
|
||||||
|
- elif scheme == 'git' or scheme.startswith('git+'):
|
||||||
|
- return self._download_git(url, filename)
|
||||||
|
- elif scheme.startswith('hg+'):
|
||||||
|
- return self._download_hg(url, filename)
|
||||||
|
- elif scheme == 'file':
|
||||||
|
- return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
|
||||||
|
- else:
|
||||||
|
- self.url_ok(url, True) # raises error if not allowed
|
||||||
|
- return self._attempt_download(url, filename)
|
||||||
|
+ return self._download_vcs(url, filename) or self._download_other(url, filename)
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def _resolve_vcs(url):
|
||||||
|
+ """
|
||||||
|
+ >>> rvcs = PackageIndex._resolve_vcs
|
||||||
|
+ >>> rvcs('git+http://foo/bar')
|
||||||
|
+ 'git'
|
||||||
|
+ >>> rvcs('hg+https://foo/bar')
|
||||||
|
+ 'hg'
|
||||||
|
+ >>> rvcs('git:myhost')
|
||||||
|
+ 'git'
|
||||||
|
+ >>> rvcs('hg:myhost')
|
||||||
|
+ >>> rvcs('http://foo/bar')
|
||||||
|
+ """
|
||||||
|
+ scheme = urllib.parse.urlsplit(url).scheme
|
||||||
|
+ pre, sep, post = scheme.partition('+')
|
||||||
|
+ # svn and git have their own protocol; hg does not
|
||||||
|
+ allowed = set(['svn', 'git'] + ['hg'] * bool(sep))
|
||||||
|
+ return next(iter({pre} & allowed), None)
|
||||||
|
+
|
||||||
|
+ def _download_vcs(self, url, spec_filename):
|
||||||
|
+ vcs = self._resolve_vcs(url)
|
||||||
|
+ if not vcs:
|
||||||
|
+ return
|
||||||
|
+ if vcs == 'svn':
|
||||||
|
+ raise DistutilsError(
|
||||||
|
+ f"Invalid config, SVN download is not supported: {url}"
|
||||||
|
+ )
|
||||||
|
+
|
||||||
|
+ filename, _, _ = spec_filename.partition('#')
|
||||||
|
+ url, rev = self._vcs_split_rev_from_url(url)
|
||||||
|
+
|
||||||
|
+ self.info(f"Doing {vcs} clone from {url} to {filename}")
|
||||||
|
+ subprocess.check_call([vcs, 'clone', '--quiet', url, filename])
|
||||||
|
+
|
||||||
|
+ co_commands = dict(
|
||||||
|
+ git=[vcs, '-C', filename, 'checkout', '--quiet', rev],
|
||||||
|
+ hg=[vcs, '--cwd', filename, 'up', '-C', '-r', rev, '-q'],
|
||||||
|
+ )
|
||||||
|
+ if rev is not None:
|
||||||
|
+ self.info(f"Checking out {rev}")
|
||||||
|
+ subprocess.check_call(co_commands[vcs])
|
||||||
|
+
|
||||||
|
+ return filename
|
||||||
|
+
|
||||||
|
+ def _download_other(self, url, filename):
|
||||||
|
+ scheme = urllib.parse.urlsplit(url).scheme
|
||||||
|
+ if scheme == 'file': # pragma: no cover
|
||||||
|
+ return urllib.request.url2pathname(urllib.parse.urlparse(url).path)
|
||||||
|
+ # raise error if not allowed
|
||||||
|
+ self.url_ok(url, True)
|
||||||
|
+ return self._attempt_download(url, filename)
|
||||||
|
|
||||||
|
def scan_url(self, url):
|
||||||
|
self.process_url(url, True)
|
||||||
|
@@ -829,76 +870,37 @@
|
||||||
|
os.unlink(filename)
|
||||||
|
raise DistutilsError("Unexpected HTML page found at " + url)
|
||||||
|
|
||||||
|
- def _download_svn(self, url, filename):
|
||||||
|
- url = url.split('#', 1)[0] # remove any fragment for svn's sake
|
||||||
|
- creds = ''
|
||||||
|
- if url.lower().startswith('svn:') and '@' in url:
|
||||||
|
- scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
|
||||||
|
- if not netloc and path.startswith('//') and '/' in path[2:]:
|
||||||
|
- netloc, path = path[2:].split('/', 1)
|
||||||
|
- auth, host = splituser(netloc)
|
||||||
|
- if auth:
|
||||||
|
- if ':' in auth:
|
||||||
|
- user, pw = auth.split(':', 1)
|
||||||
|
- creds = " --username=%s --password=%s" % (user, pw)
|
||||||
|
- else:
|
||||||
|
- creds = " --username=" + auth
|
||||||
|
- netloc = host
|
||||||
|
- parts = scheme, netloc, url, p, q, f
|
||||||
|
- url = urllib.parse.urlunparse(parts)
|
||||||
|
- self.info("Doing subversion checkout from %s to %s", url, filename)
|
||||||
|
- os.system("svn checkout%s -q %s %s" % (creds, url, filename))
|
||||||
|
- return filename
|
||||||
|
-
|
||||||
|
@staticmethod
|
||||||
|
- def _vcs_split_rev_from_url(url, pop_prefix=False):
|
||||||
|
- scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
|
||||||
|
-
|
||||||
|
- scheme = scheme.split('+', 1)[-1]
|
||||||
|
-
|
||||||
|
- # Some fragment identification fails
|
||||||
|
- path = path.split('#', 1)[0]
|
||||||
|
-
|
||||||
|
- rev = None
|
||||||
|
- if '@' in path:
|
||||||
|
- path, rev = path.rsplit('@', 1)
|
||||||
|
-
|
||||||
|
- # Also, discard fragment
|
||||||
|
- url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
|
||||||
|
-
|
||||||
|
- return url, rev
|
||||||
|
-
|
||||||
|
- def _download_git(self, url, filename):
|
||||||
|
- filename = filename.split('#', 1)[0]
|
||||||
|
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
|
||||||
|
-
|
||||||
|
- self.info("Doing git clone from %s to %s", url, filename)
|
||||||
|
- os.system("git clone --quiet %s %s" % (url, filename))
|
||||||
|
+ def _vcs_split_rev_from_url(url):
|
||||||
|
+ """
|
||||||
|
+ Given a possible VCS URL, return a clean URL and resolved revision if any.
|
||||||
|
|
||||||
|
- if rev is not None:
|
||||||
|
- self.info("Checking out %s", rev)
|
||||||
|
- os.system("(cd %s && git checkout --quiet %s)" % (
|
||||||
|
- filename,
|
||||||
|
- rev,
|
||||||
|
- ))
|
||||||
|
+ >>> vsrfu = PackageIndex._vcs_split_rev_from_url
|
||||||
|
+ >>> vsrfu('git+https://github.com/pypa/setuptools@v69.0.0#egg-info=setuptools')
|
||||||
|
+ ('https://github.com/pypa/setuptools', 'v69.0.0')
|
||||||
|
+ >>> vsrfu('git+https://github.com/pypa/setuptools#egg-info=setuptools')
|
||||||
|
+ ('https://github.com/pypa/setuptools', None)
|
||||||
|
+ >>> vsrfu('http://foo/bar')
|
||||||
|
+ ('http://foo/bar', None)
|
||||||
|
+ """
|
||||||
|
+ parts = urllib.parse.urlsplit(url)
|
||||||
|
|
||||||
|
- return filename
|
||||||
|
+ clean_scheme = parts.scheme.split('+', 1)[-1]
|
||||||
|
|
||||||
|
- def _download_hg(self, url, filename):
|
||||||
|
- filename = filename.split('#', 1)[0]
|
||||||
|
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
|
||||||
|
+ # Some fragment identification fails
|
||||||
|
+ no_fragment_path, _, _ = parts.path.partition('#')
|
||||||
|
|
||||||
|
- self.info("Doing hg clone from %s to %s", url, filename)
|
||||||
|
- os.system("hg clone --quiet %s %s" % (url, filename))
|
||||||
|
+ pre, sep, post = no_fragment_path.rpartition('@')
|
||||||
|
+ clean_path, rev = (pre, post) if sep else (post, None)
|
||||||
|
|
||||||
|
- if rev is not None:
|
||||||
|
- self.info("Updating to %s", rev)
|
||||||
|
- os.system("(cd %s && hg up -C -r %s >&-)" % (
|
||||||
|
- filename,
|
||||||
|
- rev,
|
||||||
|
- ))
|
||||||
|
+ resolved = parts._replace(
|
||||||
|
+ scheme=clean_scheme,
|
||||||
|
+ path=clean_path,
|
||||||
|
+ # discard the fragment
|
||||||
|
+ fragment='',
|
||||||
|
+ ).geturl()
|
||||||
|
|
||||||
|
- return filename
|
||||||
|
+ return resolved, rev
|
||||||
|
|
||||||
|
def debug(self, msg, *args):
|
||||||
|
log.debug(msg, *args)
|
@ -0,0 +1,38 @@
|
|||||||
|
From 38eaf00bc81af7530c56eba282918762a47a9326 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Thu, 19 Sep 2024 13:01:53 +0200
|
||||||
|
Subject: [PATCH] nfsserver: also stop rpc-statd for nfsv4_only to avoid stop
|
||||||
|
failing in some cases
|
||||||
|
|
||||||
|
E.g. nfs_no_notify=true nfsv4_only=true nfs_shared_infodir=/nfsmq/nfsinfo would cause a "Failed to unmount a bind mount" error
|
||||||
|
---
|
||||||
|
heartbeat/nfsserver | 16 +++++++---------
|
||||||
|
1 file changed, 7 insertions(+), 9 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
|
||||||
|
index 5793d7a70..fd9268afc 100755
|
||||||
|
--- a/heartbeat/nfsserver
|
||||||
|
+++ b/heartbeat/nfsserver
|
||||||
|
@@ -947,15 +947,13 @@ nfsserver_stop ()
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
- if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
|
||||||
|
- nfs_exec stop rpc-statd > /dev/null 2>&1
|
||||||
|
- ocf_log info "Stop: rpc-statd"
|
||||||
|
- rpcinfo -t localhost 100024 > /dev/null 2>&1
|
||||||
|
- rc=$?
|
||||||
|
- if [ "$rc" -eq "0" ]; then
|
||||||
|
- ocf_exit_reason "Failed to stop rpc-statd"
|
||||||
|
- return $OCF_ERR_GENERIC
|
||||||
|
- fi
|
||||||
|
+ nfs_exec stop rpc-statd > /dev/null 2>&1
|
||||||
|
+ ocf_log info "Stop: rpc-statd"
|
||||||
|
+ rpcinfo -t localhost 100024 > /dev/null 2>&1
|
||||||
|
+ rc=$?
|
||||||
|
+ if [ "$rc" -eq "0" ]; then
|
||||||
|
+ ocf_exit_reason "Failed to stop rpc-statd"
|
||||||
|
+ return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
nfs_exec stop nfs-idmapd > /dev/null 2>&1
|
@ -0,0 +1,22 @@
|
|||||||
|
From 4b09b3e467a7f8076bbf20f5b027efecf16303e7 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Gianluca Piccolo <gianluca.piccolo@wuerth-phoenix.com>
|
||||||
|
Date: Thu, 6 Jun 2024 17:34:41 +0200
|
||||||
|
Subject: [PATCH] Fix #1944
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/Filesystem | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
|
||||||
|
index a445349b9..59b6c1b51 100755
|
||||||
|
--- a/heartbeat/Filesystem
|
||||||
|
+++ b/heartbeat/Filesystem
|
||||||
|
@@ -664,7 +664,7 @@ get_pids()
|
||||||
|
if [ "X${HOSTOS}" = "XOpenBSD" ];then
|
||||||
|
fstat | grep $dir | awk '{print $3}'
|
||||||
|
else
|
||||||
|
- $FUSER -m $dir 2>/dev/null
|
||||||
|
+ $FUSER -Mm $dir 2>/dev/null
|
||||||
|
fi
|
||||||
|
elif [ "$FORCE_UNMOUNT" = "safe" ]; then
|
||||||
|
procs=$(find /proc/[0-9]*/ -type l -lname "${dir}/*" -or -lname "${dir}" 2>/dev/null | awk -F/ '{print $3}')
|
26
SOURCES/RHEL-69297-2-Filesystem-update-bsd-logic.patch
Normal file
26
SOURCES/RHEL-69297-2-Filesystem-update-bsd-logic.patch
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
From c9ba6ac66ee27a70c69e1156f17aa6beac277bc5 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Fri, 7 Jun 2024 14:23:28 +0200
|
||||||
|
Subject: [PATCH] Filesystem: use fuser -c on FreeBSD, as -m and -M are used
|
||||||
|
for other functionality
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/Filesystem | 4 +++-
|
||||||
|
1 file changed, 3 insertions(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
|
||||||
|
index 59b6c1b51..88fe2e2eb 100755
|
||||||
|
--- a/heartbeat/Filesystem
|
||||||
|
+++ b/heartbeat/Filesystem
|
||||||
|
@@ -661,8 +661,10 @@ get_pids()
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ocf_is_true "$FORCE_UNMOUNT"; then
|
||||||
|
- if [ "X${HOSTOS}" = "XOpenBSD" ];then
|
||||||
|
+ if [ "X${HOSTOS}" = "XOpenBSD" ]; then
|
||||||
|
fstat | grep $dir | awk '{print $3}'
|
||||||
|
+ elif [ "X${HOSTOS}" = "XFreeBSD" ]; then
|
||||||
|
+ $FUSER -c $dir 2>/dev/null
|
||||||
|
else
|
||||||
|
$FUSER -Mm $dir 2>/dev/null
|
||||||
|
fi
|
15
SOURCES/aliyun-vpc-move-ip-4-bundled.patch
Normal file
15
SOURCES/aliyun-vpc-move-ip-4-bundled.patch
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
--- a/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:45:38.432860930 +0200
|
||||||
|
+++ b/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:51:06.341211557 +0200
|
||||||
|
@@ -35,10 +35,10 @@
|
||||||
|
USAGE="usage: $0 {start|stop|status|meta-data}";
|
||||||
|
|
||||||
|
if [ "${OCF_RESKEY_aliyuncli}" = "detect" ]; then
|
||||||
|
- OCF_RESKEY_aliyuncli="$(which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)"
|
||||||
|
+ OCF_RESKEY_aliyuncli="$(which aliyuncli-ra 2> /dev/null || which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
-if [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli' ]; then
|
||||||
|
+if [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli-ra' ] || [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli' ]; then
|
||||||
|
OUTPUT="text"
|
||||||
|
EXECUTING='{ print $3 }'
|
||||||
|
IFS_=" "
|
398
SOURCES/aliyuncli-python3-fixes.patch
Normal file
398
SOURCES/aliyuncli-python3-fixes.patch
Normal file
@ -0,0 +1,398 @@
|
|||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 12:08:17.331785393 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 14:40:39.656330971 +0100
|
||||||
|
@@ -13,7 +13,7 @@
|
||||||
|
|
||||||
|
def getFileName(self,keyValues):
|
||||||
|
filename = None
|
||||||
|
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
|
||||||
|
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
|
||||||
|
filename = keyValues['--filename'][0]
|
||||||
|
else:
|
||||||
|
return filename, "A file name is needed! please use \'--filename\' and add the file name."
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 12:08:17.331785393 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 14:41:48.927128430 +0100
|
||||||
|
@@ -13,7 +13,7 @@
|
||||||
|
|
||||||
|
def getFileName(self,keyValues):
|
||||||
|
filename = None
|
||||||
|
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
|
||||||
|
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
|
||||||
|
filename = keyValues['--filename'][0]
|
||||||
|
else:
|
||||||
|
print("A profile is needed! please use \'--filename\' and add the profile name.")
|
||||||
|
@@ -21,7 +21,7 @@
|
||||||
|
|
||||||
|
def getInstanceCount(self,keyValues):
|
||||||
|
count = 1
|
||||||
|
- if keyValues.has_key('--instancecount') and len(keyValues['--instancecount']) > 0:
|
||||||
|
+ if '--instancecount' in keyValues and len(keyValues['--instancecount']) > 0:
|
||||||
|
if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0:
|
||||||
|
count = keyValues['--instancecount'][0]
|
||||||
|
else:
|
||||||
|
@@ -113,7 +113,7 @@
|
||||||
|
|
||||||
|
def isAllocatePublicIpAddress(self,keyValues):
|
||||||
|
_publicIp = False
|
||||||
|
- if keyValues.has_key('--allocatepublicip') and len(keyValues['--allocatepublicip']) > 0:
|
||||||
|
+ if '--allocatepublicip' in keyValues and len(keyValues['--allocatepublicip']) > 0:
|
||||||
|
if keyValues['--allocatepublicip'][0] == "yes":
|
||||||
|
_publicIp = True
|
||||||
|
return _publicIp
|
||||||
|
@@ -125,7 +125,7 @@
|
||||||
|
'''
|
||||||
|
data = json.loads(jsonbody)
|
||||||
|
'''
|
||||||
|
- if data.has_key('InstanceId') and len(data['InstanceId']) > 0:
|
||||||
|
+ if 'InstanceId' in data and len(data['InstanceId']) > 0:
|
||||||
|
instanceId = data['InstanceId']
|
||||||
|
except Exception as e:
|
||||||
|
pass
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 12:08:17.331785393 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 14:42:11.772731833 +0100
|
||||||
|
@@ -38,7 +38,7 @@
|
||||||
|
|
||||||
|
def getFileName(self,keyValues):
|
||||||
|
filename = None
|
||||||
|
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
|
||||||
|
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
|
||||||
|
filename = keyValues['--filename'][0]
|
||||||
|
else:
|
||||||
|
return filename, "A file name is needed! please use \'--filename\' and add the file name."
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 12:08:17.331785393 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 14:39:09.247900469 +0100
|
||||||
|
@@ -13,7 +13,7 @@
|
||||||
|
|
||||||
|
def getFileName(self,keyValues):
|
||||||
|
filename = None
|
||||||
|
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
|
||||||
|
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
|
||||||
|
filename = keyValues['--filename'][0]
|
||||||
|
else:
|
||||||
|
return filename, "A filename is needed! please use \'--filename\' and add the file name."
|
||||||
|
@@ -21,7 +21,7 @@
|
||||||
|
def getInstanceCount(self,keyValues):
|
||||||
|
count = 1
|
||||||
|
import_count = "--count"
|
||||||
|
- if keyValues.has_key(import_count) and len(keyValues[import_count]) > 0:
|
||||||
|
+ if import_count in keyValues and len(keyValues[import_count]) > 0:
|
||||||
|
if keyValues[import_count][0].isdigit() and int(keyValues[import_count][0]) >= 0:
|
||||||
|
count = keyValues[import_count][0]
|
||||||
|
else:
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2019-02-19 11:01:46.116653274 +0100
|
||||||
|
@@ -17,37 +17,37 @@
|
||||||
|
|
||||||
|
def getConfigHandlerOptions(self):
|
||||||
|
return [ConfigCmd.name]
|
||||||
|
-
|
||||||
|
+
|
||||||
|
def showConfig(self):
|
||||||
|
_credentialsPath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.credentials)
|
||||||
|
_configurePath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.configure)
|
||||||
|
config = dict()
|
||||||
|
configContent = dict()
|
||||||
|
- credentialsContent = dict ()
|
||||||
|
- if os.path.exists(_configurePath):
|
||||||
|
+ credentialsContent = dict ()
|
||||||
|
+ if os.path.exists(_configurePath):
|
||||||
|
for line in open(_configurePath):
|
||||||
|
line = line.strip('\n')
|
||||||
|
if line.find('=') > 0:
|
||||||
|
list = line.split("=",1)
|
||||||
|
- configContent[list[0]] = list[1]
|
||||||
|
- else:
|
||||||
|
- pass
|
||||||
|
- config['configure'] = configContent
|
||||||
|
- if os.path.exists(_credentialsPath):
|
||||||
|
- for line in open(_credentialsPath):
|
||||||
|
+ configContent[list[0]] = list[1]
|
||||||
|
+ else:
|
||||||
|
+ pass
|
||||||
|
+ config['configure'] = configContent
|
||||||
|
+ if os.path.exists(_credentialsPath):
|
||||||
|
+ for line in open(_credentialsPath):
|
||||||
|
line = line.strip('\n')
|
||||||
|
if line.find('=') > 0:
|
||||||
|
list = line.split("=",1)
|
||||||
|
- credentialsContent[list[0]] = list[1]
|
||||||
|
- else:
|
||||||
|
- pass
|
||||||
|
- config ['credentials'] = credentialsContent
|
||||||
|
- response.display_response("showConfigure",config,'table')
|
||||||
|
+ credentialsContent[list[0]] = list[1]
|
||||||
|
+ else:
|
||||||
|
+ pass
|
||||||
|
+ config ['credentials'] = credentialsContent
|
||||||
|
+ response.display_response("showConfigure",config,'table')
|
||||||
|
def importConfig():
|
||||||
|
pass
|
||||||
|
def exportConfig():
|
||||||
|
pass
|
||||||
|
-
|
||||||
|
+
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 12:08:17.332785376 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 14:40:12.267806439 +0100
|
||||||
|
@@ -20,7 +20,7 @@
|
||||||
|
def handleProfileCmd(self, cmd, keyValues):
|
||||||
|
if cmd.lower() == ProfileCmd.useProfile.lower(): # confirm command is right
|
||||||
|
#check --name is valid
|
||||||
|
- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0:
|
||||||
|
+ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0:
|
||||||
|
_value = keyValues[ProfileCmd.name][0] # use the first value
|
||||||
|
self.extensionCliHandler.setUserProfile(_value)
|
||||||
|
else:
|
||||||
|
@@ -34,7 +34,7 @@
|
||||||
|
newProfileName = ''
|
||||||
|
if cmd.lower() == ProfileCmd.addProfile.lower(): # confirm command is right
|
||||||
|
#check --name is valid
|
||||||
|
- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0:
|
||||||
|
+ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0:
|
||||||
|
_value = keyValues[ProfileCmd.name][0] # check the first value
|
||||||
|
# only input key and secret
|
||||||
|
newProfileName = _value
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 12:08:17.332785376 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 14:35:32.009660989 +0100
|
||||||
|
@@ -137,9 +137,9 @@
|
||||||
|
values.append(self.args[index])
|
||||||
|
index = index + 1
|
||||||
|
keyValues[currentValue] = values
|
||||||
|
- if keyValues.has_key(keystr) and keyValues[keystr].__len__() > 0:
|
||||||
|
+ if keystr in keyValues and keyValues[keystr].__len__() > 0:
|
||||||
|
_key = keyValues[keystr][0]
|
||||||
|
- if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0:
|
||||||
|
+ if secretstr in keyValues and keyValues[secretstr].__len__() > 0:
|
||||||
|
_secret = keyValues[secretstr][0]
|
||||||
|
#print("accesskeyid: ", _key , "accesskeysecret: ",_secret)
|
||||||
|
return _key, _secret
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2019-02-19 13:35:35.738680413 +0100
|
||||||
|
@@ -19,8 +19,9 @@
|
||||||
|
'''
|
||||||
|
|
||||||
|
import sys
|
||||||
|
-reload(sys)
|
||||||
|
-sys.setdefaultencoding('utf-8')
|
||||||
|
+if sys.version_info[0] < 3:
|
||||||
|
+ reload(sys)
|
||||||
|
+ sys.setdefaultencoding('utf-8')
|
||||||
|
__author__ = 'xixi.xxx'
|
||||||
|
import aliyunCliMain
|
||||||
|
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 12:08:17.332785376 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 11:15:19.920089641 +0100
|
||||||
|
@@ -18,7 +18,7 @@
|
||||||
|
'''
|
||||||
|
|
||||||
|
import aliyunCliConfiugre
|
||||||
|
-import urllib2
|
||||||
|
+import urllib3
|
||||||
|
import re
|
||||||
|
import os
|
||||||
|
import platform
|
||||||
|
@@ -151,7 +151,7 @@
|
||||||
|
# this functino will get the latest version
|
||||||
|
def _getLatestTimeFromServer(self):
|
||||||
|
try:
|
||||||
|
- f = urllib2.urlopen(self.configure.server_url,data=None,timeout=5)
|
||||||
|
+ f = urllib3.urlopen(self.configure.server_url,data=None,timeout=5)
|
||||||
|
s = f.read()
|
||||||
|
return s
|
||||||
|
except Exception as e:
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 12:08:17.332785376 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 14:37:28.221649497 +0100
|
||||||
|
@@ -26,7 +26,7 @@
|
||||||
|
import aliyunSdkConfigure
|
||||||
|
import json
|
||||||
|
import cliError
|
||||||
|
-import urllib2
|
||||||
|
+import urllib3
|
||||||
|
import handleEndPoint
|
||||||
|
|
||||||
|
from __init__ import __version__
|
||||||
|
@@ -259,7 +259,7 @@
|
||||||
|
def changeEndPoint(self, classname, keyValues):
|
||||||
|
endpoint = "Endpoint"
|
||||||
|
try:
|
||||||
|
- if keyValues.has_key(endpoint) and keyValues[endpoint].__len__() > 0:
|
||||||
|
+ if endpoint in keyValues and keyValues[endpoint].__len__() > 0:
|
||||||
|
classname._RestApi__domain = keyValues[endpoint][0]
|
||||||
|
except Exception as e:
|
||||||
|
pass
|
||||||
|
@@ -444,10 +444,10 @@
|
||||||
|
|
||||||
|
def getTempVersion(self,keyValues):
|
||||||
|
key='--version'
|
||||||
|
- if keyValues is not None and keyValues.has_key(key):
|
||||||
|
+ if keyValues is not None and key in keyValues:
|
||||||
|
return keyValues.get(key)
|
||||||
|
key = 'version'
|
||||||
|
- if keyValues is not None and keyValues.has_key(key):
|
||||||
|
+ if keyValues is not None and key in keyValues:
|
||||||
|
return keyValues.get(key)
|
||||||
|
|
||||||
|
def getVersionFromFile(self,cmd):
|
||||||
|
@@ -513,7 +513,7 @@
|
||||||
|
self.checkForServer(response,cmd,operation)
|
||||||
|
def getRequestId(self,response):
|
||||||
|
try:
|
||||||
|
- if response.has_key('RequestId') and len(response['RequestId']) > 0:
|
||||||
|
+ if 'RequestId' in response and len(response['RequestId']) > 0:
|
||||||
|
requestId = response['RequestId']
|
||||||
|
return requestId
|
||||||
|
except Exception:
|
||||||
|
@@ -532,7 +532,7 @@
|
||||||
|
ua = ""
|
||||||
|
url = configure.server_url + "?requesId=" + requestId + "&ak=" + ak +"&ua="+ua+"&cmd="+cmd+"&operation="+operation
|
||||||
|
try:
|
||||||
|
- f = urllib2.urlopen(url,data=None,timeout=5)
|
||||||
|
+ f = urllib3.urlopen(url,data=None,timeout=5)
|
||||||
|
s = f.read()
|
||||||
|
return s
|
||||||
|
except Exception :
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 12:08:17.333785359 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 14:38:04.032029661 +0100
|
||||||
|
@@ -39,7 +39,7 @@
|
||||||
|
|
||||||
|
def sdkConfigure(self,cmd,operation):
|
||||||
|
keyValues = self.parser._getKeyValues()
|
||||||
|
- if keyValues.has_key('--version') and len(keyValues['--version']) > 0:
|
||||||
|
+ if '--version' in keyValues and len(keyValues['--version']) > 0:
|
||||||
|
version=keyValues['--version'][0]
|
||||||
|
filename=self.fileName
|
||||||
|
self.writeCmdVersionToFile(cmd,version,filename)
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 12:08:17.333785359 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 11:12:58.670708353 +0100
|
||||||
|
@@ -23,6 +23,8 @@
|
||||||
|
import aliyunCliParser
|
||||||
|
import platform
|
||||||
|
|
||||||
|
+if sys.version_info[0] > 2:
|
||||||
|
+ raw_input = input
|
||||||
|
|
||||||
|
OSS_CREDS_FILENAME = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~')
|
||||||
|
OSS_CONFIG_SECTION = 'OSSCredentials'
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 12:08:17.333785359 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 11:14:58.926181598 +0100
|
||||||
|
@@ -19,7 +19,7 @@
|
||||||
|
#/usr/bin/env python
|
||||||
|
#!-*- coding:utf-8 -*-
|
||||||
|
import os
|
||||||
|
-import urllib2
|
||||||
|
+import urllib3
|
||||||
|
import cliError
|
||||||
|
|
||||||
|
|
||||||
|
@@ -64,9 +64,9 @@
|
||||||
|
print(e)
|
||||||
|
def _getParamFromUrl(prefix,value,mode):
|
||||||
|
|
||||||
|
- req = urllib2.Request(value)
|
||||||
|
+ req = urllib3.Request(value)
|
||||||
|
try:
|
||||||
|
- response=urllib2.urlopen(req)
|
||||||
|
+ response=urllib3.urlopen(req)
|
||||||
|
if response.getcode() == 200:
|
||||||
|
return response.read()
|
||||||
|
else:
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/six.py b/bundled/aliyun/aliyun-cli/aliyuncli/six.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2019-02-19 11:14:40.505262286 +0100
|
||||||
|
@@ -340,8 +340,8 @@
|
||||||
|
|
||||||
|
|
||||||
|
_urllib_error_moved_attributes = [
|
||||||
|
- MovedAttribute("URLError", "urllib2", "urllib.error"),
|
||||||
|
- MovedAttribute("HTTPError", "urllib2", "urllib.error"),
|
||||||
|
+ MovedAttribute("URLError", "urllib3", "urllib.error"),
|
||||||
|
+ MovedAttribute("HTTPError", "urllib3", "urllib.error"),
|
||||||
|
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
|
||||||
|
]
|
||||||
|
for attr in _urllib_error_moved_attributes:
|
||||||
|
@@ -359,34 +359,34 @@
|
||||||
|
|
||||||
|
|
||||||
|
_urllib_request_moved_attributes = [
|
||||||
|
- MovedAttribute("urlopen", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("install_opener", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("build_opener", "urllib2", "urllib.request"),
|
||||||
|
+ MovedAttribute("urlopen", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("install_opener", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("build_opener", "urllib3", "urllib.request"),
|
||||||
|
MovedAttribute("pathname2url", "urllib", "urllib.request"),
|
||||||
|
MovedAttribute("url2pathname", "urllib", "urllib.request"),
|
||||||
|
MovedAttribute("getproxies", "urllib", "urllib.request"),
|
||||||
|
- MovedAttribute("Request", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("FileHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
|
||||||
|
- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
|
||||||
|
+ MovedAttribute("Request", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("OpenerDirector", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("HTTPRedirectHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("HTTPCookieProcessor", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("ProxyHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("BaseHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("HTTPPasswordMgr", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("AbstractBasicAuthHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("HTTPBasicAuthHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("ProxyBasicAuthHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("AbstractDigestAuthHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("HTTPDigestAuthHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("ProxyDigestAuthHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("HTTPHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("HTTPSHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("FileHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("FTPHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("CacheFTPHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("UnknownHandler", "urllib3", "urllib.request"),
|
||||||
|
+ MovedAttribute("HTTPErrorProcessor", "urllib3", "urllib.request"),
|
||||||
|
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
|
||||||
|
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
|
||||||
|
MovedAttribute("URLopener", "urllib", "urllib.request"),
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/setup.py b/bundled/aliyun/aliyun-cli/setup.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/setup.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/setup.py 2019-02-19 13:33:29.069848394 +0100
|
||||||
|
@@ -24,7 +24,7 @@
|
||||||
|
|
||||||
|
install_requires = [
|
||||||
|
'colorama>=0.2.5,<=0.3.3',
|
||||||
|
- 'jmespath>=0.7.0,<=0.7.1',
|
||||||
|
+ 'jmespath>=0.7.0',
|
||||||
|
]
|
||||||
|
def main():
|
||||||
|
setup(
|
14
SOURCES/bz1691456-gcloud-dont-detect-python2.patch
Normal file
14
SOURCES/bz1691456-gcloud-dont-detect-python2.patch
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
--- a/bundled/gcp/google-cloud-sdk/bin/gcloud 1980-01-01 09:00:00.000000000 +0100
|
||||||
|
+++ b/bundled/gcp/google-cloud-sdk/bin/gcloud 2021-10-14 11:30:17.726138166 +0200
|
||||||
|
@@ -128,6 +128,11 @@
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
+if [ -z "$CLOUDSDK_PYTHON" ]; then
|
||||||
|
+ CLOUDSDK_PYTHON="/usr/libexec/platform-python"
|
||||||
|
+ CLOUDSDK_PYTHON_SITEPACKAGES=1
|
||||||
|
+fi
|
||||||
|
+
|
||||||
|
setup_cloudsdk_python
|
||||||
|
|
||||||
|
# $PYTHONHOME can interfere with gcloud. Users should use
|
@ -1,7 +1,7 @@
|
|||||||
diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
|
diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
|
||||||
--- a/doc/man/Makefile.am 2023-10-11 09:03:53.000000000 +0200
|
--- a/doc/man/Makefile.am 2021-04-12 12:51:56.831835953 +0200
|
||||||
+++ b/doc/man/Makefile.am 2024-06-12 09:14:42.898393461 +0200
|
+++ b/doc/man/Makefile.am 2021-04-13 13:38:14.198361848 +0200
|
||||||
@@ -184,6 +184,7 @@
|
@@ -154,6 +154,7 @@
|
||||||
ocf_heartbeat_ovsmonitor.7 \
|
ocf_heartbeat_ovsmonitor.7 \
|
||||||
ocf_heartbeat_pgagent.7 \
|
ocf_heartbeat_pgagent.7 \
|
||||||
ocf_heartbeat_pgsql.7 \
|
ocf_heartbeat_pgsql.7 \
|
||||||
@ -10,9 +10,9 @@ diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
|
|||||||
ocf_heartbeat_podman.7 \
|
ocf_heartbeat_podman.7 \
|
||||||
ocf_heartbeat_portblock.7 \
|
ocf_heartbeat_portblock.7 \
|
||||||
diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
|
diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
|
||||||
--- a/heartbeat/Makefile.am 2023-10-11 09:03:53.000000000 +0200
|
--- a/heartbeat/Makefile.am 2021-04-12 12:51:56.831835953 +0200
|
||||||
+++ b/heartbeat/Makefile.am 2024-06-12 09:14:42.898393461 +0200
|
+++ b/heartbeat/Makefile.am 2021-04-13 13:37:45.741292178 +0200
|
||||||
@@ -156,6 +156,7 @@
|
@@ -149,6 +149,7 @@
|
||||||
ovsmonitor \
|
ovsmonitor \
|
||||||
pgagent \
|
pgagent \
|
||||||
pgsql \
|
pgsql \
|
||||||
@ -20,7 +20,7 @@ diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
|
|||||||
pingd \
|
pingd \
|
||||||
podman \
|
podman \
|
||||||
portblock \
|
portblock \
|
||||||
@@ -224,7 +225,10 @@
|
@@ -209,7 +210,10 @@
|
||||||
mysql-common.sh \
|
mysql-common.sh \
|
||||||
nfsserver-redhat.sh \
|
nfsserver-redhat.sh \
|
||||||
findif.sh \
|
findif.sh \
|
||||||
@ -34,7 +34,7 @@ diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
|
|||||||
hbdir = $(sysconfdir)/ha.d
|
hbdir = $(sysconfdir)/ha.d
|
||||||
diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm
|
diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm
|
||||||
--- a/heartbeat/OCF_Directories.pm 1970-01-01 01:00:00.000000000 +0100
|
--- a/heartbeat/OCF_Directories.pm 1970-01-01 01:00:00.000000000 +0100
|
||||||
+++ b/heartbeat/OCF_Directories.pm 2024-06-12 09:23:45.434638170 +0200
|
+++ b/heartbeat/OCF_Directories.pm 2021-04-13 13:37:35.621267404 +0200
|
||||||
@@ -0,0 +1,139 @@
|
@@ -0,0 +1,139 @@
|
||||||
+#!/usr/bin/perl
|
+#!/usr/bin/perl
|
||||||
+# This program is open source, licensed under the PostgreSQL License.
|
+# This program is open source, licensed under the PostgreSQL License.
|
||||||
@ -146,7 +146,7 @@ diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm
|
|||||||
+ our @EXPORT_OK = ( @EXPORT );
|
+ our @EXPORT_OK = ( @EXPORT );
|
||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
+our $INITDIR = ( $ENV{'INITDIR'} || '/etc/rc.d/init.d' );
|
+our $INITDIR = ( $ENV{'INITDIR'} || '/etc/init.d' );
|
||||||
+our $HA_DIR = ( $ENV{'HA_DIR'} || '/etc/ha.d' );
|
+our $HA_DIR = ( $ENV{'HA_DIR'} || '/etc/ha.d' );
|
||||||
+our $HA_RCDIR = ( $ENV{'HA_RCDIR'} || '/etc/ha.d/rc.d' );
|
+our $HA_RCDIR = ( $ENV{'HA_RCDIR'} || '/etc/ha.d/rc.d' );
|
||||||
+our $HA_CONFDIR = ( $ENV{'HA_CONFDIR'} || '/etc/ha.d/conf' );
|
+our $HA_CONFDIR = ( $ENV{'HA_CONFDIR'} || '/etc/ha.d/conf' );
|
||||||
@ -177,7 +177,7 @@ diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm
|
|||||||
+
|
+
|
||||||
diff --color -uNr a/heartbeat/OCF_Functions.pm b/heartbeat/OCF_Functions.pm
|
diff --color -uNr a/heartbeat/OCF_Functions.pm b/heartbeat/OCF_Functions.pm
|
||||||
--- a/heartbeat/OCF_Functions.pm 1970-01-01 01:00:00.000000000 +0100
|
--- a/heartbeat/OCF_Functions.pm 1970-01-01 01:00:00.000000000 +0100
|
||||||
+++ b/heartbeat/OCF_Functions.pm 2023-01-04 12:25:21.724889658 +0100
|
+++ b/heartbeat/OCF_Functions.pm 2021-04-13 13:37:35.621267404 +0200
|
||||||
@@ -0,0 +1,631 @@
|
@@ -0,0 +1,631 @@
|
||||||
+#!/usr/bin/perl
|
+#!/usr/bin/perl
|
||||||
+# This program is open source, licensed under the PostgreSQL License.
|
+# This program is open source, licensed under the PostgreSQL License.
|
||||||
@ -812,7 +812,7 @@ diff --color -uNr a/heartbeat/OCF_Functions.pm b/heartbeat/OCF_Functions.pm
|
|||||||
+Licensed under the PostgreSQL License.
|
+Licensed under the PostgreSQL License.
|
||||||
diff --color -uNr a/heartbeat/OCF_ReturnCodes.pm b/heartbeat/OCF_ReturnCodes.pm
|
diff --color -uNr a/heartbeat/OCF_ReturnCodes.pm b/heartbeat/OCF_ReturnCodes.pm
|
||||||
--- a/heartbeat/OCF_ReturnCodes.pm 1970-01-01 01:00:00.000000000 +0100
|
--- a/heartbeat/OCF_ReturnCodes.pm 1970-01-01 01:00:00.000000000 +0100
|
||||||
+++ b/heartbeat/OCF_ReturnCodes.pm 2023-01-04 12:25:21.724889658 +0100
|
+++ b/heartbeat/OCF_ReturnCodes.pm 2021-04-13 13:37:35.621267404 +0200
|
||||||
@@ -0,0 +1,97 @@
|
@@ -0,0 +1,97 @@
|
||||||
+#!/usr/bin/perl
|
+#!/usr/bin/perl
|
||||||
+# This program is open source, licensed under the PostgreSQL License.
|
+# This program is open source, licensed under the PostgreSQL License.
|
||||||
@ -913,8 +913,8 @@ diff --color -uNr a/heartbeat/OCF_ReturnCodes.pm b/heartbeat/OCF_ReturnCodes.pm
|
|||||||
+Licensed under the PostgreSQL License.
|
+Licensed under the PostgreSQL License.
|
||||||
diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
||||||
--- a/heartbeat/pgsqlms 1970-01-01 01:00:00.000000000 +0100
|
--- a/heartbeat/pgsqlms 1970-01-01 01:00:00.000000000 +0100
|
||||||
+++ b/heartbeat/pgsqlms 2024-06-12 10:48:57.220019549 +0200
|
+++ b/heartbeat/pgsqlms 2021-04-13 13:37:40.934280411 +0200
|
||||||
@@ -0,0 +1,2337 @@
|
@@ -0,0 +1,2308 @@
|
||||||
+#!/usr/bin/perl
|
+#!/usr/bin/perl
|
||||||
+# This program is open source, licensed under the PostgreSQL License.
|
+# This program is open source, licensed under the PostgreSQL License.
|
||||||
+# For license terms, see the LICENSE file.
|
+# For license terms, see the LICENSE file.
|
||||||
@ -945,15 +945,17 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+use File::Temp;
|
+use File::Temp;
|
||||||
+use Data::Dumper;
|
+use Data::Dumper;
|
||||||
+
|
+
|
||||||
+use FindBin;
|
+my $OCF_FUNCTIONS_DIR;
|
||||||
+use lib "$FindBin::RealBin/../heartbeat/";
|
+BEGIN {
|
||||||
+use lib "$FindBin::RealBin/../../lib/heartbeat/";
|
+ $OCF_FUNCTIONS_DIR = defined $ENV{'OCF_FUNCTIONS_DIR'} ? "$ENV{'OCF_FUNCTIONS_DIR'}" : "$ENV{'OCF_ROOT'}/lib/heartbeat";
|
||||||
|
+}
|
||||||
|
+use lib "$OCF_FUNCTIONS_DIR";
|
||||||
+
|
+
|
||||||
+use OCF_ReturnCodes;
|
+use OCF_ReturnCodes;
|
||||||
+use OCF_Directories;
|
+use OCF_Directories;
|
||||||
+use OCF_Functions;
|
+use OCF_Functions;
|
||||||
+
|
+
|
||||||
+our $VERSION = '2.3.0';
|
+our $VERSION = 'v2.3.0';
|
||||||
+our $PROGRAM = 'pgsqlms';
|
+our $PROGRAM = 'pgsqlms';
|
||||||
+
|
+
|
||||||
+# OCF environment
|
+# OCF environment
|
||||||
@ -1143,14 +1145,11 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+ # We check locations of connected standbies by querying the
|
+ # We check locations of connected standbies by querying the
|
||||||
+ # "pg_stat_replication" view.
|
+ # "pg_stat_replication" view.
|
||||||
+ # The row_number applies on the result set ordered on write_location DESC so
|
+ # The row_number applies on the result set ordered on write_location ASC so
|
||||||
+ # the smallest row_number should be given to the closest node from the
|
+ # the highest row_number should be given to the closest node from the
|
||||||
+ # primary (1), then the lowest node name (alphanumeric sort) in case of
|
+ # master, then the lowest node name (alphanumeric sort) in case of equality.
|
||||||
+ # equality. This row_number - 1 is then used to decrease the priority (score) by
|
+ # The result set itself is order by priority DESC to process best known
|
||||||
+ # step of 10 units starting from 1000.
|
+ # candidate first.
|
||||||
+ # E.g. row_number = 1 and maxlag = 0, ( 1000 - (row_number - 1) * 10 ) * 1 = 1000
|
|
||||||
+ # The result set itself is order by priority DESC to process best
|
|
||||||
+ # known candidate first.
|
|
||||||
+ $query = qq{
|
+ $query = qq{
|
||||||
+ SELECT application_name, priority, location, state, current_lag
|
+ SELECT application_name, priority, location, state, current_lag
|
||||||
+ FROM (
|
+ FROM (
|
||||||
@ -1158,7 +1157,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ (1000 - (
|
+ (1000 - (
|
||||||
+ row_number() OVER (
|
+ row_number() OVER (
|
||||||
+ PARTITION BY state IN ('startup', 'backup')
|
+ PARTITION BY state IN ('startup', 'backup')
|
||||||
+ ORDER BY location DESC, application_name ASC
|
+ ORDER BY location ASC, application_name ASC
|
||||||
+ ) - 1
|
+ ) - 1
|
||||||
+ ) * 10
|
+ ) * 10
|
||||||
+ ) * CASE WHEN ( $maxlag > 0
|
+ ) * CASE WHEN ( $maxlag > 0
|
||||||
@ -1327,8 +1326,8 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ return 0;
|
+ return 0;
|
||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
+# Check if the current transition is a recover of the primary on given node.
|
+# Check if the current transiation is a recover of a master clone on given node.
|
||||||
+sub _is_primary_recover {
|
+sub _is_master_recover {
|
||||||
+ my ( $n ) = @_;
|
+ my ( $n ) = @_;
|
||||||
+
|
+
|
||||||
+ return (
|
+ return (
|
||||||
@ -1337,8 +1336,8 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ );
|
+ );
|
||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
+# Check if the current transition is a recover of a standby clone on given node.
|
+# Check if the current transition is a recover of a slave clone on given node.
|
||||||
+sub _is_standby_recover {
|
+sub _is_slave_recover {
|
||||||
+ my ( $n ) = @_;
|
+ my ( $n ) = @_;
|
||||||
+
|
+
|
||||||
+ return (
|
+ return (
|
||||||
@ -1347,7 +1346,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ );
|
+ );
|
||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
+# check if the current transition is a switchover to the given node.
|
+# check if th current transition is a switchover to the given node.
|
||||||
+sub _is_switchover {
|
+sub _is_switchover {
|
||||||
+ my ( $n ) = @_;
|
+ my ( $n ) = @_;
|
||||||
+ my $old = $OCF_NOTIFY_ENV{'master'}[0]{'uname'};
|
+ my $old = $OCF_NOTIFY_ENV{'master'}[0]{'uname'};
|
||||||
@ -1627,18 +1626,18 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
+# Check the write_location of all secondaries, and adapt their master score so
|
+# Check the write_location of all secondaries, and adapt their master score so
|
||||||
+# that the instance closest to the primary will be the selected candidate
|
+# that the instance closest to the master will be the selected candidate should
|
||||||
+# should a promotion be triggered.
|
+# a promotion be triggered.
|
||||||
+# NOTE: This is only a hint to pacemaker! The selected candidate to promotion
|
+# NOTE: This is only a hint to pacemaker! The selected candidate to promotion
|
||||||
+# actually re-check it is the best candidate and force a re-election by failing
|
+# actually re-check it is the best candidate and force a re-election by failing
|
||||||
+# if a better one exists. This avoid a race condition between the call of the
|
+# if a better one exists. This avoid a race condition between the call of the
|
||||||
+# monitor action and the promotion where another standby might have catchup
|
+# monitor action and the promotion where another slave might have catchup faster
|
||||||
+# faster with the primary.
|
+# with the master.
|
||||||
+# NOTE: we cannot directly use the write_location, neither a lsn_diff value as
|
+# NOTE: we cannot directly use the write_location, neither a lsn_diff value as
|
||||||
+# promotion score as Pacemaker considers any value greater than 1,000,000 as
|
+# promotion score as Pacemaker considers any value greater than 1,000,000 as
|
||||||
+# INFINITY.
|
+# INFINITY.
|
||||||
+#
|
+#
|
||||||
+# This sub must be executed from a Master-role monitor action.
|
+# This sub must be executed from a master monitor action.
|
||||||
+#
|
+#
|
||||||
+sub _check_locations {
|
+sub _check_locations {
|
||||||
+ my $partition_nodes;
|
+ my $partition_nodes;
|
||||||
@ -1660,7 +1659,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+ # If no lag are reported at this point, it means that there is no
|
+ # If no lag are reported at this point, it means that there is no
|
||||||
+ # secondary instance connected.
|
+ # secondary instance connected.
|
||||||
+ ocf_log( 'warning', 'No secondary connected to the primary' )
|
+ ocf_log( 'warning', 'No secondary connected to the master' )
|
||||||
+ if $row_num == 0;
|
+ if $row_num == 0;
|
||||||
+
|
+
|
||||||
+ # For each standby connected, set their master score based on the following
|
+ # For each standby connected, set their master score based on the following
|
||||||
@ -1740,10 +1739,10 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+# _check_switchover
|
+# _check_switchover
|
||||||
+# check if the pgsql switchover to the localnode is safe.
|
+# check if the pgsql switchover to the localnode is safe.
|
||||||
+# This is supposed to be called **after** the primary has been stopped or
|
+# This is supposed to be called **after** the master has been stopped or demoted.
|
||||||
+# demoted. It checks if the local standby received the shutdown checkpoint
|
+# This sub checks if the local standby received the shutdown checkpoint from the
|
||||||
+# from the old primary to make sure it can promote safely and the old
|
+# old master to make sure it can take over the master role and the old master
|
||||||
+# primary will be able to catchup as a standby after.
|
+# will be able to catchup as a standby after.
|
||||||
+#
|
+#
|
||||||
+# Returns 0 if switchover is safe
|
+# Returns 0 if switchover is safe
|
||||||
+# Returns 1 if swithcover is not safe
|
+# Returns 1 if swithcover is not safe
|
||||||
@ -1763,20 +1762,20 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ .' Need to check the last record in WAL',
|
+ .' Need to check the last record in WAL',
|
||||||
+ $OCF_NOTIFY_ENV{'demote'}[0]{'uname'}, $nodename );
|
+ $OCF_NOTIFY_ENV{'demote'}[0]{'uname'}, $nodename );
|
||||||
+
|
+
|
||||||
+ # check if we received the shutdown checkpoint of the primary during its
|
+ # check if we received the shutdown checkpoint of the master during its
|
||||||
+ # demote process.
|
+ # demote process.
|
||||||
+ # We need the last local checkpoint LSN and the last received LSN from
|
+ # We need the last local checkpoint LSN and the last received LSN from
|
||||||
+ # primary to check in the WAL between these addresses if we have a
|
+ # master to check in the WAL between these adresses if we have a
|
||||||
+ # "checkpoint shutdown" using pg_xlogdump/pg_waldump.
|
+ # "checkpoint shutdown" using pg_xlogdump/pg_waldump.
|
||||||
+ #
|
+ #
|
||||||
+ # Force a checkpoint to make sure the controldata shows the very last TL
|
+ # Force a checkpoint to make sure the controldata shows the very last TL
|
||||||
+ # and the primary's shutdown checkpoint
|
+ # and the master's shutdown checkpoint
|
||||||
+ _query( q{ CHECKPOINT }, {} );
|
+ _query( q{ CHECKPOINT }, {} );
|
||||||
+ %cdata = _get_controldata();
|
+ %cdata = _get_controldata();
|
||||||
+ $tl = $cdata{'tl'};
|
+ $tl = $cdata{'tl'};
|
||||||
+ $last_redo = $cdata{'redo'};
|
+ $last_redo = $cdata{'redo'};
|
||||||
+
|
+
|
||||||
+ # Get the last received LSN from primary
|
+ # Get the last received LSN from master
|
||||||
+ $last_lsn = _get_last_received_lsn();
|
+ $last_lsn = _get_last_received_lsn();
|
||||||
+
|
+
|
||||||
+ unless ( defined $last_lsn ) {
|
+ unless ( defined $last_lsn ) {
|
||||||
@ -1797,12 +1796,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ if ( $rc == 0 and
|
+ if ( $rc == 0 and
|
||||||
+ $ans =~ m{^rmgr: XLOG.*desc: (?i:checkpoint)(?::|_SHUTDOWN) redo [0-9A-F/]+; tli $tl;.*; shutdown$}m
|
+ $ans =~ m{^rmgr: XLOG.*desc: (?i:checkpoint)(?::|_SHUTDOWN) redo [0-9A-F/]+; tli $tl;.*; shutdown$}m
|
||||||
+ ) {
|
+ ) {
|
||||||
+ ocf_log( 'info', 'Standby received the shutdown checkpoint' );
|
+ ocf_log( 'info', 'Slave received the shutdown checkpoint' );
|
||||||
+ return 0;
|
+ return 0;
|
||||||
+ }
|
+ }
|
||||||
+
|
+
|
||||||
+ ocf_exit_reason(
|
+ ocf_exit_reason(
|
||||||
+ 'Did not receive the shutdown checkpoint from the old primary!' );
|
+ 'Did not receive the shutdown checkpoint from the old master!' );
|
||||||
+
|
+
|
||||||
+ return 1;
|
+ return 1;
|
||||||
+}
|
+}
|
||||||
@ -1829,7 +1828,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ elsif ( $is_in_recovery eq 'f' ) {
|
+ elsif ( $is_in_recovery eq 'f' ) {
|
||||||
+ # The instance is a primary.
|
+ # The instance is a primary.
|
||||||
+ ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a primary");
|
+ ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a primary");
|
||||||
+ # Check lsn diff with current standbys if any
|
+ # Check lsn diff with current slaves if any
|
||||||
+ _check_locations() if $__OCF_ACTION eq 'monitor';
|
+ _check_locations() if $__OCF_ACTION eq 'monitor';
|
||||||
+ return $OCF_RUNNING_MASTER;
|
+ return $OCF_RUNNING_MASTER;
|
||||||
+ }
|
+ }
|
||||||
@ -1905,10 +1904,9 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ elsif ( $controldata_rc == $OCF_SUCCESS ) {
|
+ elsif ( $controldata_rc == $OCF_SUCCESS ) {
|
||||||
+ # The controldata has not been updated to "shutdown in recovery".
|
+ # The controldata has not been updated to "shutdown in recovery".
|
||||||
+ # It should mean we had a crash on a secondary instance.
|
+ # It should mean we had a crash on a secondary instance.
|
||||||
+ # There is no "FAILED_STANDBY" return code, so we return a generic
|
+ # There is no "FAILED_SLAVE" return code, so we return a generic error.
|
||||||
+ # error.
|
|
||||||
+ ocf_exit_reason(
|
+ ocf_exit_reason(
|
||||||
+ 'Instance "%s" controldata indicates a running standby instance, the instance has probably crashed',
|
+ 'Instance "%s" controldata indicates a running secondary instance, the instance has probably crashed',
|
||||||
+ $OCF_RESOURCE_INSTANCE );
|
+ $OCF_RESOURCE_INSTANCE );
|
||||||
+ return $OCF_ERR_GENERIC;
|
+ return $OCF_ERR_GENERIC;
|
||||||
+ }
|
+ }
|
||||||
@ -1982,9 +1980,9 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+=item B<maxlag>
|
+=item B<maxlag>
|
||||||
+
|
+
|
||||||
+Maximum lag allowed on a standby before forbidding any promotion to it.
|
+Maximum lag allowed on a standby before we set a negative master score on it.
|
||||||
+The calculation is based on the difference between the current xlog location on
|
+The calculation is based on the difference between the current xlog location on
|
||||||
+the primary and the write location on the standby.
|
+the master and the write location on the standby.
|
||||||
+
|
+
|
||||||
+(optional, integer, default "0" disables this feature)
|
+(optional, integer, default "0" disables this feature)
|
||||||
+
|
+
|
||||||
@ -2016,16 +2014,13 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+sub ocf_meta_data {
|
+sub ocf_meta_data {
|
||||||
+ print qq{<?xml version="1.0"?>
|
+ print qq{<?xml version="1.0"?>
|
||||||
+ <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
|
+ <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
|
||||||
+ <resource-agent name="pgsqlms" version="$VERSION">
|
+ <resource-agent name="pgsqlms">
|
||||||
+ <version>1.0</version>
|
+ <version>1.0</version>
|
||||||
+
|
+
|
||||||
+ <longdesc lang="en">
|
+ <longdesc lang="en">
|
||||||
+ Resource script for PostgreSQL in replication. It manages PostgreSQL
|
+ Resource script for PostgreSQL in replication. It manages PostgreSQL servers using streaming replication as an HA resource.
|
||||||
+ servers using streaming replication as an HA resource.
|
|
||||||
+ </longdesc>
|
+ </longdesc>
|
||||||
+ <shortdesc lang="en">
|
+ <shortdesc lang="en">Manages PostgreSQL servers in replication</shortdesc>
|
||||||
+ Manages PostgreSQL servers in replication
|
|
||||||
+ </shortdesc>
|
|
||||||
+ <parameters>
|
+ <parameters>
|
||||||
+ <parameter name="system_user" unique="0" required="0">
|
+ <parameter name="system_user" unique="0" required="0">
|
||||||
+ <longdesc lang="en">
|
+ <longdesc lang="en">
|
||||||
@ -2037,8 +2032,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+ <parameter name="bindir" unique="0" required="0">
|
+ <parameter name="bindir" unique="0" required="0">
|
||||||
+ <longdesc lang="en">
|
+ <longdesc lang="en">
|
||||||
+ Path to the directory storing the PostgreSQL binaries. The agent
|
+ Path to the directory storing the PostgreSQL binaries. The agent uses psql, pg_isready, pg_controldata and pg_ctl.
|
||||||
+ uses psql, pg_isready, pg_controldata and pg_ctl.
|
|
||||||
+ </longdesc>
|
+ </longdesc>
|
||||||
+ <shortdesc lang="en">Path to the PostgreSQL binaries</shortdesc>
|
+ <shortdesc lang="en">Path to the PostgreSQL binaries</shortdesc>
|
||||||
+ <content type="string" default="$bindir_default" />
|
+ <content type="string" default="$bindir_default" />
|
||||||
@ -2054,23 +2048,17 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+ <parameter name="datadir" unique="1" required="0">
|
+ <parameter name="datadir" unique="1" required="0">
|
||||||
+ <longdesc lang="en">
|
+ <longdesc lang="en">
|
||||||
+ Path to the directory set in data_directory from your
|
+ Path to the directory set in data_directory from your postgresql.conf file. This parameter
|
||||||
+ postgresql.conf file. This parameter has the same default than
|
+ has the same default than PostgreSQL itself: the pgdata parameter value. Unless you have a
|
||||||
+ PostgreSQL itself: the pgdata parameter value. Unless you have a
|
+ special PostgreSQL setup and you understand this parameter, ignore it.
|
||||||
+ special PostgreSQL setup and you understand this parameter,
|
|
||||||
+ ignore it.
|
|
||||||
+ </longdesc>
|
+ </longdesc>
|
||||||
+ <shortdesc lang="en">
|
+ <shortdesc lang="en">Path to the directory set in data_directory from your postgresql.conf file</shortdesc>
|
||||||
+ Path to the directory set in data_directory from your
|
|
||||||
+ postgresql.conf file
|
|
||||||
+ </shortdesc>
|
|
||||||
+ <content type="string" default="PGDATA" />
|
+ <content type="string" default="PGDATA" />
|
||||||
+ </parameter>
|
+ </parameter>
|
||||||
+
|
+
|
||||||
+ <parameter name="pghost" unique="0" required="0">
|
+ <parameter name="pghost" unique="0" required="0">
|
||||||
+ <longdesc lang="en">
|
+ <longdesc lang="en">
|
||||||
+ Host IP address or unix socket folder the instance is listening
|
+ Host IP address or unix socket folder the instance is listening on.
|
||||||
+ on.
|
|
||||||
+ </longdesc>
|
+ </longdesc>
|
||||||
+ <shortdesc lang="en">Instance IP or unix socket folder</shortdesc>
|
+ <shortdesc lang="en">Instance IP or unix socket folder</shortdesc>
|
||||||
+ <content type="string" default="$pghost_default" />
|
+ <content type="string" default="$pghost_default" />
|
||||||
@ -2086,31 +2074,25 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+ <parameter name="maxlag" unique="0" required="0">
|
+ <parameter name="maxlag" unique="0" required="0">
|
||||||
+ <longdesc lang="en">
|
+ <longdesc lang="en">
|
||||||
+ Maximum lag allowed on a standby before forbidding any promotion
|
+ Maximum lag allowed on a standby before we set a negative master score on it. The calculation
|
||||||
+ on it. The calculation is based on the difference between the
|
+ is based on the difference between the current LSN on the master and the LSN
|
||||||
+ current LSN on the primary and the LSN written on the standby.
|
+ written on the standby.
|
||||||
+ This parameter must be a valid positive number as described in
|
+ This parameter must be a valid positive number as described in PostgreSQL documentation.
|
||||||
+ PostgreSQL documentation.
|
|
||||||
+ See: https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS-NUMERIC
|
+ See: https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS-NUMERIC
|
||||||
+ </longdesc>
|
+ </longdesc>
|
||||||
+ <shortdesc lang="en">
|
+ <shortdesc lang="en">Maximum write lag before we mark a standby as inappropriate to promote</shortdesc>
|
||||||
+ Maximum write lag before we mark a standby as inappropriate to
|
|
||||||
+ promote
|
|
||||||
+ </shortdesc>
|
|
||||||
+ <content type="integer" default="$maxlag_default" />
|
+ <content type="integer" default="$maxlag_default" />
|
||||||
+ </parameter>
|
+ </parameter>
|
||||||
+
|
+
|
||||||
+ <parameter name="recovery_template" unique="1" required="0">
|
+ <parameter name="recovery_template" unique="1" required="0">
|
||||||
+ <longdesc lang="en">
|
+ <longdesc lang="en">
|
||||||
+ Path to the recovery.conf template. This file is simply copied
|
+ Path to the recovery.conf template. This file is simply copied to \$PGDATA
|
||||||
+ to \$PGDATA before starting the instance as standby.
|
+ before starting the instance as slave.
|
||||||
+ ONLY for PostgreSQL 11 and bellow. This parameter is IGNORED for
|
+ ONLY for PostgreSQL 11 and bellow. This parameter is IGNORED for
|
||||||
+ PostgreSQL 12 and higher. The cluster will refuse to start if a
|
+ PostgreSQL 12 and higher. The cluster will refuse to start if a template
|
||||||
+ template file is found.
|
+ file is found.
|
||||||
+ </longdesc>
|
+ </longdesc>
|
||||||
+ <shortdesc lang="en">
|
+ <shortdesc lang="en">Path to the recovery.conf template for PostgreSQL 11 and older.</shortdesc>
|
||||||
+ Path to the recovery.conf template for PostgreSQL 11 and older.
|
|
||||||
+ </shortdesc>
|
|
||||||
+ <content type="string" default="PGDATA/recovery.conf.pcmk" />
|
+ <content type="string" default="PGDATA/recovery.conf.pcmk" />
|
||||||
+ </parameter>
|
+ </parameter>
|
||||||
+
|
+
|
||||||
@ -2121,9 +2103,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ postgresql.conf file is not in the data directory (PGDATA), eg.:
|
+ postgresql.conf file is not in the data directory (PGDATA), eg.:
|
||||||
+ "-c config_file=/etc/postgresql/9.3/main/postgresql.conf".
|
+ "-c config_file=/etc/postgresql/9.3/main/postgresql.conf".
|
||||||
+ </longdesc>
|
+ </longdesc>
|
||||||
+ <shortdesc lang="en">
|
+ <shortdesc lang="en">Additionnal arguments given to the postgres process on startup.</shortdesc>
|
||||||
+ Additionnal arguments given to the postgres process on startup.
|
|
||||||
+ </shortdesc>
|
|
||||||
+ <content type="string" default="$start_opts_default" />
|
+ <content type="string" default="$start_opts_default" />
|
||||||
+ </parameter>
|
+ </parameter>
|
||||||
+
|
+
|
||||||
@ -2134,6 +2114,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ <action name="reload" timeout="20" />
|
+ <action name="reload" timeout="20" />
|
||||||
+ <action name="promote" timeout="30" />
|
+ <action name="promote" timeout="30" />
|
||||||
+ <action name="demote" timeout="120" />
|
+ <action name="demote" timeout="120" />
|
||||||
|
+ <action name="monitor" depth="0" timeout="10" interval="15"/>
|
||||||
+ <action name="monitor" depth="0" timeout="10" interval="15" role="Master"/>
|
+ <action name="monitor" depth="0" timeout="10" interval="15" role="Master"/>
|
||||||
+ <action name="monitor" depth="0" timeout="10" interval="16" role="Slave"/>
|
+ <action name="monitor" depth="0" timeout="10" interval="16" role="Slave"/>
|
||||||
+ <action name="notify" timeout="60" />
|
+ <action name="notify" timeout="60" />
|
||||||
@ -2167,11 +2148,11 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+=item B<promote>
|
+=item B<promote>
|
||||||
+
|
+
|
||||||
+Promotes the resource to the primary role. Suggested minimum timeout: 30.
|
+Promotes the resource to the Master role. Suggested minimum timeout: 30.
|
||||||
+
|
+
|
||||||
+=item B<demote>
|
+=item B<demote>
|
||||||
+
|
+
|
||||||
+Demotes the resource to the standby role. Suggested minimum timeout: 120.
|
+Demotes the resource to the Slave role. Suggested minimum timeout: 120.
|
||||||
+
|
+
|
||||||
+=item B<monitor (Master role)>
|
+=item B<monitor (Master role)>
|
||||||
+
|
+
|
||||||
@ -2234,17 +2215,19 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.9' ) == 2
|
+ ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.9' ) == 2
|
||||||
+ ) {
|
+ ) {
|
||||||
+ ocf_exit_reason(
|
+ ocf_exit_reason(
|
||||||
+ 'PAF v%s is compatible with Pacemaker 1.1.13 and greater',
|
+ 'PAF %s is compatible with Pacemaker 1.1.13 and greater',
|
||||||
+ $VERSION
|
+ $VERSION
|
||||||
+ );
|
+ );
|
||||||
+ return $OCF_ERR_INSTALLED;
|
+ return $OCF_ERR_INSTALLED;
|
||||||
+ }
|
+ }
|
||||||
+
|
+
|
||||||
+ # check notify=true
|
+ # check notify=true
|
||||||
+ unless ( defined $ENV{'OCF_RESKEY_CRM_meta_notify'}
|
+ $ans = qx{ $CRM_RESOURCE --resource "$OCF_RESOURCE_INSTANCE" \\
|
||||||
+ and lc($ENV{'OCF_RESKEY_CRM_meta_notify'}) =~ /^true$|^on$|^yes$|^y$|^1$/ ) {
|
+ --meta --get-parameter notify 2>/dev/null };
|
||||||
|
+ chomp $ans;
|
||||||
|
+ unless ( lc($ans) =~ /^true$|^on$|^yes$|^y$|^1$/ ) {
|
||||||
+ ocf_exit_reason(
|
+ ocf_exit_reason(
|
||||||
+ 'You must set meta parameter notify=true for your "master" resource'
|
+ 'You must set meta parameter notify=true for your master resource'
|
||||||
+ );
|
+ );
|
||||||
+ return $OCF_ERR_INSTALLED;
|
+ return $OCF_ERR_INSTALLED;
|
||||||
+ }
|
+ }
|
||||||
@ -2255,7 +2238,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1'
|
+ and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1'
|
||||||
+ ) {
|
+ ) {
|
||||||
+ ocf_exit_reason(
|
+ ocf_exit_reason(
|
||||||
+ 'You must set meta parameter master-max=1 for your "master" resource'
|
+ 'You must set meta parameter master-max=1 for your master resource'
|
||||||
+ );
|
+ );
|
||||||
+ return $OCF_ERR_INSTALLED;
|
+ return $OCF_ERR_INSTALLED;
|
||||||
+ }
|
+ }
|
||||||
@ -2416,14 +2399,14 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+ # Check if a master score exists in the cluster.
|
+ # Check if a master score exists in the cluster.
|
||||||
+ # During the very first start of the cluster, no master score will
|
+ # During the very first start of the cluster, no master score will
|
||||||
+ # exists on any of the existing standbys, unless an admin designated
|
+ # exists on any of the existing slaves, unless an admin designated
|
||||||
+ # one of them using crm_master. If no master score exists the
|
+ # one of them using crm_master. If no master exists the cluster will
|
||||||
+ # cluster can not pick a standby to promote.
|
+ # not promote a master among the slaves.
|
||||||
+ # To solve this situation, we check if there is at least one master
|
+ # To solve this situation, we check if there is at least one master
|
||||||
+ # score existing on one node in the cluster. Do nothing if at least
|
+ # score existing on one node in the cluster. Do nothing if at least
|
||||||
+ # one master score is found among the clones of the resource.
|
+ # one master score is found among the clones of the resource. If no
|
||||||
+ # If no master score exists, set a score of 1 only if the resource
|
+ # master score exists, set a score of 1 only if the resource was a
|
||||||
+ # was a shut downed primary before the start.
|
+ # shut downed master before the start.
|
||||||
+ if ( $prev_state eq "shut down" and not _master_score_exists() ) {
|
+ if ( $prev_state eq "shut down" and not _master_score_exists() ) {
|
||||||
+ ocf_log( 'info', 'No master score around. Set mine to 1' );
|
+ ocf_log( 'info', 'No master score around. Set mine to 1' );
|
||||||
+
|
+
|
||||||
@ -2434,7 +2417,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ }
|
+ }
|
||||||
+
|
+
|
||||||
+ ocf_exit_reason(
|
+ ocf_exit_reason(
|
||||||
+ 'Instance "%s" is not running as a standby (returned %d)',
|
+ 'Instance "%s" is not running as a slave (returned %d)',
|
||||||
+ $OCF_RESOURCE_INSTANCE, $rc );
|
+ $OCF_RESOURCE_INSTANCE, $rc );
|
||||||
+
|
+
|
||||||
+ return $OCF_ERR_GENERIC;
|
+ return $OCF_ERR_GENERIC;
|
||||||
@ -2641,7 +2624,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ return $OCF_SUCCESS;
|
+ return $OCF_SUCCESS;
|
||||||
+ }
|
+ }
|
||||||
+ elsif ( $rc == $OCF_NOT_RUNNING ) {
|
+ elsif ( $rc == $OCF_NOT_RUNNING ) {
|
||||||
+ # Instance is stopped. Need to start as standby.
|
+ # Instance is stopped. Nothing to do.
|
||||||
+ ocf_log( 'debug', 'pgsql_demote: "%s" currently shut down',
|
+ ocf_log( 'debug', 'pgsql_demote: "%s" currently shut down',
|
||||||
+ $OCF_RESOURCE_INSTANCE );
|
+ $OCF_RESOURCE_INSTANCE );
|
||||||
+ }
|
+ }
|
||||||
@ -2655,12 +2638,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ return $OCF_ERR_GENERIC;
|
+ return $OCF_ERR_GENERIC;
|
||||||
+ }
|
+ }
|
||||||
+
|
+
|
||||||
+ # TODO Do we need to make sure at least one standby is connected?
|
+ # TODO we need to make sure at least one slave is connected!!
|
||||||
+
|
+
|
||||||
+ # WARNING if the resource state is stopped instead of primary, the ocf ra
|
+ # WARNING if the resource state is stopped instead of master, the ocf ra dev
|
||||||
+ # dev rsc advises to return OCF_ERR_GENERIC, misleading the CRM in a loop
|
+ # rsc advises to return OCF_ERR_GENERIC, misleading the CRM in a loop where
|
||||||
+ # where it computes transitions of demote(failing)->stop->start->promote
|
+ # it computes transitions of demote(failing)->stop->start->promote actions
|
||||||
+ # actions until failcount == migration-threshold.
|
+ # until failcount == migration-threshold.
|
||||||
+ # This is a really ugly trick to keep going with the demote action if the
|
+ # This is a really ugly trick to keep going with the demote action if the
|
||||||
+ # rsc is already stopped gracefully.
|
+ # rsc is already stopped gracefully.
|
||||||
+ # See discussion "CRM trying to demote a stopped resource" on
|
+ # See discussion "CRM trying to demote a stopped resource" on
|
||||||
@ -2728,12 +2711,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ $rc = pgsql_monitor();
|
+ $rc = pgsql_monitor();
|
||||||
+
|
+
|
||||||
+ if ( $rc == $OCF_SUCCESS ) {
|
+ if ( $rc == $OCF_SUCCESS ) {
|
||||||
+ # Running as standby. Normal, expected behavior.
|
+ # Running as slave. Normal, expected behavior.
|
||||||
+ ocf_log( 'debug', 'pgsql_promote: "%s" currently running as a standby',
|
+ ocf_log( 'debug', 'pgsql_promote: "%s" currently running as a standby',
|
||||||
+ $OCF_RESOURCE_INSTANCE );
|
+ $OCF_RESOURCE_INSTANCE );
|
||||||
+ }
|
+ }
|
||||||
+ elsif ( $rc == $OCF_RUNNING_MASTER ) {
|
+ elsif ( $rc == $OCF_RUNNING_MASTER ) {
|
||||||
+ # Already a primary. Unexpected, but not a problem.
|
+ # Already a master. Unexpected, but not a problem.
|
||||||
+ ocf_log( 'info', '"%s" already running as a primary',
|
+ ocf_log( 'info', '"%s" already running as a primary',
|
||||||
+ $OCF_RESOURCE_INSTANCE );
|
+ $OCF_RESOURCE_INSTANCE );
|
||||||
+ return $OCF_SUCCESS;
|
+ return $OCF_SUCCESS;
|
||||||
@ -2773,20 +2756,19 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ # internal error during _check_switchover
|
+ # internal error during _check_switchover
|
||||||
+ }
|
+ }
|
||||||
+
|
+
|
||||||
+ # Do not check for a better candidate if we try to recover the primary.
|
+ # Do not check for a better candidate if we try to recover the master
|
||||||
+ # Recover of a primary is detected during the pre-promote action. It sets
|
+ # Recover of a master is detected during the pre-promote action. It sets the
|
||||||
+ # the private attribute 'recover_primary' to '1' if this is a primary
|
+ # private attribute 'recover_master' to '1' if this is a master recover.
|
||||||
+ # recover.
|
+ if ( _get_priv_attr( 'recover_master' ) eq '1' ) {
|
||||||
+ if ( _get_priv_attr( 'recover_primary' ) eq '1' ) {
|
+ ocf_log( 'info', 'Recovering old master, no election needed');
|
||||||
+ ocf_log( 'info', 'Recovering old primary, no election needed')
|
|
||||||
+ }
|
+ }
|
||||||
+ else {
|
+ else {
|
||||||
+
|
+
|
||||||
+ # The promotion is occurring on the best known candidate (highest
|
+ # The promotion is occurring on the best known candidate (highest
|
||||||
+ # master score), as chosen by pacemaker during the last working monitor
|
+ # master score), as chosen by pacemaker during the last working monitor
|
||||||
+ # on previous primary (see pgsql_monitor/_check_locations subs).
|
+ # on previous master (see pgsql_monitor/_check_locations subs).
|
||||||
+ # To avoid any race condition between the last monitor action on the
|
+ # To avoid any race condition between the last monitor action on the
|
||||||
+ # previous primary and the **real** most up-to-date standby, we
|
+ # previous master and the **real** most up-to-date standby, we
|
||||||
+ # set each standby location during the "pre-promote" action, and stored
|
+ # set each standby location during the "pre-promote" action, and stored
|
||||||
+ # them using the "lsn_location" resource attribute.
|
+ # them using the "lsn_location" resource attribute.
|
||||||
+ #
|
+ #
|
||||||
@ -2909,8 +2891,8 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ return $OCF_SUCCESS;
|
+ return $OCF_SUCCESS;
|
||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
+# This action is called **before** the actual promotion when a failing primary
|
+# This action is called **before** the actual promotion when a failing master is
|
||||||
+# is considered unreclaimable, recoverable or a new primary must be promoted
|
+# considered unreclaimable, recoverable or a new master must be promoted
|
||||||
+# (switchover or first start).
|
+# (switchover or first start).
|
||||||
+# As every "notify" action, it is executed almost simultaneously on all
|
+# As every "notify" action, it is executed almost simultaneously on all
|
||||||
+# available nodes.
|
+# available nodes.
|
||||||
@ -2925,11 +2907,11 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ ocf_log( 'info', 'Promoting instance on node "%s"',
|
+ ocf_log( 'info', 'Promoting instance on node "%s"',
|
||||||
+ $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} );
|
+ $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} );
|
||||||
+
|
+
|
||||||
+ # No need to do an election if this is a recovery of the primary
|
+ # No need to do an election between slaves if this is recovery of the master
|
||||||
+ if ( _is_primary_recover( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ) ) {
|
+ if ( _is_master_recover( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ) ) {
|
||||||
+ ocf_log( 'warning', 'This is a primary recovery!' );
|
+ ocf_log( 'warning', 'This is a master recovery!' );
|
||||||
+
|
+
|
||||||
+ _set_priv_attr( 'recover_primary', '1' )
|
+ _set_priv_attr( 'recover_master', '1' )
|
||||||
+ if $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename;
|
+ if $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename;
|
||||||
+
|
+
|
||||||
+ return $OCF_SUCCESS;
|
+ return $OCF_SUCCESS;
|
||||||
@ -2937,7 +2919,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+ # Environment cleanup!
|
+ # Environment cleanup!
|
||||||
+ _delete_priv_attr( 'lsn_location' );
|
+ _delete_priv_attr( 'lsn_location' );
|
||||||
+ _delete_priv_attr( 'recover_primary' );
|
+ _delete_priv_attr( 'recover_master' );
|
||||||
+ _delete_priv_attr( 'nodes' );
|
+ _delete_priv_attr( 'nodes' );
|
||||||
+ _delete_priv_attr( 'cancel_switchover' );
|
+ _delete_priv_attr( 'cancel_switchover' );
|
||||||
+
|
+
|
||||||
@ -2961,19 +2943,19 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ # FIXME: should we allow a switchover to a lagging slave?
|
+ # FIXME: should we allow a switchover to a lagging slave?
|
||||||
+ }
|
+ }
|
||||||
+
|
+
|
||||||
+ # We need to trigger an election between existing standbys to promote the
|
+ # We need to trigger an election between existing slaves to promote the best
|
||||||
+ # best one based on its current LSN location. Each node set a private
|
+ # one based on its current LSN location. Each node set a private attribute
|
||||||
+ # attribute "lsn_location" with its TL and LSN location.
|
+ # "lsn_location" with its TL and LSN location.
|
||||||
+ #
|
+ #
|
||||||
+ # During the following promote action, The designated standby for
|
+ # During the following promote action, The designated standby for
|
||||||
+ # promotion use these attributes to check if the instance to be promoted
|
+ # promotion use these attributes to check if the instance to be promoted
|
||||||
+ # is the best one, so we can avoid a race condition between the last
|
+ # is the best one, so we can avoid a race condition between the last
|
||||||
+ # successful monitor on the previous primary and the current promotion.
|
+ # successful monitor on the previous master and the current promotion.
|
||||||
+
|
+
|
||||||
+ # As we can not break the transition from a notification action, we check
|
+ # As we can not break the transition from a notification action, we check
|
||||||
+ # during the promotion if each node TL and LSN are valid.
|
+ # during the promotion if each node TL and LSN are valid.
|
||||||
+
|
+
|
||||||
+ # Force a checkpoint to make sure the controldata shows the very last TL
|
+ # Force a checpoint to make sure the controldata shows the very last TL
|
||||||
+ _query( q{ CHECKPOINT }, {} );
|
+ _query( q{ CHECKPOINT }, {} );
|
||||||
+ %cdata = _get_controldata();
|
+ %cdata = _get_controldata();
|
||||||
+ $node_lsn = _get_last_received_lsn( 'in decimal' );
|
+ $node_lsn = _get_last_received_lsn( 'in decimal' );
|
||||||
@ -2995,12 +2977,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ ocf_log( 'warning', 'Could not set the current node LSN' )
|
+ ocf_log( 'warning', 'Could not set the current node LSN' )
|
||||||
+ if $? != 0 ;
|
+ if $? != 0 ;
|
||||||
+
|
+
|
||||||
+ # If this node is the future primary, keep track of the standbys that
|
+ # If this node is the future master, keep track of the slaves that
|
||||||
+ # received the same notification to compare our LSN with them during
|
+ # received the same notification to compare our LSN with them during
|
||||||
+ # promotion
|
+ # promotion
|
||||||
+ if ( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename ) {
|
+ if ( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename ) {
|
||||||
+ # Build the list of active nodes:
|
+ # Build the list of active nodes:
|
||||||
+ # primary + standby + start - stop
|
+ # master + slave + start - stop
|
||||||
+ # FIXME: Deal with rsc started during the same transaction but **after**
|
+ # FIXME: Deal with rsc started during the same transaction but **after**
|
||||||
+ # the promotion ?
|
+ # the promotion ?
|
||||||
+ $active_nodes{ $_->{'uname'} }++ foreach @{ $OCF_NOTIFY_ENV{'active'} },
|
+ $active_nodes{ $_->{'uname'} }++ foreach @{ $OCF_NOTIFY_ENV{'active'} },
|
||||||
@ -3013,27 +2995,26 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ _set_priv_attr( 'nodes', $attr_nodes );
|
+ _set_priv_attr( 'nodes', $attr_nodes );
|
||||||
+ }
|
+ }
|
||||||
+
|
+
|
||||||
+ # whatever the result, it is ignored by pacemaker.
|
|
||||||
+ return $OCF_SUCCESS;
|
+ return $OCF_SUCCESS;
|
||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
+# This action is called after a promote action.
|
+# This action is called after a promote action.
|
||||||
+sub pgsql_notify_post_promote {
|
+sub pgsql_notify_post_promote {
|
||||||
+
|
+
|
||||||
+ # We have a new primary (or the previous one recovered).
|
+ # We have a new master (or the previous one recovered).
|
||||||
+ # Environment cleanup!
|
+ # Environment cleanup!
|
||||||
+ _delete_priv_attr( 'lsn_location' );
|
+ _delete_priv_attr( 'lsn_location' );
|
||||||
+ _delete_priv_attr( 'recover_primary' );
|
+ _delete_priv_attr( 'recover_master' );
|
||||||
+ _delete_priv_attr( 'nodes' );
|
+ _delete_priv_attr( 'nodes' );
|
||||||
+ _delete_priv_attr( 'cancel_switchover' );
|
+ _delete_priv_attr( 'cancel_switchover' );
|
||||||
+
|
+
|
||||||
+ # whatever the result, it is ignored by pacemaker.
|
|
||||||
+ return $OCF_SUCCESS;
|
+ return $OCF_SUCCESS;
|
||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
+# This is called before a demote occurs.
|
+# This is called before a demote occurs.
|
||||||
+sub pgsql_notify_pre_demote {
|
+sub pgsql_notify_pre_demote {
|
||||||
+ my $rc;
|
+ my $rc;
|
||||||
|
+ my %cdata;
|
||||||
+
|
+
|
||||||
+ # do nothing if the local node will not be demoted
|
+ # do nothing if the local node will not be demoted
|
||||||
+ return $OCF_SUCCESS unless scalar
|
+ return $OCF_SUCCESS unless scalar
|
||||||
@ -3041,12 +3022,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+ $rc = pgsql_monitor();
|
+ $rc = pgsql_monitor();
|
||||||
+
|
+
|
||||||
+ # do nothing if this is not a primary recovery
|
+ # do nothing if this is not a master recovery
|
||||||
+ return $OCF_SUCCESS unless _is_primary_recover( $nodename )
|
+ return $OCF_SUCCESS unless _is_master_recover( $nodename )
|
||||||
+ and $rc == $OCF_FAILED_MASTER;
|
+ and $rc == $OCF_FAILED_MASTER;
|
||||||
+
|
+
|
||||||
+ # in case of primary crash, we need to detect if the CRM tries to recover
|
+ # in case of master crash, we need to detect if the CRM tries to recover
|
||||||
+ # the primary. The usual transition is to do:
|
+ # the master clone. The usual transition is to do:
|
||||||
+ # demote->stop->start->promote
|
+ # demote->stop->start->promote
|
||||||
+ #
|
+ #
|
||||||
+ # There are multiple flaws with this transition:
|
+ # There are multiple flaws with this transition:
|
||||||
@ -3059,26 +3040,18 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ # If it succeeds, at least it will be demoted correctly with a normal
|
+ # If it succeeds, at least it will be demoted correctly with a normal
|
||||||
+ # status. If it fails, it will be caught in the next steps.
|
+ # status. If it fails, it will be caught in the next steps.
|
||||||
+
|
+
|
||||||
+ ocf_log( 'info', 'Trying to start failing primary "%s"',
|
+ ocf_log( 'info', 'Trying to start failing master "%s"...',
|
||||||
+ $OCF_RESOURCE_INSTANCE );
|
+ $OCF_RESOURCE_INSTANCE );
|
||||||
+
|
+
|
||||||
+ # Either the instance managed to start or it couldn't.
|
+ # Either the instance managed to start or it couldn't.
|
||||||
+ # We rely on the pg_ctl '-w' switch to take care of this. If it couldn't
|
+ # We rely on the pg_ctk '-w' switch to take care of this. If it couldn't
|
||||||
+ # start, this error will be caught later during the various checks
|
+ # start, this error will be caught later during the various checks
|
||||||
+ if( _pg_ctl_start() == 0 ) {
|
+ _pg_ctl_start();
|
||||||
+ my %cdata = _get_controldata();
|
|
||||||
+
|
+
|
||||||
+ ocf_log( 'info', 'Recovery of %s succeed', $OCF_RESOURCE_INSTANCE );
|
+ %cdata = _get_controldata();
|
||||||
+ ocf_log( 'info', 'State is "%s" after recovery attempt',
|
|
||||||
+ $cdata{'state'} );
|
|
||||||
+ }
|
|
||||||
+ else {
|
|
||||||
+ ocf_log( 'err', 'Could not recover failing primary %s',
|
|
||||||
+ $OCF_RESOURCE_INSTANCE );
|
|
||||||
+ }
|
|
||||||
+
|
+
|
||||||
|
+ ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} );
|
||||||
+
|
+
|
||||||
+ # whatever the result, it is ignored by pacemaker.
|
|
||||||
+ return $OCF_SUCCESS;
|
+ return $OCF_SUCCESS;
|
||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
@ -3093,12 +3066,12 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+ $rc = _controldata_to_ocf();
|
+ $rc = _controldata_to_ocf();
|
||||||
+
|
+
|
||||||
+ # do nothing if this is not a standby recovery
|
+ # do nothing if this is not a slave recovery
|
||||||
+ return $OCF_SUCCESS unless _is_standby_recover( $nodename )
|
+ return $OCF_SUCCESS unless _is_slave_recover( $nodename )
|
||||||
+ and $rc == $OCF_RUNNING_SLAVE;
|
+ and $rc == $OCF_RUNNING_SLAVE;
|
||||||
+
|
+
|
||||||
+ # in case of standby crash, we need to detect if the CRM tries to recover
|
+ # in case of slave crash, we need to detect if the CRM tries to recover
|
||||||
+ # it. The usual transition is to do: stop->start
|
+ # the slaveclone. The usual transition is to do: stop->start
|
||||||
+ #
|
+ #
|
||||||
+ # This transition cannot work because the instance is in
|
+ # This transition cannot work because the instance is in
|
||||||
+ # OCF_ERR_GENERIC step. So the stop action will fail, leading most
|
+ # OCF_ERR_GENERIC step. So the stop action will fail, leading most
|
||||||
@ -3108,7 +3081,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ # If it succeeds, at least it will be stopped correctly with a normal
|
+ # If it succeeds, at least it will be stopped correctly with a normal
|
||||||
+ # status. If it fails, it will be caught in the next steps.
|
+ # status. If it fails, it will be caught in the next steps.
|
||||||
+
|
+
|
||||||
+ ocf_log( 'info', 'Trying to start failing standby "%s"...',
|
+ ocf_log( 'info', 'Trying to start failing slave "%s"...',
|
||||||
+ $OCF_RESOURCE_INSTANCE );
|
+ $OCF_RESOURCE_INSTANCE );
|
||||||
+
|
+
|
||||||
+ # Either the instance managed to start or it couldn't.
|
+ # Either the instance managed to start or it couldn't.
|
||||||
@ -3120,7 +3093,6 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+
|
+
|
||||||
+ ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} );
|
+ ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} );
|
||||||
+
|
+
|
||||||
+ # whatever the result, it is ignored by pacemaker.
|
|
||||||
+ return $OCF_SUCCESS;
|
+ return $OCF_SUCCESS;
|
||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
@ -3144,7 +3116,6 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+ elsif ( /^pre-stop$/ ) { return pgsql_notify_pre_stop() }
|
+ elsif ( /^pre-stop$/ ) { return pgsql_notify_pre_stop() }
|
||||||
+ }
|
+ }
|
||||||
+
|
+
|
||||||
+ # whatever the result, it is ignored by pacemaker.
|
|
||||||
+ return $OCF_SUCCESS;
|
+ return $OCF_SUCCESS;
|
||||||
+}
|
+}
|
||||||
+
|
+
|
||||||
@ -3254,7 +3225,7 @@ diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
|
|||||||
+=cut
|
+=cut
|
||||||
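Beyond the primary/standby wording, the pgsqlms hunks above also change how the agent validates the notify meta attribute: the newer branch reads OCF_RESKEY_CRM_meta_notify from the environment instead of invoking crm_resource at runtime. A minimal shell sketch of that environment-based check, for illustration only (the shipped agent is Perl and reports the failure through ocf_exit_reason and $OCF_ERR_INSTALLED exactly as shown above):

# Sketch only: accept the same truthy spellings the patch matches
# (true/on/yes/y/1, case-insensitive) and refuse to run otherwise.
notify_meta=$(printf '%s' "${OCF_RESKEY_CRM_meta_notify:-}" | tr '[:upper:]' '[:lower:]')
case "$notify_meta" in
    true|on|yes|y|1) : ;;   # notify is enabled, continue
    *)
        ocf_exit_reason 'You must set meta parameter notify=true for your "master" resource'
        exit $OCF_ERR_INSTALLED
        ;;
esac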
diff --color -uNr a/paf_LICENSE b/paf_LICENSE
|
diff --color -uNr a/paf_LICENSE b/paf_LICENSE
|
||||||
--- a/paf_LICENSE 1970-01-01 01:00:00.000000000 +0100
|
--- a/paf_LICENSE 1970-01-01 01:00:00.000000000 +0100
|
||||||
+++ b/paf_LICENSE 2023-01-04 12:25:21.721889640 +0100
|
+++ b/paf_LICENSE 2021-04-14 09:16:39.083555835 +0200
|
||||||
@@ -0,0 +1,19 @@
|
@@ -0,0 +1,19 @@
|
||||||
+Copyright (c) 2016-2020, Jehan-Guillaume de Rorthais, Mael Rimbault.
|
+Copyright (c) 2016-2020, Jehan-Guillaume de Rorthais, Mael Rimbault.
|
||||||
+
|
+
|
||||||
@ -3277,7 +3248,7 @@ diff --color -uNr a/paf_LICENSE b/paf_LICENSE
|
|||||||
+
|
+
|
||||||
diff --color -uNr a/paf_README.md b/paf_README.md
|
diff --color -uNr a/paf_README.md b/paf_README.md
|
||||||
--- a/paf_README.md 1970-01-01 01:00:00.000000000 +0100
|
--- a/paf_README.md 1970-01-01 01:00:00.000000000 +0100
|
||||||
+++ b/paf_README.md 2023-01-04 12:25:21.721889640 +0100
|
+++ b/paf_README.md 2021-04-14 09:18:57.450968048 +0200
|
||||||
@@ -0,0 +1,86 @@
|
@@ -0,0 +1,86 @@
|
||||||
+# PostgreSQL Automatic Failover
|
+# PostgreSQL Automatic Failover
|
||||||
+
|
+
|
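The election logic in the pgsqlms hunks above ranks standbys by their last received LSN taken "in decimal". A PostgreSQL LSN written as X/Y can be folded into one comparable number as X * 2^32 + Y; a hypothetical shell helper illustrating that conversion (not part of the patch, and assuming a shell with 64-bit arithmetic):

lsn_to_decimal() {
    # Split "X/Y", treat both halves as hexadecimal and combine them.
    lsn_hi=${1%%/*}
    lsn_lo=${1##*/}
    printf '%d\n' $(( 0x$lsn_hi * 4294967296 + 0x$lsn_lo ))
}
# Example: lsn_to_decimal "0/3000060" prints 50331744.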
68
SOURCES/bz1904465-mysql-common-improve-error-message.patch
Normal file
@@ -0,0 +1,68 @@
From fcceb714085836de9db4493b527e94d85dd72626 Mon Sep 17 00:00:00 2001
From: ut002970 <liuxingwei@uniontech.com>
Date: Wed, 6 Sep 2023 15:27:05 +0800
Subject: [PATCH 1/3] modify error message

---
heartbeat/mysql-common.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh
index 8104019b03..a93acc4c60 100755
--- a/heartbeat/mysql-common.sh
+++ b/heartbeat/mysql-common.sh
@@ -254,7 +254,7 @@ mysql_common_start()
while [ $start_wait = 1 ]; do
if ! ps $pid > /dev/null 2>&1; then
wait $pid
- ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), please check your installation"
+ ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), please check your installation, log message you can check $OCF_RESKEY_log"
return $OCF_ERR_GENERIC
fi
mysql_common_status info

From 8f9b344cd5b3cb96ea0f94b7ab0306da2234ac00 Mon Sep 17 00:00:00 2001
From: ut002970 <liuxingwei@uniontech.com>
Date: Wed, 6 Sep 2023 15:56:24 +0800
Subject: [PATCH 2/3] modify error message

---
heartbeat/mysql-common.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh
index a93acc4c60..d5b2286737 100755
--- a/heartbeat/mysql-common.sh
+++ b/heartbeat/mysql-common.sh
@@ -254,7 +254,7 @@ mysql_common_start()
while [ $start_wait = 1 ]; do
if ! ps $pid > /dev/null 2>&1; then
wait $pid
- ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), please check your installation, log message you can check $OCF_RESKEY_log"
+ ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), Check $OCF_RESKEY_log for details"
return $OCF_ERR_GENERIC
fi
mysql_common_status info

From a292b3c552bf3f2beea5f73e0d171546c0a1273c Mon Sep 17 00:00:00 2001
From: ut002970 <liuxingwei@uniontech.com>
Date: Wed, 6 Sep 2023 16:10:48 +0800
Subject: [PATCH 3/3] modify error message

---
heartbeat/mysql-common.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh
index d5b2286737..d6b4e3cdf4 100755
--- a/heartbeat/mysql-common.sh
+++ b/heartbeat/mysql-common.sh
@@ -254,7 +254,7 @@ mysql_common_start()
while [ $start_wait = 1 ]; do
if ! ps $pid > /dev/null 2>&1; then
wait $pid
- ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), Check $OCF_RESKEY_log for details"
+ ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?). Check $OCF_RESKEY_log for details"
return $OCF_ERR_GENERIC
fi
mysql_common_status info
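Taken together, the three commits above only rewrite one ocf_exit_reason call; the net change to mysql_common_start() is equivalent to this one-line replacement:

- ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), please check your installation"
+ ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?). Check $OCF_RESKEY_log for details"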
195
SOURCES/bz1905820-LVM-activate-fix-return-codes.patch
Normal file
@@ -0,0 +1,195 @@
|
|||||||
|
From 640c2b57f0f3e7256d587ddd5960341cb38b1982 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
Date: Sun, 13 Dec 2020 14:58:34 -0800
|
||||||
|
Subject: [PATCH] LVM-activate: Fix return codes
|
||||||
|
|
||||||
|
OCF_ERR_ARGS should be used when the configuration isn't valid for the
|
||||||
|
**local** node, and so the resource should not attempt to start again
|
||||||
|
locally until the issue is corrected.
|
||||||
|
|
||||||
|
OCF_ERR_CONFIGURED should be used when the configuration isn't valid on
|
||||||
|
**any** node, and so the resource should not attempt to start again
|
||||||
|
anywhere until the issue is corrected.
|
||||||
|
|
||||||
|
One remaining gray area: Should lvmlockd/lvmetad/clvmd improperly
|
||||||
|
running (or improperly not running) be an OCF_ERR_GENERIC or
|
||||||
|
OCF_ERR_ARGS? The fact that it's a state issue rather than a config
|
||||||
|
issue suggests OCF_ERR_GENERIC. The fact that it won't be fixed without
|
||||||
|
user intervention suggests OCF_ERR_ARGS. The approach here is to use
|
||||||
|
GENERIC for all of these. One can make the case that "improperly
|
||||||
|
running" should use ARGS, since a process must be manually stopped to
|
||||||
|
fix the issue, and that "improperly not running" should use GENERIC,
|
||||||
|
since there's a small chance the process died and will be recovered in
|
||||||
|
some way.
|
||||||
|
|
||||||
|
More info about return code meanings:
|
||||||
|
- https://clusterlabs.org/pacemaker/doc/2.1/Pacemaker_Administration/html/agents.html#how-are-ocf-return-codes-interpreted
|
||||||
|
|
||||||
|
Resolves: RHBZ#1905820
|
||||||
|
|
||||||
|
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
---
|
||||||
|
heartbeat/LVM-activate | 47 +++++++++++++++++++++---------------------
|
||||||
|
1 file changed, 23 insertions(+), 24 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
|
||||||
|
index c86606637..e951a08e9 100755
|
||||||
|
--- a/heartbeat/LVM-activate
|
||||||
|
+++ b/heartbeat/LVM-activate
|
||||||
|
@@ -333,8 +333,7 @@ config_verify()
|
||||||
|
real=$(lvmconfig "$name" | cut -d'=' -f2)
|
||||||
|
if [ "$real" != "$expect" ]; then
|
||||||
|
ocf_exit_reason "config item $name: expect=$expect but real=$real"
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
-
|
||||||
|
+ exit $OCF_ERR_ARGS
|
||||||
|
fi
|
||||||
|
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
@@ -366,12 +365,12 @@ lvmlockd_check()
|
||||||
|
fi
|
||||||
|
|
||||||
|
ocf_exit_reason "lvmlockd daemon is not running!"
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ exit $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
if pgrep clvmd >/dev/null 2>&1 ; then
|
||||||
|
ocf_exit_reason "clvmd daemon is running unexpectedly."
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ exit $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
@@ -402,17 +401,17 @@ clvmd_check()
|
||||||
|
# Good: clvmd is running, and lvmlockd is not running
|
||||||
|
if ! pgrep clvmd >/dev/null 2>&1 ; then
|
||||||
|
ocf_exit_reason "clvmd daemon is not running!"
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ exit $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
if pgrep lvmetad >/dev/null 2>&1 ; then
|
||||||
|
ocf_exit_reason "Please stop lvmetad daemon when clvmd is running."
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ exit $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
if pgrep lvmlockd >/dev/null 2>&1 ; then
|
||||||
|
ocf_exit_reason "lvmlockd daemon is running unexpectedly."
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ exit $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
@@ -424,12 +423,12 @@ systemid_check()
|
||||||
|
source=$(lvmconfig 'global/system_id_source' 2>/dev/null | cut -d"=" -f2)
|
||||||
|
if [ "$source" = "" ] || [ "$source" = "none" ]; then
|
||||||
|
ocf_exit_reason "system_id_source in lvm.conf is not set correctly!"
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ exit $OCF_ERR_ARGS
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z ${SYSTEM_ID} ]; then
|
||||||
|
ocf_exit_reason "local/system_id is not set!"
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ exit $OCF_ERR_ARGS
|
||||||
|
fi
|
||||||
|
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
@@ -441,18 +440,18 @@ tagging_check()
|
||||||
|
# The volume_list must be initialized to something in order to
|
||||||
|
# guarantee our tag will be filtered on startup
|
||||||
|
if ! lvm dumpconfig activation/volume_list; then
|
||||||
|
- ocf_log err "LVM: Improper setup detected"
|
||||||
|
+ ocf_log err "LVM: Improper setup detected"
|
||||||
|
ocf_exit_reason "The volume_list filter must be initialized in lvm.conf for exclusive activation without clvmd"
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ exit $OCF_ERR_ARGS
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Our tag must _NOT_ be in the volume_list. This agent
|
||||||
|
# overrides the volume_list during activation using the
|
||||||
|
# special tag reserved for cluster activation
|
||||||
|
if lvm dumpconfig activation/volume_list | grep -e "\"@${OUR_TAG}\"" -e "\"${VG}\""; then
|
||||||
|
- ocf_log err "LVM: Improper setup detected"
|
||||||
|
+ ocf_log err "LVM: Improper setup detected"
|
||||||
|
ocf_exit_reason "The volume_list in lvm.conf must not contain the cluster tag, \"${OUR_TAG}\", or volume group, ${VG}"
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ exit $OCF_ERR_ARGS
|
||||||
|
fi
|
||||||
|
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
@@ -463,13 +462,13 @@ read_parameters()
|
||||||
|
if [ -z "$VG" ]
|
||||||
|
then
|
||||||
|
ocf_exit_reason "You must identify the volume group name!"
|
||||||
|
- exit $OCF_ERR_ARGS
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$LV_activation_mode" != "shared" ] && [ "$LV_activation_mode" != "exclusive" ]
|
||||||
|
then
|
||||||
|
ocf_exit_reason "Invalid value for activation_mode: $LV_activation_mode"
|
||||||
|
- exit $OCF_ERR_ARGS
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Convert VG_access_mode from string to index
|
||||||
|
@@ -519,8 +518,10 @@ lvm_validate() {
|
||||||
|
exit $OCF_NOT_RUNNING
|
||||||
|
fi
|
||||||
|
|
||||||
|
+ # Could be a transient error (e.g., iSCSI connection
|
||||||
|
+ # issue) so use OCF_ERR_GENERIC
|
||||||
|
ocf_exit_reason "Volume group[${VG}] doesn't exist, or not visible on this node!"
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ exit $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Inconsistency might be due to missing physical volumes, which doesn't
|
||||||
|
@@ -549,7 +550,7 @@ lvm_validate() {
|
||||||
|
mode=$?
|
||||||
|
if [ $VG_access_mode_num -ne 4 ] && [ $mode -ne $VG_access_mode_num ]; then
|
||||||
|
ocf_exit_reason "The specified vg_access_mode doesn't match the lock_type on VG metadata!"
|
||||||
|
- exit $OCF_ERR_ARGS
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Nothing to do if the VG has no logical volume
|
||||||
|
@@ -561,11 +562,11 @@ lvm_validate() {
|
||||||
|
|
||||||
|
# Check if the given $LV is in the $VG
|
||||||
|
if [ -n "$LV" ]; then
|
||||||
|
- OUT=$(lvs --foreign --noheadings ${VG}/${LV} 2>&1)
|
||||||
|
+ output=$(lvs --foreign --noheadings ${VG}/${LV} 2>&1)
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
- ocf_log err "lvs: ${OUT}"
|
||||||
|
+ ocf_log err "lvs: ${output}"
|
||||||
|
ocf_exit_reason "LV ($LV) is not in the given VG ($VG)."
|
||||||
|
- exit $OCF_ERR_ARGS
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
@@ -580,7 +581,6 @@ lvm_validate() {
|
||||||
|
3)
|
||||||
|
systemid_check
|
||||||
|
;;
|
||||||
|
-
|
||||||
|
4)
|
||||||
|
tagging_check
|
||||||
|
;;
|
||||||
|
@@ -808,10 +808,9 @@ lvm_status() {
|
||||||
|
dd if=${dm_name} of=/dev/null bs=1 count=1 >/dev/null \
|
||||||
|
2>&1
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
- return $OCF_NOT_RUNNING
|
||||||
|
- else
|
||||||
|
- return $OCF_SUCCESS
|
||||||
|
+ return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
+ return $OCF_SUCCESS
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
ocf_exit_reason "unsupported monitor level $OCF_CHECK_LEVEL"
|
@@ -0,0 +1,55 @@
From bb5cfa172ca58cd8adcedcaca92bde54d0645661 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 14 Jul 2022 10:55:19 +0200
Subject: [PATCH] openstack-agents: set domain parameter's default to Default
and fix missing parameter name in ocf_exit_reason

---
heartbeat/openstack-common.sh | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/heartbeat/openstack-common.sh b/heartbeat/openstack-common.sh
index b6eec09c..14d290bd 100644
--- a/heartbeat/openstack-common.sh
+++ b/heartbeat/openstack-common.sh
@@ -1,6 +1,10 @@
+OCF_RESKEY_user_domain_name_default="Default"
+OCF_RESKEY_project_domain_name_default="Default"
OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
OCF_RESKEY_insecure_default="false"

+: ${OCF_RESKEY_user_domain_name=${OCF_RESKEY_user_domain_name_default}}
+: ${OCF_RESKEY_project_domain_name=${OCF_RESKEY_project_domain_name_default}}
: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
: ${OCF_RESKEY_insecure=${OCF_RESKEY_insecure_default}}

@@ -64,7 +68,7 @@ Keystone Project.
Keystone User Domain Name.
</longdesc>
<shortdesc lang="en">Keystone User Domain Name</shortdesc>
-<content type="string" />
+<content type="string" default="${OCF_RESKEY_user_domain_name_default}" />
</parameter>

<parameter name="project_domain_name" required="0">
@@ -72,7 +76,7 @@ Keystone User Domain Name.
Keystone Project Domain Name.
</longdesc>
<shortdesc lang="en">Keystone Project Domain Name</shortdesc>
-<content type="string" />
+<content type="string" default="${OCF_RESKEY_project_domain_name_default}" />
</parameter>

<parameter name="openstackcli">
@@ -133,7 +137,7 @@ get_config() {
exit $OCF_ERR_CONFIGURED
fi
if [ -z "$OCF_RESKEY_project_domain_name" ]; then
- ocf_exit_reason " not set"
+ ocf_exit_reason "project_domain_name not set"
exit $OCF_ERR_CONFIGURED
fi

--
2.36.1
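The first hunk above relies on the usual OCF default-parameter idiom: ":" is a no-op command and "${var=default}" assigns only when var is unset, so a value supplied in the resource configuration always wins. For example, with the names from the patch:

OCF_RESKEY_user_domain_name_default="Default"
: ${OCF_RESKEY_user_domain_name=${OCF_RESKEY_user_domain_name_default}}
# From here on, $OCF_RESKEY_user_domain_name is "Default" unless the
# resource configuration already set it to something else.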
@@ -0,0 +1,282 @@
|
|||||||
|
From ebea4c3620261c529cad908c0e52064df84b0c61 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Mon, 11 Jul 2022 10:28:11 +0200
|
||||||
|
Subject: [PATCH] openstack-agents: warn when openstackcli is slow
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/openstack-cinder-volume | 19 +++++++++++--------
|
||||||
|
heartbeat/openstack-common.sh | 22 ++++++++++++++++++++++
|
||||||
|
heartbeat/openstack-floating-ip | 17 ++++++++++-------
|
||||||
|
heartbeat/openstack-info.in | 20 ++++++++++----------
|
||||||
|
heartbeat/openstack-virtual-ip | 20 ++++++++++----------
|
||||||
|
5 files changed, 63 insertions(+), 35 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume
|
||||||
|
index 19bf04faf..116442c41 100755
|
||||||
|
--- a/heartbeat/openstack-cinder-volume
|
||||||
|
+++ b/heartbeat/openstack-cinder-volume
|
||||||
|
@@ -113,11 +113,14 @@ _get_node_id() {
|
||||||
|
}
|
||||||
|
|
||||||
|
osvol_validate() {
|
||||||
|
+ local result
|
||||||
|
+
|
||||||
|
check_binary "$OCF_RESKEY_openstackcli"
|
||||||
|
|
||||||
|
get_config
|
||||||
|
|
||||||
|
- if ! $OCF_RESKEY_openstackcli volume list|grep -q $OCF_RESKEY_volume_id ; then
|
||||||
|
+ result=$(run_openstackcli "volume list")
|
||||||
|
+ if ! echo "$result" | grep -q $OCF_RESKEY_volume_id; then
|
||||||
|
ocf_exit_reason "volume-id $OCF_RESKEY_volume_id not found"
|
||||||
|
return $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
@@ -156,17 +159,17 @@ osvol_monitor() {
|
||||||
|
# Is the volue attached?
|
||||||
|
# We use the API
|
||||||
|
#
|
||||||
|
- result=$($OCF_RESKEY_openstackcli volume show \
|
||||||
|
+ result=$(run_openstackcli "volume show \
|
||||||
|
--column status \
|
||||||
|
--column attachments \
|
||||||
|
--format value \
|
||||||
|
- $OCF_RESKEY_volume_id)
|
||||||
|
+ $OCF_RESKEY_volume_id")
|
||||||
|
|
||||||
|
- if echo "$result" | grep -q available ; then
|
||||||
|
+ if echo "$result" | grep -q available; then
|
||||||
|
ocf_log warn "$OCF_RESKEY_volume_id is not attached to any instance"
|
||||||
|
return $OCF_NOT_RUNNING
|
||||||
|
else
|
||||||
|
- export attached_server_id=$(echo $result|head -n1|
|
||||||
|
+ export attached_server_id=$(echo "$result"|head -n1|
|
||||||
|
grep -P -o "'server_id': '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}'"|
|
||||||
|
grep -P -o "[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}")
|
||||||
|
ocf_log info "$OCF_RESKEY_volume_id is attached to instance $attached_server_id"
|
||||||
|
@@ -199,7 +202,7 @@ osvol_stop() {
|
||||||
|
#
|
||||||
|
# Detach the volume
|
||||||
|
#
|
||||||
|
- if ! $OCF_RESKEY_openstackcli server remove volume $node_id $OCF_RESKEY_volume_id ; then
|
||||||
|
+ if ! run_openstackcli "server remove volume $node_id $OCF_RESKEY_volume_id"; then
|
||||||
|
ocf_log error "Couldn't remove volume $OCF_RESKEY_volume_id from instance $node_id"
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
@@ -225,7 +228,7 @@ osvol_start() {
|
||||||
|
# TODO: make it optional in case multi-attachment is allowed by Cinder
|
||||||
|
#
|
||||||
|
if [ ! -z $attached_server_id ] ; then
|
||||||
|
- if ! $OCF_RESKEY_openstackcli server remove volume $attached_server_id $OCF_RESKEY_volume_id ; then
|
||||||
|
+ if ! run_openstackcli "server remove volume $attached_server_id $OCF_RESKEY_volume_id"; then
|
||||||
|
ocf_log error "Couldn't remove volume $OCF_RESKEY_volume_id from instance $attached_server_id"
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
@@ -238,7 +241,7 @@ osvol_start() {
|
||||||
|
#
|
||||||
|
# Attach the volume
|
||||||
|
#
|
||||||
|
- $OCF_RESKEY_openstackcli server add volume $node_id $OCF_RESKEY_volume_id
|
||||||
|
+ run_openstackcli "server add volume $node_id $OCF_RESKEY_volume_id"
|
||||||
|
if [ $? != $OCF_SUCCESS ]; then
|
||||||
|
ocf_log error "Couldn't add volume $OCF_RESKEY_volume_id to instance $node_id"
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
diff --git a/heartbeat/openstack-common.sh b/heartbeat/openstack-common.sh
|
||||||
|
index 4763c90db..b6eec09c2 100644
|
||||||
|
--- a/heartbeat/openstack-common.sh
|
||||||
|
+++ b/heartbeat/openstack-common.sh
|
||||||
|
@@ -145,3 +145,25 @@ get_config() {
|
||||||
|
OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-domain-name $OCF_RESKEY_project_domain_name"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
+
|
||||||
|
+run_openstackcli() {
|
||||||
|
+ local cmd="${OCF_RESKEY_openstackcli} $1"
|
||||||
|
+ local result
|
||||||
|
+ local rc
|
||||||
|
+ local start_time=$(date +%s)
|
||||||
|
+ local end_time
|
||||||
|
+ local elapsed_time
|
||||||
|
+
|
||||||
|
+ result=$($cmd)
|
||||||
|
+ rc=$?
|
||||||
|
+ end_time=$(date +%s)
|
||||||
|
+ elapsed_time=$(expr $end_time - $start_time)
|
||||||
|
+
|
||||||
|
+ if [ $elapsed_time -gt 20 ]; then
|
||||||
|
+ ocf_log warn "$cmd took ${elapsed_time}s to complete"
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ echo "$result"
|
||||||
|
+
|
||||||
|
+ return $rc
|
||||||
|
+}
|
||||||
|
diff --git a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip
|
||||||
|
index 6e2895654..7317f19a8 100755
|
||||||
|
--- a/heartbeat/openstack-floating-ip
|
||||||
|
+++ b/heartbeat/openstack-floating-ip
|
||||||
|
@@ -101,11 +101,14 @@ END
|
||||||
|
}
|
||||||
|
|
||||||
|
osflip_validate() {
|
||||||
|
+ local result
|
||||||
|
+
|
||||||
|
check_binary "$OCF_RESKEY_openstackcli"
|
||||||
|
|
||||||
|
get_config
|
||||||
|
|
||||||
|
- if ! $OCF_RESKEY_openstackcli floating ip list|grep -q $OCF_RESKEY_ip_id ; then
|
||||||
|
+ result=$(run_openstackcli "floating ip list")
|
||||||
|
+ if ! echo "$result" | grep -q $OCF_RESKEY_ip_id; then
|
||||||
|
ocf_exit_reason "ip-id $OCF_RESKEY_ip_id not found"
|
||||||
|
return $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
@@ -132,14 +135,14 @@ osflip_monitor() {
|
||||||
|
| awk '{gsub("[^ ]*:", "");print}')
|
||||||
|
|
||||||
|
# Is the IP active and attached?
|
||||||
|
- result=$($OCF_RESKEY_openstackcli floating ip show \
|
||||||
|
+ result=$(run_openstackcli "floating ip show \
|
||||||
|
--column port_id --column floating_ip_address \
|
||||||
|
--format yaml \
|
||||||
|
- $OCF_RESKEY_ip_id)
|
||||||
|
+ $OCF_RESKEY_ip_id")
|
||||||
|
|
||||||
|
for port in $node_port_ids ; do
|
||||||
|
- if echo $result | grep -q $port ; then
|
||||||
|
- floating_ip=$(echo $result | awk '/floating_ip_address/ {print $2}')
|
||||||
|
+ if echo "$result" | grep -q $port ; then
|
||||||
|
+ floating_ip=$(echo "$result" | awk '/floating_ip_address/ {print $2}')
|
||||||
|
${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -S status -n openstack_floating_ip -v $floating_ip
|
||||||
|
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
@@ -160,7 +163,7 @@ osflip_stop() {
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
fi
|
||||||
|
|
||||||
|
- if ! $OCF_RESKEY_openstackcli floating ip unset --port $OCF_RESKEY_ip_id ; then
|
||||||
|
+ if ! run_openstackcli "floating ip unset --port $OCF_RESKEY_ip_id"; then
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
@@ -194,7 +197,7 @@ osflip_start() {
|
||||||
|
|
||||||
|
ocf_log info "Moving IP address $OCF_RESKEY_ip_id to port ID $node_port_id"
|
||||||
|
|
||||||
|
- $OCF_RESKEY_openstackcli floating ip set --port $node_port_id $OCF_RESKEY_ip_id
|
||||||
|
+ run_openstackcli "floating ip set --port $node_port_id $OCF_RESKEY_ip_id"
|
||||||
|
if [ $? != $OCF_SUCCESS ]; then
|
||||||
|
ocf_log error "$OCF_RESKEY_ip_id Cannot be set to port $node_port_id"
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
diff --git a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in
|
||||||
|
index f3a59fc7a..6502f1df1 100755
|
||||||
|
--- a/heartbeat/openstack-info.in
|
||||||
|
+++ b/heartbeat/openstack-info.in
|
||||||
|
@@ -119,9 +119,7 @@ END
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
OSInfoStats() {
|
||||||
|
- local result
|
||||||
|
local value
|
||||||
|
- local node
|
||||||
|
local node_id
|
||||||
|
|
||||||
|
get_config
|
||||||
|
@@ -141,31 +139,33 @@ OSInfoStats() {
|
||||||
|
${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_id -v "$node_id"
|
||||||
|
|
||||||
|
# Nova data: flavor
|
||||||
|
- value=$($OCF_RESKEY_openstackcli server show \
|
||||||
|
+ value=$(run_openstackcli "server show \
|
||||||
|
--format value \
|
||||||
|
--column flavor \
|
||||||
|
- $node_id)
|
||||||
|
+ $node_id")
|
||||||
|
|
||||||
|
${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_flavor -v "$value"
|
||||||
|
|
||||||
|
# Nova data: availability zone
|
||||||
|
- value=$($OCF_RESKEY_openstackcli server show \
|
||||||
|
+ value=$(run_openstackcli "server show \
|
||||||
|
--format value \
|
||||||
|
--column OS-EXT-AZ:availability_zone \
|
||||||
|
- $node_id)
|
||||||
|
+ $node_id")
|
||||||
|
|
||||||
|
${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_az -v "$value"
|
||||||
|
|
||||||
|
# Network data: ports
|
||||||
|
value=""
|
||||||
|
- for port_id in $($OCF_RESKEY_openstackcli port list \
|
||||||
|
+ for port_id in $(run_openstackcli "port list \
|
||||||
|
--format value \
|
||||||
|
--column id \
|
||||||
|
- --server $node_id); do
|
||||||
|
- subnet_id=$($OCF_RESKEY_openstackcli port show \
|
||||||
|
+ --server $node_id"); do
|
||||||
|
+ subnet_result=$(run_openstackcli "port show \
|
||||||
|
--format json \
|
||||||
|
--column fixed_ips \
|
||||||
|
- ${port_id} | grep -P '\"subnet_id\": \".*\",$' |
|
||||||
|
+ ${port_id}")
|
||||||
|
+ subnet_id=$(echo "$subnet_result" |
|
||||||
|
+ grep -P '\"subnet_id\": \".*\",$' |
|
||||||
|
grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}')
|
||||||
|
value="${value}${subnet_id}:${port_id},"
|
||||||
|
done
|
||||||
|
diff --git a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip
|
||||||
|
index c654d980a..361357d55 100755
|
||||||
|
--- a/heartbeat/openstack-virtual-ip
|
||||||
|
+++ b/heartbeat/openstack-virtual-ip
|
||||||
|
@@ -132,11 +132,11 @@ osvip_monitor() {
|
||||||
|
|
||||||
|
node_port_id=$(osvip_port_id)
|
||||||
|
|
||||||
|
- result=$($OCF_RESKEY_openstackcli port show \
|
||||||
|
+ result=$(run_openstackcli "port show \
|
||||||
|
--format value \
|
||||||
|
--column allowed_address_pairs \
|
||||||
|
- ${node_port_id})
|
||||||
|
- if echo $result | grep -q "$OCF_RESKEY_ip"; then
|
||||||
|
+ ${node_port_id}")
|
||||||
|
+ if echo "$result" | grep -q "$OCF_RESKEY_ip"; then
|
||||||
|
${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -S status -n openstack_virtual_ip -v $OCF_RESKEY_ip
|
||||||
|
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
@@ -158,20 +158,20 @@ osvip_stop() {
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
fi
|
||||||
|
|
||||||
|
- mac_address=$($OCF_RESKEY_openstackcli port show \
|
||||||
|
+ mac_address=$(run_openstackcli "port show \
|
||||||
|
--format value \
|
||||||
|
--column mac_address \
|
||||||
|
- $node_port_id)
|
||||||
|
- echo ${mac_address} | grep -q -P "^([0-9a-f]{2}:){5}[0-9a-f]{2}$"
|
||||||
|
+ $node_port_id")
|
||||||
|
+ echo "${mac_address}" | grep -q -P "^([0-9a-f]{2}:){5}[0-9a-f]{2}$"
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
ocf_log error "MAC address '${mac_address}' is not valid."
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
- if ! $OCF_RESKEY_openstackcli port unset \
|
||||||
|
+ if ! run_openstackcli "port unset \
|
||||||
|
--allowed-address \
|
||||||
|
ip-address=$OCF_RESKEY_ip,mac-address=${mac_address} \
|
||||||
|
- $node_port_id; then
|
||||||
|
+ $node_port_id"; then
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
@@ -196,9 +196,9 @@ osvip_start() {
|
||||||
|
|
||||||
|
ocf_log info "Moving IP address $OCF_RESKEY_ip to port ID $node_port_id"
|
||||||
|
|
||||||
|
- $OCF_RESKEY_openstackcli port set \
|
||||||
|
+ run_openstackcli "port set \
|
||||||
|
--allowed-address ip-address=$OCF_RESKEY_ip \
|
||||||
|
- $node_port_id
|
||||||
|
+ $node_port_id"
|
||||||
|
if [ $? != $OCF_SUCCESS ]; then
|
||||||
|
ocf_log error "$OCF_RESKEY_ip Cannot be set to port $node_port_id"
|
||||||
|
return $OCF_ERR_GENERIC
|
@@ -0,0 +1,770 @@
|
|||||||
|
diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
|
||||||
|
--- a/heartbeat/Makefile.am 2022-03-15 16:14:29.355209012 +0100
|
||||||
|
+++ b/heartbeat/Makefile.am 2022-03-15 16:18:35.917048467 +0100
|
||||||
|
@@ -217,6 +217,7 @@
|
||||||
|
lvm-clvm.sh \
|
||||||
|
lvm-plain.sh \
|
||||||
|
lvm-tag.sh \
|
||||||
|
+ openstack-common.sh \
|
||||||
|
ora-common.sh \
|
||||||
|
mysql-common.sh \
|
||||||
|
nfsserver-redhat.sh \
|
||||||
|
diff --color -uNr a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume
|
||||||
|
--- a/heartbeat/openstack-cinder-volume 2022-03-15 16:14:29.370209063 +0100
|
||||||
|
+++ b/heartbeat/openstack-cinder-volume 2022-03-15 16:17:36.231840008 +0100
|
||||||
|
@@ -34,11 +34,11 @@
|
||||||
|
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||||
|
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||||
|
|
||||||
|
+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh
|
||||||
|
+
|
||||||
|
# Defaults
|
||||||
|
-OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
|
||||||
|
OCF_RESKEY_volume_local_check_default="true"
|
||||||
|
|
||||||
|
-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
|
||||||
|
: ${OCF_RESKEY_volume_local_check=${OCF_RESKEY_volume_local_check_default}}
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
@@ -68,14 +68,11 @@
|
||||||
|
<shortdesc lang="en">Attach a cinder volume</shortdesc>
|
||||||
|
|
||||||
|
<parameters>
|
||||||
|
-<parameter name="openstackcli">
|
||||||
|
-<longdesc lang="en">
|
||||||
|
-Path to command line tools for openstack.
|
||||||
|
-</longdesc>
|
||||||
|
-<shortdesc lang="en">Path to Openstack CLI tool</shortdesc>
|
||||||
|
-<content type="string" default="${OCF_RESKEY_openstackcli_default}" />
|
||||||
|
-</parameter>
|
||||||
|
+END
|
||||||
|
|
||||||
|
+common_meta_data
|
||||||
|
+
|
||||||
|
+cat <<END
|
||||||
|
<parameter name="volume_local_check">
|
||||||
|
<longdesc lang="en">
|
||||||
|
This option allows the cluster to monitor the cinder volume presence without
|
||||||
|
@@ -85,28 +82,19 @@
|
||||||
|
<content type="boolean" default="${OCF_RESKEY_volume_local_check_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
-<parameter name="openrc" required="1">
|
||||||
|
-<longdesc lang="en">
|
||||||
|
-Valid Openstack credentials as openrc file from api_access/openrc.
|
||||||
|
-</longdesc>
|
||||||
|
-<shortdesc lang="en">openrc file</shortdesc>
|
||||||
|
-<content type="string" />
|
||||||
|
-</parameter>
|
||||||
|
-
|
||||||
|
<parameter name="volume_id" required="1">
|
||||||
|
<longdesc lang="en">
|
||||||
|
-Cinder volume identifier to use to attach the bloc storage.
|
||||||
|
+Cinder volume identifier to use to attach the block storage.
|
||||||
|
</longdesc>
|
||||||
|
<shortdesc lang="en">Volume ID</shortdesc>
|
||||||
|
<content type="string" />
|
||||||
|
</parameter>
|
||||||
|
-
|
||||||
|
</parameters>
|
||||||
|
|
||||||
|
<actions>
|
||||||
|
<action name="start" timeout="180s" />
|
||||||
|
<action name="stop" timeout="180s" />
|
||||||
|
-<action name="monitor" depth="0" timeout="30s" interval="60s" />
|
||||||
|
+<action name="monitor" depth="0" timeout="180s" interval="60s" />
|
||||||
|
<action name="validate-all" timeout="5s" />
|
||||||
|
<action name="meta-data" timeout="5s" />
|
||||||
|
</actions>
|
||||||
|
@@ -127,17 +115,7 @@
|
||||||
|
osvol_validate() {
|
||||||
|
check_binary "$OCF_RESKEY_openstackcli"
|
||||||
|
|
||||||
|
- if [ -z "$OCF_RESKEY_openrc" ]; then
|
||||||
|
- ocf_exit_reason "openrc parameter not set"
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
- if [ ! -f "$OCF_RESKEY_openrc" ] ; then
|
||||||
|
- ocf_exit_reason "openrc file not found"
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
- . $OCF_RESKEY_openrc
|
||||||
|
+ get_config
|
||||||
|
|
||||||
|
if ! $OCF_RESKEY_openstackcli volume list|grep -q $OCF_RESKEY_volume_id ; then
|
||||||
|
ocf_exit_reason "volume-id $OCF_RESKEY_volume_id not found"
|
||||||
|
diff --color -uNr a/heartbeat/openstack-common.sh b/heartbeat/openstack-common.sh
|
||||||
|
--- a/heartbeat/openstack-common.sh 1970-01-01 01:00:00.000000000 +0100
|
||||||
|
+++ b/heartbeat/openstack-common.sh 2022-03-15 16:17:36.232840011 +0100
|
||||||
|
@@ -0,0 +1,147 @@
|
||||||
|
+OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
|
||||||
|
+OCF_RESKEY_insecure_default="false"
|
||||||
|
+
|
||||||
|
+: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
|
||||||
|
+: ${OCF_RESKEY_insecure=${OCF_RESKEY_insecure_default}}
|
||||||
|
+
|
||||||
|
+if ocf_is_true "${OCF_RESKEY_insecure}"; then
|
||||||
|
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --insecure"
|
||||||
|
+fi
|
||||||
|
+
|
||||||
|
+common_meta_data() {
|
||||||
|
+ cat <<END
|
||||||
|
+
|
||||||
|
+<parameter name="cloud" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Openstack cloud (from ~/.config/openstack/clouds.yaml or /etc/openstack/clouds.yaml).
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Cloud from clouds.yaml</shortdesc>
|
||||||
|
+<content type="string" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="openrc" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Openstack credentials as openrc file from api_access/openrc.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">openrc file</shortdesc>
|
||||||
|
+<content type="string" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="auth_url" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Keystone Auth URL
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Keystone Auth URL</shortdesc>
|
||||||
|
+<content type="string" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="username" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Username.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Username</shortdesc>
|
||||||
|
+<content type="string" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="password" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Password.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Password</shortdesc>
|
||||||
|
+<content type="string" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="project_name" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Keystone Project.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Keystone Project</shortdesc>
|
||||||
|
+<content type="string" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="user_domain_name" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Keystone User Domain Name.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Keystone User Domain Name</shortdesc>
|
||||||
|
+<content type="string" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="project_domain_name" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Keystone Project Domain Name.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Keystone Project Domain Name</shortdesc>
|
||||||
|
+<content type="string" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="openstackcli">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Path to command line tools for openstack.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Path to Openstack CLI tool</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_openstackcli_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="insecure">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Allow insecure connections
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Allow insecure connections</shortdesc>
|
||||||
|
+<content type="boolean" default="${OCF_RESKEY_insecure_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+END
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+get_config() {
|
||||||
|
+ if [ -n "$OCF_RESKEY_cloud" ]; then
|
||||||
|
+ TILDE=$(echo ~)
|
||||||
|
+ clouds_yaml="$TILDE/.config/openstack/clouds.yaml"
|
||||||
|
+ if [ ! -f "$clouds_yaml" ]; then
|
||||||
|
+ clouds_yaml="/etc/openstack/clouds.yaml"
|
||||||
|
+ fi
|
||||||
|
+ if [ ! -f "$clouds_yaml" ]; then
|
||||||
|
+ ocf_exit_reason "~/.config/openstack/clouds.yaml and /etc/openstack/clouds.yaml does not exist"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-cloud $OCF_RESKEY_cloud"
|
||||||
|
+ elif [ -n "$OCF_RESKEY_openrc" ]; then
|
||||||
|
+ if [ ! -f "$OCF_RESKEY_openrc" ]; then
|
||||||
|
+ ocf_exit_reason "$OCF_RESKEY_openrc does not exist"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+ . $OCF_RESKEY_openrc
|
||||||
|
+ else
|
||||||
|
+ if [ -z "$OCF_RESKEY_auth_url" ]; then
|
||||||
|
+ ocf_exit_reason "auth_url not set"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+ if [ -z "$OCF_RESKEY_username" ]; then
|
||||||
|
+ ocf_exit_reason "username not set"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+ if [ -z "$OCF_RESKEY_password" ]; then
|
||||||
|
+ ocf_exit_reason "password not set"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+ if [ -z "$OCF_RESKEY_project_name" ]; then
|
||||||
|
+ ocf_exit_reason "project_name not set"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+ if [ -z "$OCF_RESKEY_user_domain_name" ]; then
|
||||||
|
+ ocf_exit_reason "user_domain_name not set"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+ if [ -z "$OCF_RESKEY_project_domain_name" ]; then
|
||||||
|
+ ocf_exit_reason " not set"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-auth-url $OCF_RESKEY_auth_url"
|
||||||
|
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-username $OCF_RESKEY_username"
|
||||||
|
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-password $OCF_RESKEY_password"
|
||||||
|
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-name $OCF_RESKEY_project_name"
|
||||||
|
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-user-domain-name $OCF_RESKEY_user_domain_name"
|
||||||
|
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-domain-name $OCF_RESKEY_project_domain_name"
|
||||||
|
+ fi
|
||||||
|
+}
|
||||||
|
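For reference, the common parameters introduced above can be supplied in any of three ways (a clouds.yaml cloud name, an openrc file, or explicit credentials). A minimal usage sketch, purely illustrative and not part of the patch, assuming pcs is the cluster CLI in use; the resource name and volume UUID are made up:

    # Option 1: pick a cloud entry from ~/.config/openstack/clouds.yaml or /etc/openstack/clouds.yaml
    pcs resource create cinder_vol ocf:heartbeat:openstack-cinder-volume \
        cloud=mycloud volume_id=7d6264c6-ffb3-4bcf-bdf6-746d15a17625 \
        op monitor interval=60s timeout=180s

    # Option 2: source credentials from an openrc file instead (use one option or the other)
    pcs resource create cinder_vol ocf:heartbeat:openstack-cinder-volume \
        openrc=/root/openrc volume_id=7d6264c6-ffb3-4bcf-bdf6-746d15a17625 \
        op monitor interval=60s timeout=180s
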
diff --color -uNr a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip
|
||||||
|
--- a/heartbeat/openstack-floating-ip 2022-03-15 16:14:29.370209063 +0100
|
||||||
|
+++ b/heartbeat/openstack-floating-ip 2022-03-15 16:17:36.233840014 +0100
|
||||||
|
@@ -34,10 +34,9 @@
|
||||||
|
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||||
|
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||||
|
|
||||||
|
-# Defaults
|
||||||
|
-OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
|
||||||
|
+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh
|
||||||
|
|
||||||
|
-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
|
||||||
|
+# Defaults
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
@@ -67,22 +66,11 @@
|
||||||
|
<shortdesc lang="en">Move a floating IP</shortdesc>
|
||||||
|
|
||||||
|
<parameters>
|
||||||
|
-<parameter name="openstackcli">
|
||||||
|
-<longdesc lang="en">
|
||||||
|
-Path to command line tools for openstack.
|
||||||
|
-</longdesc>
|
||||||
|
-<shortdesc lang="en">Path to Openstack CLI tool</shortdesc>
|
||||||
|
-<content type="string" default="${OCF_RESKEY_openstackcli_default}" />
|
||||||
|
-</parameter>
|
||||||
|
+END
|
||||||
|
|
||||||
|
-<parameter name="openrc" required="1">
|
||||||
|
-<longdesc lang="en">
|
||||||
|
-Valid Openstack credentials as openrc file from api_access/openrc.
|
||||||
|
-</longdesc>
|
||||||
|
-<shortdesc lang="en">openrc file</shortdesc>
|
||||||
|
-<content type="string" />
|
||||||
|
-</parameter>
|
||||||
|
+common_meta_data
|
||||||
|
|
||||||
|
+cat <<END
|
||||||
|
<parameter name="ip_id" required="1">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Floating IP Identifier.
|
||||||
|
@@ -104,7 +92,7 @@
|
||||||
|
<actions>
|
||||||
|
<action name="start" timeout="180s" />
|
||||||
|
<action name="stop" timeout="180s" />
|
||||||
|
-<action name="monitor" depth="0" timeout="30s" interval="60s" />
|
||||||
|
+<action name="monitor" depth="0" timeout="180s" interval="60s" />
|
||||||
|
<action name="validate-all" timeout="5s" />
|
||||||
|
<action name="meta-data" timeout="5s" />
|
||||||
|
</actions>
|
||||||
|
@@ -115,17 +103,7 @@
|
||||||
|
osflip_validate() {
|
||||||
|
check_binary "$OCF_RESKEY_openstackcli"
|
||||||
|
|
||||||
|
- if [ -z "$OCF_RESKEY_openrc" ]; then
|
||||||
|
- ocf_exit_reason "openrc parameter not set"
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
- if [ ! -f "$OCF_RESKEY_openrc" ] ; then
|
||||||
|
- ocf_exit_reason "openrc file not found"
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
- . $OCF_RESKEY_openrc
|
||||||
|
+ get_config
|
||||||
|
|
||||||
|
if ! $OCF_RESKEY_openstackcli floating ip list|grep -q $OCF_RESKEY_ip_id ; then
|
||||||
|
ocf_exit_reason "ip-id $OCF_RESKEY_ip_id not found"
|
||||||
|
diff --color -uNr a/heartbeat/openstack-info b/heartbeat/openstack-info
|
||||||
|
--- a/heartbeat/openstack-info 1970-01-01 01:00:00.000000000 +0100
|
||||||
|
+++ b/heartbeat/openstack-info 2022-03-15 16:17:36.234840018 +0100
|
||||||
|
@@ -0,0 +1,270 @@
|
||||||
|
+#!/bin/sh
|
||||||
|
+#
|
||||||
|
+#
|
||||||
|
+# OCF resource agent to set attributes from Openstack instance details.
|
||||||
|
+# It records (in the CIB) various attributes of a node
|
||||||
|
+#
|
||||||
|
+# Copyright (c) 2018 Mathieu Grzybek
|
||||||
|
+# All Rights Reserved.
|
||||||
|
+#
|
||||||
|
+# This program is free software; you can redistribute it and/or modify
|
||||||
|
+# it under the terms of version 2 of the GNU General Public License as
|
||||||
|
+# published by the Free Software Foundation.
|
||||||
|
+#
|
||||||
|
+# This program is distributed in the hope that it would be useful, but
|
||||||
|
+# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
+#
|
||||||
|
+# Further, this software is distributed without any warranty that it is
|
||||||
|
+# free of the rightful claim of any third person regarding infringement
|
||||||
|
+# or the like. Any license provided herein, whether implied or
|
||||||
|
+# otherwise, applies only to this software file. Patent licenses, if
|
||||||
|
+# any, provided herein do not apply to combinations of this program with
|
||||||
|
+# other software, or any other product whatsoever.
|
||||||
|
+#
|
||||||
|
+# You should have received a copy of the GNU General Public License
|
||||||
|
+# along with this program; if not, write the Free Software Foundation,
|
||||||
|
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
|
||||||
|
+#
|
||||||
|
+#######################################################################
|
||||||
|
+# Initialization:
|
||||||
|
+
|
||||||
|
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||||
|
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||||
|
+
|
||||||
|
+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh
|
||||||
|
+
|
||||||
|
+# Defaults
|
||||||
|
+OCF_RESKEY_pidfile_default="$HA_RSCTMP/OSInfo-${OCF_RESOURCE_HOSTNAME}"
|
||||||
|
+OCF_RESKEY_delay_default="0"
|
||||||
|
+OCF_RESKEY_clone_default="0"
|
||||||
|
+OCF_RESKEY_curlcli_default="/usr/bin/curl"
|
||||||
|
+OCF_RESKEY_pythoncli_default="/usr/bin/python"
|
||||||
|
+
|
||||||
|
+: ${OCF_RESKEY_curlcli=${OCF_RESKEY_curlcli_default}}
|
||||||
|
+: ${OCF_RESKEY_pythoncli=${OCF_RESKEY_pythoncli_default}}
|
||||||
|
+: ${OCF_RESKEY_pidfile=${OCF_RESKEY_pidfile_default}}
|
||||||
|
+: ${OCF_RESKEY_delay=${OCF_RESKEY_delay_default}}
|
||||||
|
+: ${OCF_RESKEY_clone=${OCF_RESKEY_clone_default}}
|
||||||
|
+
|
||||||
|
+#######################################################################
|
||||||
|
+
|
||||||
|
+meta_data() {
|
||||||
|
+ cat <<END
|
||||||
|
+<?xml version="1.0"?>
|
||||||
|
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
|
||||||
|
+<resource-agent name="openstack-info" version="1.0">
|
||||||
|
+<version>1.0</version>
|
||||||
|
+
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+OCF resource agent to set attributes from Openstack instance details.
|
||||||
|
+It records (in the CIB) various attributes of a node.
|
||||||
|
+Sample output:
|
||||||
|
+ openstack_az : nova
|
||||||
|
+ openstack_flavor : c1.small
|
||||||
|
+ openstack_id : 60ac4343-5828-49b1-8aac-7c69b1417f31
|
||||||
|
+ openstack_ports : 7960d889-9750-4160-bf41-c69a41ad72d9:96530d18-57a3-4718-af32-30f2a74c22a2,b0e55a06-bd75-468d-8baa-22cfeb65799f:a55ae917-8016-4b1e-8ffa-04311b9dc7d6
|
||||||
|
+
|
||||||
|
+The layout of openstack_ports is a comma-separated list of tuples "subnet_id:port_id".
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Records various node attributes in the CIB</shortdesc>
|
||||||
|
+
|
||||||
|
+<parameters>
|
||||||
|
+END
|
||||||
|
+
|
||||||
|
+common_meta_data
|
||||||
|
+
|
||||||
|
+ cat <<END
|
||||||
|
+<parameter name="pidfile" unique="0">
|
||||||
|
+<longdesc lang="en">PID file</longdesc>
|
||||||
|
+<shortdesc lang="en">PID file</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_pidfile_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="delay" unique="0">
|
||||||
|
+<longdesc lang="en">Interval to allow values to stabilize</longdesc>
|
||||||
|
+<shortdesc lang="en">Dampening Delay</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_delay_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="curlcli">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Path to command line cURL binary.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Path to cURL binary</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_curlcli_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="pythoncli">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Path to command line Python interpreter.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Path to Python interpreter</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_pythoncli_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+</parameters>
|
||||||
|
+
|
||||||
|
+<actions>
|
||||||
|
+<action name="start" timeout="180s" />
|
||||||
|
+<action name="stop" timeout="180s" />
|
||||||
|
+<action name="monitor" timeout="30s" interval="60s"/>
|
||||||
|
+<action name="meta-data" timeout="5s" />
|
||||||
|
+<action name="validate-all" timeout="20s" />
|
||||||
|
+</actions>
|
||||||
|
+</resource-agent>
|
||||||
|
+END
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+#######################################################################
|
||||||
|
+
|
||||||
|
+OSInfoStats() {
|
||||||
|
+ local result
|
||||||
|
+ local value
|
||||||
|
+ local node
|
||||||
|
+ local node_id
|
||||||
|
+
|
||||||
|
+ get_config
|
||||||
|
+
|
||||||
|
+ # Nova data: server ID
|
||||||
|
+ node_id=$($OCF_RESKEY_curlcli \
|
||||||
|
+ -s http://169.254.169.254/openstack/latest/meta_data.json |
|
||||||
|
+ $OCF_RESKEY_pythoncli -m json.tool |
|
||||||
|
+ grep -P '\"uuid\": \".*\",$' |
|
||||||
|
+ grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}')
|
||||||
|
+
|
||||||
|
+ if [ $? -ne 0 ] ; then
|
||||||
|
+ ocf_exit_reason "Cannot find server ID"
|
||||||
|
+ exit $OCF_ERR_GENERIC
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_id -v "$node_id"
|
||||||
|
+
|
||||||
|
+ # Nova data: flavor
|
||||||
|
+ value=$($OCF_RESKEY_openstackcli server show \
|
||||||
|
+ --format value \
|
||||||
|
+ --column flavor \
|
||||||
|
+ $node_id)
|
||||||
|
+
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_flavor -v "$value"
|
||||||
|
+
|
||||||
|
+ # Nova data: availability zone
|
||||||
|
+ value=$($OCF_RESKEY_openstackcli server show \
|
||||||
|
+ --format value \
|
||||||
|
+ --column OS-EXT-AZ:availability_zone \
|
||||||
|
+ $node_id)
|
||||||
|
+
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_az -v "$value"
|
||||||
|
+
|
||||||
|
+ # Network data: ports
|
||||||
|
+ value=""
|
||||||
|
+ for port_id in $($OCF_RESKEY_openstackcli port list \
|
||||||
|
+ --format value \
|
||||||
|
+ --column id \
|
||||||
|
+ --server $node_id); do
|
||||||
|
+ subnet_id=$($OCF_RESKEY_openstackcli port show \
|
||||||
|
+ --format json \
|
||||||
|
+ --column fixed_ips \
|
||||||
|
+ ${port_id} | grep -P '\"subnet_id\": \".*\",$' |
|
||||||
|
+ grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}')
|
||||||
|
+ value+="${subnet_id}:${port_id},"
|
||||||
|
+ done
|
||||||
|
+ value=$(echo ${value} | sed -e 's/,$//g')
|
||||||
|
+
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_ports -v "$value"
|
||||||
|
+
|
||||||
|
+ if [ ! -z "$OS_REGION_NAME" ] ; then
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_region -v "$OS_REGION_NAME"
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ if [ ! -z "$OS_TENANT_ID" ] ; then
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_tenant_id -v "$OS_TENANT_ID"
|
||||||
|
+
|
||||||
|
+ if [ ! -z "$OS_TENANT_NAME" ] ; then
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_tenant_name -v "$OS_TENANT_NAME"
|
||||||
|
+ fi
|
||||||
|
+ else
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_project_id -v "$OS_PROJECT_ID"
|
||||||
|
+
|
||||||
|
+ if [ ! -z "$OS_PROJECT_NAME" ] ; then
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_project_name -v "$OS_PROJECT_NAME"
|
||||||
|
+ fi
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+OSInfo_usage() {
|
||||||
|
+ cat <<END
|
||||||
|
+usage: $0 {start|stop|monitor|validate-all|meta-data}
|
||||||
|
+
|
||||||
|
+Expects to have a fully populated OCF RA-compliant environment set.
|
||||||
|
+END
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+OSInfo_start() {
|
||||||
|
+ echo $OCF_RESKEY_clone > $OCF_RESKEY_pidfile
|
||||||
|
+ OSInfoStats
|
||||||
|
+ exit $OCF_SUCCESS
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+OSInfo_stop() {
|
||||||
|
+ rm -f $OCF_RESKEY_pidfile
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_id
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_flavor
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_az
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_ports
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_region
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_tenant_id
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_tenant_name
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_project_id
|
||||||
|
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_project_name
|
||||||
|
+ exit $OCF_SUCCESS
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+OSInfo_monitor() {
|
||||||
|
+ if [ -f "$OCF_RESKEY_pidfile" ] ; then
|
||||||
|
+ OSInfoStats
|
||||||
|
+ exit $OCF_RUNNING
|
||||||
|
+ fi
|
||||||
|
+ exit $OCF_NOT_RUNNING
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+OSInfo_validate() {
|
||||||
|
+ check_binary "$OCF_RESKEY_curlcli"
|
||||||
|
+ check_binary "$OCF_RESKEY_openstackcli"
|
||||||
|
+ check_binary "$OCF_RESKEY_pythoncli"
|
||||||
|
+
|
||||||
|
+ return $OCF_SUCCESS
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+if [ $# -ne 1 ]; then
|
||||||
|
+ OSInfo_usage
|
||||||
|
+ exit $OCF_ERR_ARGS
|
||||||
|
+fi
|
||||||
|
+
|
||||||
|
+if [ x != x${OCF_RESKEY_delay} ]; then
|
||||||
|
+ OCF_RESKEY_delay="-d ${OCF_RESKEY_delay}"
|
||||||
|
+fi
|
||||||
|
+
|
||||||
|
+case $__OCF_ACTION in
|
||||||
|
+meta-data) meta_data
|
||||||
|
+ exit $OCF_SUCCESS
|
||||||
|
+ ;;
|
||||||
|
+start) OSInfo_validate || exit $?
|
||||||
|
+ OSInfo_start
|
||||||
|
+ ;;
|
||||||
|
+stop) OSInfo_stop
|
||||||
|
+ ;;
|
||||||
|
+monitor) OSInfo_monitor
|
||||||
|
+ ;;
|
||||||
|
+validate-all) OSInfo_validate
|
||||||
|
+ ;;
|
||||||
|
+usage|help) OSInfo_usage
|
||||||
|
+ exit $OCF_SUCCESS
|
||||||
|
+ ;;
|
||||||
|
+*) OSInfo_usage
|
||||||
|
+ exit $OCF_ERR_UNIMPLEMENTED
|
||||||
|
+ ;;
|
||||||
|
+esac
|
||||||
|
+
|
||||||
|
+exit $?
|
||||||
|
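The agent above publishes its data as transient node attributes (openstack_id, openstack_flavor, openstack_az, openstack_ports, and optionally region/tenant/project values). A quick way to inspect what was recorded on the local node, shown here only as an illustration (attrd_updater and crm_node are assumed to be in the PATH, as on a Pacemaker node):

    # read back the openstack_ports attribute recorded by openstack-info
    attrd_updater --query -n openstack_ports -N "$(crm_node -n)"
    # prints something like: name="openstack_ports" host="node1" value="<subnet_id>:<port_id>,..."
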
diff --color -uNr a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in
|
||||||
|
--- a/heartbeat/openstack-info.in 2022-03-15 16:14:29.370209063 +0100
|
||||||
|
+++ b/heartbeat/openstack-info.in 2022-03-15 16:17:36.234840018 +0100
|
||||||
|
@@ -32,16 +32,16 @@
|
||||||
|
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||||
|
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||||
|
|
||||||
|
+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh
|
||||||
|
+
|
||||||
|
# Defaults
|
||||||
|
OCF_RESKEY_pidfile_default="$HA_RSCTMP/OSInfo-${OCF_RESOURCE_HOSTNAME}"
|
||||||
|
OCF_RESKEY_delay_default="0"
|
||||||
|
OCF_RESKEY_clone_default="0"
|
||||||
|
OCF_RESKEY_curlcli_default="/usr/bin/curl"
|
||||||
|
-OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
|
||||||
|
OCF_RESKEY_pythoncli_default="@PYTHON@"
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_curlcli=${OCF_RESKEY_curlcli_default}}
|
||||||
|
-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
|
||||||
|
: ${OCF_RESKEY_pythoncli=${OCF_RESKEY_pythoncli_default}}
|
||||||
|
: ${OCF_RESKEY_pidfile=${OCF_RESKEY_pidfile_default}}
|
||||||
|
: ${OCF_RESKEY_delay=${OCF_RESKEY_delay_default}}
|
||||||
|
@@ -70,25 +70,23 @@
|
||||||
|
<shortdesc lang="en">Records various node attributes in the CIB</shortdesc>
|
||||||
|
|
||||||
|
<parameters>
|
||||||
|
+END
|
||||||
|
+
|
||||||
|
+common_meta_data
|
||||||
|
+
|
||||||
|
+ cat <<END
|
||||||
|
<parameter name="pidfile" unique="0">
|
||||||
|
<longdesc lang="en">PID file</longdesc>
|
||||||
|
<shortdesc lang="en">PID file</shortdesc>
|
||||||
|
<content type="string" default="${OCF_RESKEY_pidfile_default}" />
|
||||||
|
</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="delay" unique="0">
|
||||||
|
<longdesc lang="en">Interval to allow values to stabilize</longdesc>
|
||||||
|
<shortdesc lang="en">Dampening Delay</shortdesc>
|
||||||
|
<content type="string" default="${OCF_RESKEY_delay_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
-<parameter name="openrc" required="1">
|
||||||
|
-<longdesc lang="en">
|
||||||
|
-Valid Openstack credentials as openrc file from api_access/openrc.
|
||||||
|
-</longdesc>
|
||||||
|
-<shortdesc lang="en">openrc file</shortdesc>
|
||||||
|
-<content type="string" />
|
||||||
|
-</parameter>
|
||||||
|
-
|
||||||
|
<parameter name="curlcli">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Path to command line cURL binary.
|
||||||
|
@@ -97,14 +95,6 @@
|
||||||
|
<content type="string" default="${OCF_RESKEY_curlcli_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
-<parameter name="openstackcli">
|
||||||
|
-<longdesc lang="en">
|
||||||
|
-Path to command line tools for openstack.
|
||||||
|
-</longdesc>
|
||||||
|
-<shortdesc lang="en">Path to Openstack CLI tool</shortdesc>
|
||||||
|
-<content type="string" default="${OCF_RESKEY_openstackcli_default}" />
|
||||||
|
-</parameter>
|
||||||
|
-
|
||||||
|
<parameter name="pythoncli">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Path to command line Python interpreter.
|
||||||
|
@@ -116,9 +106,9 @@
|
||||||
|
</parameters>
|
||||||
|
|
||||||
|
<actions>
|
||||||
|
-<action name="start" timeout="20s" />
|
||||||
|
-<action name="stop" timeout="20s" />
|
||||||
|
-<action name="monitor" timeout="20s" interval="60s"/>
|
||||||
|
+<action name="start" timeout="180s" />
|
||||||
|
+<action name="stop" timeout="180s" />
|
||||||
|
+<action name="monitor" timeout="180s" interval="60s"/>
|
||||||
|
<action name="meta-data" timeout="5s" />
|
||||||
|
<action name="validate-all" timeout="20s" />
|
||||||
|
</actions>
|
||||||
|
@@ -134,7 +124,7 @@
|
||||||
|
local node
|
||||||
|
local node_id
|
||||||
|
|
||||||
|
- . $OCF_RESKEY_openrc
|
||||||
|
+ get_config
|
||||||
|
|
||||||
|
# Nova data: server ID
|
||||||
|
node_id=$($OCF_RESKEY_curlcli \
|
||||||
|
@@ -244,16 +234,6 @@
|
||||||
|
check_binary "$OCF_RESKEY_openstackcli"
|
||||||
|
check_binary "$OCF_RESKEY_pythoncli"
|
||||||
|
|
||||||
|
- if [ -z "$OCF_RESKEY_openrc" ]; then
|
||||||
|
- ocf_exit_reason "openrc parameter not set"
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
- if [ ! -f "$OCF_RESKEY_openrc" ] ; then
|
||||||
|
- ocf_exit_reason "openrc file not found"
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
}
|
||||||
|
|
||||||
|
diff --color -uNr a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip
|
||||||
|
--- a/heartbeat/openstack-virtual-ip 2022-03-15 16:14:29.370209063 +0100
|
||||||
|
+++ b/heartbeat/openstack-virtual-ip 2022-03-15 16:17:36.235840021 +0100
|
||||||
|
@@ -34,10 +34,9 @@
|
||||||
|
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||||
|
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||||
|
|
||||||
|
-# Defaults
|
||||||
|
-OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
|
||||||
|
+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh
|
||||||
|
|
||||||
|
-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
|
||||||
|
+# Defaults
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
@@ -68,22 +67,11 @@
|
||||||
|
<shortdesc lang="en">Move a virtual IP</shortdesc>
|
||||||
|
|
||||||
|
<parameters>
|
||||||
|
-<parameter name="openstackcli">
|
||||||
|
-<longdesc lang="en">
|
||||||
|
-Path to command line tools for openstack.
|
||||||
|
-</longdesc>
|
||||||
|
-<shortdesc lang="en">Path to Openstack CLI tool</shortdesc>
|
||||||
|
-<content type="string" default="${OCF_RESKEY_openstackcli_default}" />
|
||||||
|
-</parameter>
|
||||||
|
+END
|
||||||
|
|
||||||
|
-<parameter name="openrc" required="1">
|
||||||
|
-<longdesc lang="en">
|
||||||
|
-Valid Openstack credentials as openrc file from api_access/openrc.
|
||||||
|
-</longdesc>
|
||||||
|
-<shortdesc lang="en">openrc file</shortdesc>
|
||||||
|
-<content type="string" />
|
||||||
|
-</parameter>
|
||||||
|
+common_meta_data
|
||||||
|
|
||||||
|
+cat <<END
|
||||||
|
<parameter name="ip" required="1">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Virtual IP Address.
|
||||||
|
@@ -105,7 +93,7 @@
|
||||||
|
<actions>
|
||||||
|
<action name="start" timeout="180s" />
|
||||||
|
<action name="stop" timeout="180s" />
|
||||||
|
-<action name="monitor" depth="0" timeout="30s" interval="60s" />
|
||||||
|
+<action name="monitor" depth="0" timeout="180s" interval="60s" />
|
||||||
|
<action name="validate-all" timeout="5s" />
|
||||||
|
<action name="meta-data" timeout="5s" />
|
||||||
|
</actions>
|
||||||
|
@@ -128,17 +116,7 @@
|
||||||
|
osvip_validate() {
|
||||||
|
check_binary "$OCF_RESKEY_openstackcli"
|
||||||
|
|
||||||
|
- if [ -z "$OCF_RESKEY_openrc" ]; then
|
||||||
|
- ocf_exit_reason "openrc parameter not set"
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
- if [ ! -f "$OCF_RESKEY_openrc" ] ; then
|
||||||
|
- ocf_exit_reason "openrc file not found"
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
- . $OCF_RESKEY_openrc
|
||||||
|
+ get_config
|
||||||
|
|
||||||
|
${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) > /dev/null 2>&1
|
||||||
|
if [ $? -ne 0 ] ; then
|
@ -0,0 +1,72 @@
|
|||||||
|
From 64f434014bc198055478a139532c7cc133967c5d Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Fri, 8 Jul 2022 15:41:34 +0200
Subject: [PATCH] openstack-agents: fixes

- openstack-cinder-volume: dont do volume_local_check during start/stop-action
- openstack-floating-ip/openstack-virtual-ip: dont fail in validate()
  during probe-calls
- openstack-floating-ip: fix awk only catching last id for node_port_ids
---
 heartbeat/openstack-cinder-volume | 2 +-
 heartbeat/openstack-floating-ip   | 4 ++--
 heartbeat/openstack-virtual-ip    | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume
|
||||||
|
index cc12e58ae..19bf04faf 100755
|
||||||
|
--- a/heartbeat/openstack-cinder-volume
|
||||||
|
+++ b/heartbeat/openstack-cinder-volume
|
||||||
|
@@ -138,7 +138,7 @@ osvol_monitor() {
|
||||||
|
|
||||||
|
node_id=$(_get_node_id)
|
||||||
|
|
||||||
|
- if ocf_is_true $OCF_RESKEY_volume_local_check ; then
|
||||||
|
+ if [ "$__OCF_ACTION" = "monitor" ] && ocf_is_true $OCF_RESKEY_volume_local_check ; then
|
||||||
|
#
|
||||||
|
# Is the volume attached?
|
||||||
|
# We check the local devices
|
||||||
|
diff --git a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip
|
||||||
|
index 8c135cc24..6e2895654 100755
|
||||||
|
--- a/heartbeat/openstack-floating-ip
|
||||||
|
+++ b/heartbeat/openstack-floating-ip
|
||||||
|
@@ -111,7 +111,7 @@ osflip_validate() {
|
||||||
|
fi
|
||||||
|
|
||||||
|
${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) > /dev/null 2>&1
|
||||||
|
- if [ $? -ne 0 ] ; then
|
||||||
|
+ if [ $? -ne 0 ] && ! ocf_is_probe; then
|
||||||
|
ocf_log warn "attr_updater failed to get openstack_ports attribute of node $OCF_RESOURCE_INSTANCE"
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
@@ -129,7 +129,7 @@ osflip_monitor() {
|
||||||
|
node_port_ids=$(${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) \
|
||||||
|
| awk -F= '{gsub("\"","");print $NF}' \
|
||||||
|
| tr ',' ' ' \
|
||||||
|
- | awk -F: '{print $NF}')
|
||||||
|
+ | awk '{gsub("[^ ]*:", "");print}')
|
||||||
|
|
||||||
|
# Is the IP active and attached?
|
||||||
|
result=$($OCF_RESKEY_openstackcli floating ip show \
|
||||||
|
diff --git a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip
|
||||||
|
index a1084c420..c654d980a 100755
|
||||||
|
--- a/heartbeat/openstack-virtual-ip
|
||||||
|
+++ b/heartbeat/openstack-virtual-ip
|
||||||
|
@@ -119,7 +119,7 @@ osvip_validate() {
|
||||||
|
get_config
|
||||||
|
|
||||||
|
${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) > /dev/null 2>&1
|
||||||
|
- if [ $? -ne 0 ] ; then
|
||||||
|
+ if [ $? -ne 0 ] && ! ocf_is_probe; then
|
||||||
|
ocf_log warn "attr_updater failed to get openstack_ports attribute of node $OCF_RESOURCE_INSTANCE"
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
@@ -136,7 +136,7 @@ osvip_monitor() {
|
||||||
|
--format value \
|
||||||
|
--column allowed_address_pairs \
|
||||||
|
${node_port_id})
|
||||||
|
- if echo $result | grep -q $OCF_RESKEY_ip ; then
|
||||||
|
+ if echo $result | grep -q "$OCF_RESKEY_ip"; then
|
||||||
|
${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -S status -n openstack_virtual_ip -v $OCF_RESKEY_ip
|
||||||
|
|
||||||
|
return $OCF_SUCCESS
|
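To see why the awk change above matters, here is a small standalone illustration (not part of the patch) that reuses the sample openstack_ports value from the openstack-info metadata earlier in this series:

    ports='7960d889-9750-4160-bf41-c69a41ad72d9:96530d18-57a3-4718-af32-30f2a74c22a2,b0e55a06-bd75-468d-8baa-22cfeb65799f:a55ae917-8016-4b1e-8ffa-04311b9dc7d6'

    # old extraction: with FS=":" the whole line is one record, so $NF is only the very last field
    echo "$ports" | tr ',' ' ' | awk -F: '{print $NF}'
    # -> a55ae917-8016-4b1e-8ffa-04311b9dc7d6              (only one port id survives)

    # new extraction: strip every "<something>:" prefix, keeping every port id
    echo "$ports" | tr ',' ' ' | awk '{gsub("[^ ]*:", "");print}'
    # -> 96530d18-57a3-4718-af32-30f2a74c22a2 a55ae917-8016-4b1e-8ffa-04311b9dc7d6
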
26
SOURCES/bz1908148-openstack-info-fix-bashism.patch
Normal file
26
SOURCES/bz1908148-openstack-info-fix-bashism.patch
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
From 8b1d3257e5176a2f50a843a21888c4b4f51f370b Mon Sep 17 00:00:00 2001
From: Valentin Vidic <vvidic@valentin-vidic.from.hr>
Date: Sun, 3 Apr 2022 20:31:50 +0200
Subject: [PATCH] openstack-info: fix bashism

Also simplify striping of trailing comma.
---
 heartbeat/openstack-info.in | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in
|
||||||
|
index f6dc1ee4d..f3a59fc7a 100755
|
||||||
|
--- a/heartbeat/openstack-info.in
|
||||||
|
+++ b/heartbeat/openstack-info.in
|
||||||
|
@@ -167,9 +167,9 @@ OSInfoStats() {
|
||||||
|
--column fixed_ips \
|
||||||
|
${port_id} | grep -P '\"subnet_id\": \".*\",$' |
|
||||||
|
grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}')
|
||||||
|
- value+="${subnet_id}:${port_id},"
|
||||||
|
+ value="${value}${subnet_id}:${port_id},"
|
||||||
|
done
|
||||||
|
- value=$(echo ${value} | sed -e 's/,$//g')
|
||||||
|
+ value=${value%,}
|
||||||
|
|
||||||
|
${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_ports -v "$value"
|
||||||
|
|
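The two replacements above are plain POSIX sh idioms. A tiny standalone sketch, for illustration only (the subnet/port names are placeholders):

    # "+=" is a bash extension, so build the string with a plain assignment instead
    value=""
    for pair in subnetA:portA subnetB:portB; do     # placeholder tuples
        value="${value}${pair},"
    done
    value=${value%,}      # drop the trailing comma with parameter expansion, no sed needed
    echo "$value"         # -> subnetA:portA,subnetB:portB
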
52
SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch
Normal file
52
SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
From f91804ff4772e3ab41f46e28d370f57898700333 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Thu, 10 Dec 2020 08:19:21 +0100
Subject: [PATCH] fixes #1625: infinite loop in SML lexer

Reason was a lookahead-only pattern which was included in the state
where the lookahead was transitioning to.
---
 pygments/lexers/ml.py | 12 ++++++------
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py
|
||||||
|
index 8ca8ce3eb..f2ac367c5 100644
|
||||||
|
--- a/pygments/lexers/ml.py
|
||||||
|
+++ b/pygments/lexers/ml.py
|
||||||
|
@@ -142,7 +142,7 @@ def id_callback(self, match):
|
||||||
|
(r'#\s+(%s)' % symbolicid_re, Name.Label),
|
||||||
|
# Some reserved words trigger a special, local lexer state change
|
||||||
|
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
|
||||||
|
- (r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
|
||||||
|
+ (r'\b(exception)\b(?!\')', Keyword.Reserved, 'ename'),
|
||||||
|
(r'\b(functor|include|open|signature|structure)\b(?!\')',
|
||||||
|
Keyword.Reserved, 'sname'),
|
||||||
|
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
|
||||||
|
@@ -315,15 +315,14 @@ def id_callback(self, match):
|
||||||
|
'ename': [
|
||||||
|
include('whitespace'),
|
||||||
|
|
||||||
|
- (r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
|
||||||
|
+ (r'(and\b)(\s+)(%s)' % alphanumid_re,
|
||||||
|
bygroups(Keyword.Reserved, Text, Name.Class)),
|
||||||
|
- (r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
|
||||||
|
+ (r'(and\b)(\s*)(%s)' % symbolicid_re,
|
||||||
|
bygroups(Keyword.Reserved, Text, Name.Class)),
|
||||||
|
(r'\b(of)\b(?!\')', Keyword.Reserved),
|
||||||
|
+ (r'(%s)|(%s)' % (alphanumid_re, symbolicid_re), Name.Class),
|
||||||
|
|
||||||
|
- include('breakout'),
|
||||||
|
- include('core'),
|
||||||
|
- (r'\S+', Error),
|
||||||
|
+ default('#pop'),
|
||||||
|
],
|
||||||
|
|
||||||
|
'datcon': [
|
||||||
|
@@ -445,6 +444,7 @@ class OcamlLexer(RegexLexer):
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
+
|
||||||
|
class OpaLexer(RegexLexer):
|
||||||
|
"""
|
||||||
|
Lexer for the Opa language (http://opalang.org).
|
138
SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch
Normal file
138
SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch
Normal file
@ -0,0 +1,138 @@
|
|||||||
|
From 2e7e8c4a7b318f4032493773732754e418279a14 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Mon, 11 Jan 2021 09:46:34 +0100
Subject: [PATCH] Fix several exponential/cubic complexity regexes found by Ben
 Caller/Doyensec

---
 pygments/lexers/archetype.py | 2 +-
 pygments/lexers/factor.py    | 4 ++--
 pygments/lexers/jvm.py       | 1 -
 pygments/lexers/matlab.py    | 6 +++---
 pygments/lexers/objective.py | 4 ++--
 pygments/lexers/templates.py | 2 +-
 pygments/lexers/varnish.py   | 2 +-
 8 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py
|
||||||
|
index 65046613d..26f5ea8c9 100644
|
||||||
|
--- a/pygments/lexers/archetype.py
|
||||||
|
+++ b/pygments/lexers/archetype.py
|
||||||
|
@@ -58,7 +58,7 @@ class AtomsLexer(RegexLexer):
|
||||||
|
(r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|'
|
||||||
|
r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date),
|
||||||
|
(r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
|
||||||
|
- (r'[+-]?(\d+)*\.\d+%?', Number.Float),
|
||||||
|
+ (r'[+-]?\d*\.\d+%?', Number.Float),
|
||||||
|
(r'0x[0-9a-fA-F]+', Number.Hex),
|
||||||
|
(r'[+-]?\d+%?', Number.Integer),
|
||||||
|
],
|
||||||
|
diff --git a/pygments/lexers/factor.py b/pygments/lexers/factor.py
|
||||||
|
index be7b30dff..9200547f9 100644
|
||||||
|
--- a/pygments/lexers/factor.py
|
||||||
|
+++ b/pygments/lexers/factor.py
|
||||||
|
@@ -265,7 +265,7 @@ class FactorLexer(RegexLexer):
|
||||||
|
(r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),
|
||||||
|
|
||||||
|
# strings
|
||||||
|
- (r'"""\s+(?:.|\n)*?\s+"""', String),
|
||||||
|
+ (r'"""\s(?:.|\n)*?\s"""', String),
|
||||||
|
(r'"(?:\\\\|\\"|[^"])*"', String),
|
||||||
|
(r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
|
||||||
|
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),
|
||||||
|
@@ -322,7 +322,7 @@ class FactorLexer(RegexLexer):
|
||||||
|
'slots': [
|
||||||
|
(r'\s+', Text),
|
||||||
|
(r';\s', Keyword, '#pop'),
|
||||||
|
- (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
|
||||||
|
+ (r'(\{\s+)(\S+)(\s[^}]+\s\}\s)',
|
||||||
|
bygroups(Text, Name.Variable, Text)),
|
||||||
|
(r'\S+', Name.Variable),
|
||||||
|
],
|
||||||
|
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
|
||||||
|
index 62dfd45e5..9a9397c2d 100644
|
||||||
|
--- a/pygments/lexers/jvm.py
|
||||||
|
+++ b/pygments/lexers/jvm.py
|
||||||
|
@@ -981,7 +981,6 @@ class CeylonLexer(RegexLexer):
|
||||||
|
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
|
||||||
|
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
|
||||||
|
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
|
||||||
|
- (r'".*``.*``.*"', String.Interpol),
|
||||||
|
(r'(\.)([a-z_]\w*)',
|
||||||
|
bygroups(Operator, Name.Attribute)),
|
||||||
|
(r'[a-zA-Z_]\w*:', Name.Label),
|
||||||
|
diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py
|
||||||
|
index 4823c6a7e..578848623 100644
|
||||||
|
--- a/pygments/lexers/matlab.py
|
||||||
|
+++ b/pygments/lexers/matlab.py
|
||||||
|
@@ -137,7 +137,7 @@ class MatlabLexer(RegexLexer):
|
||||||
|
(r'.', Comment.Multiline),
|
||||||
|
],
|
||||||
|
'deffunc': [
|
||||||
|
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
|
||||||
|
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
|
||||||
|
bygroups(Whitespace, Text, Whitespace, Punctuation,
|
||||||
|
Whitespace, Name.Function, Punctuation, Text,
|
||||||
|
Punctuation, Whitespace), '#pop'),
|
||||||
|
@@ -638,7 +638,7 @@ class OctaveLexer(RegexLexer):
|
||||||
|
(r"[^']*'", String, '#pop'),
|
||||||
|
],
|
||||||
|
'deffunc': [
|
||||||
|
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
|
||||||
|
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
|
||||||
|
bygroups(Whitespace, Text, Whitespace, Punctuation,
|
||||||
|
Whitespace, Name.Function, Punctuation, Text,
|
||||||
|
Punctuation, Whitespace), '#pop'),
|
||||||
|
@@ -710,7 +710,7 @@ class ScilabLexer(RegexLexer):
|
||||||
|
(r'.', String, '#pop'),
|
||||||
|
],
|
||||||
|
'deffunc': [
|
||||||
|
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
|
||||||
|
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
|
||||||
|
bygroups(Whitespace, Text, Whitespace, Punctuation,
|
||||||
|
Whitespace, Name.Function, Punctuation, Text,
|
||||||
|
Punctuation, Whitespace), '#pop'),
|
||||||
|
diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py
|
||||||
|
index 34e4062f6..38ac9bb05 100644
|
||||||
|
--- a/pygments/lexers/objective.py
|
||||||
|
+++ b/pygments/lexers/objective.py
|
||||||
|
@@ -261,11 +261,11 @@ class LogosLexer(ObjectiveCppLexer):
|
||||||
|
'logos_classname'),
|
||||||
|
(r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
|
||||||
|
bygroups(Keyword, Text, Name.Class)),
|
||||||
|
- (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)',
|
||||||
|
+ (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)',
|
||||||
|
bygroups(Keyword, Text, Name.Variable, Text, String, Text)),
|
||||||
|
(r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation),
|
||||||
|
'function'),
|
||||||
|
- (r'(%new)(\s*)(\()(\s*.*?\s*)(\))',
|
||||||
|
+ (r'(%new)(\s*)(\()(.*?)(\))',
|
||||||
|
bygroups(Keyword, Text, Keyword, String, Keyword)),
|
||||||
|
(r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)),
|
||||||
|
inherit,
|
||||||
|
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
|
||||||
|
index 33c06c4c4..5c3346b4c 100644
|
||||||
|
--- a/pygments/lexers/templates.py
|
||||||
|
+++ b/pygments/lexers/templates.py
|
||||||
|
@@ -1405,7 +1405,7 @@ class EvoqueLexer(RegexLexer):
|
||||||
|
# see doc for handling first name arg: /directives/evoque/
|
||||||
|
# + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
|
||||||
|
# should be using(PythonLexer), not passed out as String
|
||||||
|
- (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
|
||||||
|
+ (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?'
|
||||||
|
r'(.*?)((?(4)%)\})',
|
||||||
|
bygroups(Punctuation, Name.Builtin, Punctuation, None,
|
||||||
|
String, using(PythonLexer), Punctuation)),
|
||||||
|
diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py
|
||||||
|
index 23653f7a1..9d358bd7c 100644
|
||||||
|
--- a/pygments/lexers/varnish.py
|
||||||
|
+++ b/pygments/lexers/varnish.py
|
||||||
|
@@ -61,7 +61,7 @@ def analyse_text(text):
|
||||||
|
bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)),
|
||||||
|
(r'(\.probe)(\s*=\s*)(\{)',
|
||||||
|
bygroups(Name.Attribute, Operator, Punctuation), 'probe'),
|
||||||
|
- (r'(\.\w+\b)(\s*=\s*)([^;]*)(\s*;)',
|
||||||
|
+ (r'(\.\w+\b)(\s*=\s*)([^;\s]*)(\s*;)',
|
||||||
|
bygroups(Name.Attribute, Operator, using(this), Punctuation)),
|
||||||
|
(r'\{', Punctuation, '#push'),
|
||||||
|
(r'\}', Punctuation, '#pop'),
|
903
SOURCES/bz1977012-azure-events-az-new-ra.patch
Normal file
903
SOURCES/bz1977012-azure-events-az-new-ra.patch
Normal file
@ -0,0 +1,903 @@
|
|||||||
|
From 5dcd5153f0318e4766f7f4d3e61dfdb4b352c39c Mon Sep 17 00:00:00 2001
From: MSSedusch <sedusch@microsoft.com>
Date: Mon, 30 May 2022 15:08:10 +0200
Subject: [PATCH 1/2] add new Azure Events AZ resource agent

---
 .gitignore                   |   1 +
 configure.ac                 |   8 +
 doc/man/Makefile.am          |   4 +
 heartbeat/Makefile.am        |   4 +
 heartbeat/azure-events-az.in | 782 +++++++++++++++++++++++++++++++++++
 5 files changed, 799 insertions(+)
 create mode 100644 heartbeat/azure-events-az.in

diff --git a/.gitignore b/.gitignore
|
||||||
|
index 0c259b5cf..e2b7c039c 100644
|
||||||
|
--- a/.gitignore
|
||||||
|
+++ b/.gitignore
|
||||||
|
@@ -54,6 +54,7 @@ heartbeat/Squid
|
||||||
|
heartbeat/SysInfo
|
||||||
|
heartbeat/aws-vpc-route53
|
||||||
|
heartbeat/azure-events
|
||||||
|
+heartbeat/azure-events-az
|
||||||
|
heartbeat/clvm
|
||||||
|
heartbeat/conntrackd
|
||||||
|
heartbeat/dnsupdate
|
||||||
|
diff --git a/configure.ac b/configure.ac
|
||||||
|
index eeecfad0e..5716a2be2 100644
|
||||||
|
--- a/configure.ac
|
||||||
|
+++ b/configure.ac
|
||||||
|
@@ -523,6 +523,13 @@ if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then
|
||||||
|
fi
|
||||||
|
AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1)
|
||||||
|
|
||||||
|
+BUILD_AZURE_EVENTS_AZ=1
|
||||||
|
+if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then
|
||||||
|
+ BUILD_AZURE_EVENTS_AZ=0
|
||||||
|
+ AC_MSG_WARN("Not building azure-events-az")
|
||||||
|
+fi
|
||||||
|
+AM_CONDITIONAL(BUILD_AZURE_EVENTS_AZ, test $BUILD_AZURE_EVENTS_AZ -eq 1)
|
||||||
|
+
|
||||||
|
BUILD_GCP_PD_MOVE=1
|
||||||
|
if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then
|
||||||
|
BUILD_GCP_PD_MOVE=0
|
||||||
|
@@ -976,6 +983,7 @@ rgmanager/Makefile \
|
||||||
|
|
||||||
|
dnl Files we output that need to be executable
|
||||||
|
AC_CONFIG_FILES([heartbeat/azure-events], [chmod +x heartbeat/azure-events])
|
||||||
|
+AC_CONFIG_FILES([heartbeat/azure-events-az], [chmod +x heartbeat/azure-events-az])
|
||||||
|
AC_CONFIG_FILES([heartbeat/AoEtarget], [chmod +x heartbeat/AoEtarget])
|
||||||
|
AC_CONFIG_FILES([heartbeat/ManageRAID], [chmod +x heartbeat/ManageRAID])
|
||||||
|
AC_CONFIG_FILES([heartbeat/ManageVE], [chmod +x heartbeat/ManageVE])
|
||||||
|
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
|
||||||
|
index cd8fd16bf..658c700ac 100644
|
||||||
|
--- a/doc/man/Makefile.am
|
||||||
|
+++ b/doc/man/Makefile.am
|
||||||
|
@@ -219,6 +219,10 @@ if BUILD_AZURE_EVENTS
|
||||||
|
man_MANS += ocf_heartbeat_azure-events.7
|
||||||
|
endif
|
||||||
|
|
||||||
|
+if BUILD_AZURE_EVENTS_AZ
|
||||||
|
+man_MANS += ocf_heartbeat_azure-events-az.7
|
||||||
|
+endif
|
||||||
|
+
|
||||||
|
if BUILD_GCP_PD_MOVE
|
||||||
|
man_MANS += ocf_heartbeat_gcp-pd-move.7
|
||||||
|
endif
|
||||||
|
diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
|
||||||
|
index 20d41e36a..1133dc13e 100644
|
||||||
|
--- a/heartbeat/Makefile.am
|
||||||
|
+++ b/heartbeat/Makefile.am
|
||||||
|
@@ -188,6 +188,10 @@ if BUILD_AZURE_EVENTS
|
||||||
|
ocf_SCRIPTS += azure-events
|
||||||
|
endif
|
||||||
|
|
||||||
|
+if BUILD_AZURE_EVENTS_AZ
|
||||||
|
+ocf_SCRIPTS += azure-events-az
|
||||||
|
+endif
|
||||||
|
+
|
||||||
|
if BUILD_GCP_PD_MOVE
|
||||||
|
ocf_SCRIPTS += gcp-pd-move
|
||||||
|
endif
|
||||||
|
diff --git a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in
|
||||||
|
new file mode 100644
|
||||||
|
index 000000000..616fc8d9e
|
||||||
|
--- /dev/null
|
||||||
|
+++ b/heartbeat/azure-events-az.in
|
||||||
|
@@ -0,0 +1,782 @@
|
||||||
|
+#!@PYTHON@ -tt
|
||||||
|
+#
|
||||||
|
+# Resource agent for monitoring Azure Scheduled Events
|
||||||
|
+#
|
||||||
|
+# License: GNU General Public License (GPL)
|
||||||
|
+# (c) 2018 Tobias Niekamp, Microsoft Corp.
|
||||||
|
+# and Linux-HA contributors
|
||||||
|
+
|
||||||
|
+import os
|
||||||
|
+import sys
|
||||||
|
+import time
|
||||||
|
+import subprocess
|
||||||
|
+import json
|
||||||
|
+try:
|
||||||
|
+ import urllib2
|
||||||
|
+ from urllib2 import URLError
|
||||||
|
+except ImportError:
|
||||||
|
+ import urllib.request as urllib2
|
||||||
|
+ from urllib.error import URLError
|
||||||
|
+import socket
|
||||||
|
+from collections import defaultdict
|
||||||
|
+
|
||||||
|
+OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os.environ.get("OCF_ROOT"))
|
||||||
|
+sys.path.append(OCF_FUNCTIONS_DIR)
|
||||||
|
+import ocf
|
||||||
|
+
|
||||||
|
+##############################################################################
|
||||||
|
+
|
||||||
|
+
|
||||||
|
+VERSION = "0.10"
|
||||||
|
+USER_AGENT = "Pacemaker-ResourceAgent/%s %s" % (VERSION, ocf.distro())
|
||||||
|
+
|
||||||
|
+attr_globalPullState = "azure-events-az_globalPullState"
|
||||||
|
+attr_lastDocVersion = "azure-events-az_lastDocVersion"
|
||||||
|
+attr_curNodeState = "azure-events-az_curNodeState"
|
||||||
|
+attr_pendingEventIDs = "azure-events-az_pendingEventIDs"
|
||||||
|
+attr_healthstate = "#health-azure"
|
||||||
|
+
|
||||||
|
+default_loglevel = ocf.logging.INFO
|
||||||
|
+default_relevantEventTypes = set(["Reboot", "Redeploy"])
|
||||||
|
+
|
||||||
|
+global_pullMaxAttempts = 3
|
||||||
|
+global_pullDelaySecs = 1
|
||||||
|
+
|
||||||
|
+##############################################################################
|
||||||
|
+
|
||||||
|
+class attrDict(defaultdict):
|
||||||
|
+ """
|
||||||
|
+ A wrapper for accessing dict keys like an attribute
|
||||||
|
+ """
|
||||||
|
+ def __init__(self, data):
|
||||||
|
+ super(attrDict, self).__init__(attrDict)
|
||||||
|
+ for d in data.keys():
|
||||||
|
+ self.__setattr__(d, data[d])
|
||||||
|
+
|
||||||
|
+ def __getattr__(self, key):
|
||||||
|
+ try:
|
||||||
|
+ return self[key]
|
||||||
|
+ except KeyError:
|
||||||
|
+ raise AttributeError(key)
|
||||||
|
+
|
||||||
|
+ def __setattr__(self, key, value):
|
||||||
|
+ self[key] = value
|
||||||
|
+
|
||||||
|
+##############################################################################
|
||||||
|
+
|
||||||
|
+class azHelper:
|
||||||
|
+ """
|
||||||
|
+ Helper class for Azure's metadata API (including Scheduled Events)
|
||||||
|
+ """
|
||||||
|
+ metadata_host = "http://169.254.169.254/metadata"
|
||||||
|
+ instance_api = "instance"
|
||||||
|
+ events_api = "scheduledevents"
|
||||||
|
+ api_version = "2019-08-01"
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def _sendMetadataRequest(endpoint, postData=None):
|
||||||
|
+ """
|
||||||
|
+ Send a request to Azure's Azure Metadata Service API
|
||||||
|
+ """
|
||||||
|
+ url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version)
|
||||||
|
+ data = ""
|
||||||
|
+ ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData))
|
||||||
|
+ ocf.logger.debug("_sendMetadataRequest: url = %s" % url)
|
||||||
|
+
|
||||||
|
+ if postData and type(postData) != bytes:
|
||||||
|
+ postData = postData.encode()
|
||||||
|
+
|
||||||
|
+ req = urllib2.Request(url, postData)
|
||||||
|
+ req.add_header("Metadata", "true")
|
||||||
|
+ req.add_header("User-Agent", USER_AGENT)
|
||||||
|
+ try:
|
||||||
|
+ resp = urllib2.urlopen(req)
|
||||||
|
+ except URLError as e:
|
||||||
|
+ if hasattr(e, 'reason'):
|
||||||
|
+ ocf.logger.warning("Failed to reach the server: %s" % e.reason)
|
||||||
|
+ clusterHelper.setAttr(attr_globalPullState, "IDLE")
|
||||||
|
+ elif hasattr(e, 'code'):
|
||||||
|
+ ocf.logger.warning("The server couldn\'t fulfill the request. Error code: %s" % e.code)
|
||||||
|
+ clusterHelper.setAttr(attr_globalPullState, "IDLE")
|
||||||
|
+ else:
|
||||||
|
+ data = resp.read()
|
||||||
|
+ ocf.logger.debug("_sendMetadataRequest: response = %s" % data)
|
||||||
|
+
|
||||||
|
+ if data:
|
||||||
|
+ data = json.loads(data)
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("_sendMetadataRequest: finished")
|
||||||
|
+ return data
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def getInstanceInfo():
|
||||||
|
+ """
|
||||||
|
+ Fetch details about the current VM from Azure's Azure Metadata Service API
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("getInstanceInfo: begin")
|
||||||
|
+
|
||||||
|
+ jsondata = azHelper._sendMetadataRequest(azHelper.instance_api)
|
||||||
|
+ ocf.logger.debug("getInstanceInfo: json = %s" % jsondata)
|
||||||
|
+
|
||||||
|
+ if jsondata:
|
||||||
|
+ ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"]))
|
||||||
|
+ return attrDict(jsondata["compute"])
|
||||||
|
+ else:
|
||||||
|
+ ocf.ocf_exit_reason("getInstanceInfo: Unable to get instance info")
|
||||||
|
+ sys.exit(ocf.OCF_ERR_GENERIC)
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def pullScheduledEvents():
|
||||||
|
+ """
|
||||||
|
+ Retrieve all currently scheduled events via Azure Metadata Service API
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("pullScheduledEvents: begin")
|
||||||
|
+
|
||||||
|
+ jsondata = azHelper._sendMetadataRequest(azHelper.events_api)
|
||||||
|
+ ocf.logger.debug("pullScheduledEvents: json = %s" % jsondata)
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("pullScheduledEvents: finished")
|
||||||
|
+ return attrDict(jsondata)
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def forceEvents(eventIDs):
|
||||||
|
+ """
|
||||||
|
+ Force a set of events to start immediately
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("forceEvents: begin")
|
||||||
|
+
|
||||||
|
+ events = []
|
||||||
|
+ for e in eventIDs:
|
||||||
|
+ events.append({
|
||||||
|
+ "EventId": e,
|
||||||
|
+ })
|
||||||
|
+ postData = {
|
||||||
|
+ "StartRequests" : events
|
||||||
|
+ }
|
||||||
|
+ ocf.logger.info("forceEvents: postData = %s" % postData)
|
||||||
|
+ resp = azHelper._sendMetadataRequest(azHelper.events_api, postData=json.dumps(postData))
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("forceEvents: finished")
|
||||||
|
+ return
|
||||||
|
+
|
||||||
|
+##############################################################################
|
||||||
|
+
|
||||||
|
+class clusterHelper:
|
||||||
|
+ """
|
||||||
|
+ Helper functions for Pacemaker control via crm
|
||||||
|
+ """
|
||||||
|
+ @staticmethod
|
||||||
|
+ def _getLocation(node):
|
||||||
|
+ """
|
||||||
|
+ Helper function to retrieve local/global attributes
|
||||||
|
+ """
|
||||||
|
+ if node:
|
||||||
|
+ return ["--node", node]
|
||||||
|
+ else:
|
||||||
|
+ return ["--type", "crm_config"]
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def _exec(command, *args):
|
||||||
|
+ """
|
||||||
|
+ Helper function to execute a UNIX command
|
||||||
|
+ """
|
||||||
|
+ args = list(args)
|
||||||
|
+ ocf.logger.debug("_exec: begin; command = %s, args = %s" % (command, str(args)))
|
||||||
|
+
|
||||||
|
+ def flatten(*n):
|
||||||
|
+ return (str(e) for a in n
|
||||||
|
+ for e in (flatten(*a) if isinstance(a, (tuple, list)) else (str(a),)))
|
||||||
|
+ command = list(flatten([command] + args))
|
||||||
|
+ ocf.logger.debug("_exec: cmd = %s" % " ".join(command))
|
||||||
|
+ try:
|
||||||
|
+ ret = subprocess.check_output(command)
|
||||||
|
+ if type(ret) != str:
|
||||||
|
+ ret = ret.decode()
|
||||||
|
+ ocf.logger.debug("_exec: return = %s" % ret)
|
||||||
|
+ return ret.rstrip()
|
||||||
|
+ except Exception as err:
|
||||||
|
+ ocf.logger.exception(err)
|
||||||
|
+ return None
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def setAttr(key, value, node=None):
|
||||||
|
+ """
|
||||||
|
+ Set the value of a specific global/local attribute in the Pacemaker cluster
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("setAttr: begin; key = %s, value = %s, node = %s" % (key, value, node))
|
||||||
|
+
|
||||||
|
+ if value:
|
||||||
|
+ ret = clusterHelper._exec("crm_attribute",
|
||||||
|
+ "--name", key,
|
||||||
|
+ "--update", value,
|
||||||
|
+ clusterHelper._getLocation(node))
|
||||||
|
+ else:
|
||||||
|
+ ret = clusterHelper._exec("crm_attribute",
|
||||||
|
+ "--name", key,
|
||||||
|
+ "--delete",
|
||||||
|
+ clusterHelper._getLocation(node))
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("setAttr: finished")
|
||||||
|
+ return len(ret) == 0
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def getAttr(key, node=None):
|
||||||
|
+ """
|
||||||
|
+ Retrieve a global/local attribute from the Pacemaker cluster
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("getAttr: begin; key = %s, node = %s" % (key, node))
|
||||||
|
+
|
||||||
|
+ val = clusterHelper._exec("crm_attribute",
|
||||||
|
+ "--name", key,
|
||||||
|
+ "--query", "--quiet",
|
||||||
|
+ "--default", "",
|
||||||
|
+ clusterHelper._getLocation(node))
|
||||||
|
+ ocf.logger.debug("getAttr: finished")
|
||||||
|
+ if not val:
|
||||||
|
+ return None
|
||||||
|
+ return val if not val.isdigit() else int(val)
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def getAllNodes():
|
||||||
|
+ """
|
||||||
|
+ Get a list of hostnames for all nodes in the Pacemaker cluster
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("getAllNodes: begin")
|
||||||
|
+
|
||||||
|
+ nodes = []
|
||||||
|
+ nodeList = clusterHelper._exec("crm_node", "--list")
|
||||||
|
+ for n in nodeList.split("\n"):
|
||||||
|
+ nodes.append(n.split()[1])
|
||||||
|
+ ocf.logger.debug("getAllNodes: finished; return %s" % str(nodes))
|
||||||
|
+
|
||||||
|
+ return nodes
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def getHostNameFromAzName(azName):
|
||||||
|
+ """
|
||||||
|
+ Helper function to get the actual host name from an Azure node name
|
||||||
|
+ """
|
||||||
|
+ return clusterHelper.getAttr("hostName_%s" % azName)
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def removeHoldFromNodes():
|
||||||
|
+ """
|
||||||
|
+ Remove the ON_HOLD state from all nodes in the Pacemaker cluster
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("removeHoldFromNodes: begin")
|
||||||
|
+
|
||||||
|
+ for n in clusterHelper.getAllNodes():
|
||||||
|
+ if clusterHelper.getAttr(attr_curNodeState, node=n) == "ON_HOLD":
|
||||||
|
+ clusterHelper.setAttr(attr_curNodeState, "AVAILABLE", node=n)
|
||||||
|
+ ocf.logger.info("removeHoldFromNodes: removed ON_HOLD from node %s" % n)
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("removeHoldFromNodes: finished")
|
||||||
|
+ return False
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def otherNodesAvailable(exceptNode):
|
||||||
|
+ """
|
||||||
|
+ Check if there are any nodes (except a given node) in the Pacemaker cluster that have state AVAILABLE
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("otherNodesAvailable: begin; exceptNode = %s" % exceptNode)
|
||||||
|
+
|
||||||
|
+ for n in clusterHelper.getAllNodes():
|
||||||
|
+ state = clusterHelper.getAttr(attr_curNodeState, node=n)
|
||||||
|
+ state = stringToNodeState(state) if state else AVAILABLE
|
||||||
|
+ if state == AVAILABLE and n != exceptNode.hostName:
|
||||||
|
+ ocf.logger.info("otherNodesAvailable: at least %s is available" % n)
|
||||||
|
+ ocf.logger.debug("otherNodesAvailable: finished")
|
||||||
|
+ return True
|
||||||
|
+ ocf.logger.info("otherNodesAvailable: no other nodes are available")
|
||||||
|
+ ocf.logger.debug("otherNodesAvailable: finished")
|
||||||
|
+
|
||||||
|
+ return False
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def transitionSummary():
|
||||||
|
+ """
|
||||||
|
+ Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby)
|
||||||
|
+ """
|
||||||
|
+ # <tniek> Is a global crm_simulate "too much"? Or would it be sufficient if there are no planned transitions for a particular node?
|
||||||
|
+ # # crm_simulate -Ls
|
||||||
|
+ # Transition Summary:
|
||||||
|
+ # * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1)
|
||||||
|
+ # * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0)
|
||||||
|
+ # * Move rsc_ip_HN1_HDB03 (Started hsr3-db0 -> hsr3-db1)
|
||||||
|
+ # * Start rsc_nc_HN1_HDB03 (hsr3-db1)
|
||||||
|
+ # # Excepted result when there are no pending actions:
|
||||||
|
+ # Transition Summary:
|
||||||
|
+ ocf.logger.debug("transitionSummary: begin")
|
||||||
|
+
|
||||||
|
+ summary = clusterHelper._exec("crm_simulate", "-Ls")
|
||||||
|
+ if not summary:
|
||||||
|
+ ocf.logger.warning("transitionSummary: could not load transition summary")
|
||||||
|
+ return False
|
||||||
|
+ if summary.find("Transition Summary:") < 0:
|
||||||
|
+ ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary)
|
||||||
|
+ return False
|
||||||
|
+ summary = summary.split("Transition Summary:")[1]
|
||||||
|
+ ret = summary.split("\n").pop(0)
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret))
|
||||||
|
+ return ret
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def listOperationsOnNode(node):
|
||||||
|
+ """
|
||||||
|
+ Get a list of all current operations for a given node (used to check if any resources are pending)
|
||||||
|
+ """
|
||||||
|
+ # hsr3-db1:/home/tniek # crm_resource --list-operations -N hsr3-db0
|
||||||
|
+ # rsc_azure-events-az (ocf::heartbeat:azure-events-az): Started: rsc_azure-events-az_start_0 (node=hsr3-db0, call=91, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=115ms): complete
|
||||||
|
+ # rsc_azure-events-az (ocf::heartbeat:azure-events-az): Started: rsc_azure-events-az_monitor_10000 (node=hsr3-db0, call=93, rc=0, last-rc-change=Fri Jun 8 22:37:47 2018, exec=197ms): complete
|
||||||
|
+ # rsc_SAPHana_HN1_HDB03 (ocf::suse:SAPHana): Master: rsc_SAPHana_HN1_HDB03_start_0 (node=hsr3-db0, call=-1, rc=193, last-rc-change=Fri Jun 8 22:37:46 2018, exec=0ms): pending
|
||||||
|
+ # rsc_SAPHanaTopology_HN1_HDB03 (ocf::suse:SAPHanaTopology): Started: rsc_SAPHanaTopology_HN1_HDB03_start_0 (node=hsr3-db0, call=90, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=3214ms): complete
|
||||||
|
+ ocf.logger.debug("listOperationsOnNode: begin; node = %s" % node)
|
||||||
|
+
|
||||||
|
+ resources = clusterHelper._exec("crm_resource", "--list-operations", "-N", node)
|
||||||
|
+ if len(resources) == 0:
|
||||||
|
+ ret = []
|
||||||
|
+ else:
|
||||||
|
+ ret = resources.split("\n")
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("listOperationsOnNode: finished; return = %s" % str(ret))
|
||||||
|
+ return ret
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def noPendingResourcesOnNode(node):
|
||||||
|
+ """
|
||||||
|
+ Check that there are no pending resources on a given node
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("noPendingResourcesOnNode: begin; node = %s" % node)
|
||||||
|
+
|
||||||
|
+ for r in clusterHelper.listOperationsOnNode(node):
|
||||||
|
+ ocf.logger.debug("noPendingResourcesOnNode: * %s" % r)
|
||||||
|
+ resource = r.split()[-1]
|
||||||
|
+ if resource == "pending":
|
||||||
|
+ ocf.logger.info("noPendingResourcesOnNode: found resource %s that is still pending" % resource)
|
||||||
|
+ ocf.logger.debug("noPendingResourcesOnNode: finished; return = False")
|
||||||
|
+ return False
|
||||||
|
+ ocf.logger.info("noPendingResourcesOnNode: no pending resources on node %s" % node)
|
||||||
|
+ ocf.logger.debug("noPendingResourcesOnNode: finished; return = True")
|
||||||
|
+
|
||||||
|
+ return True
|
||||||
|
+
|
||||||
|
+ @staticmethod
|
||||||
|
+ def allResourcesStoppedOnNode(node):
|
||||||
|
+ """
|
||||||
|
+ Check that all resources on a given node are stopped
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("allResourcesStoppedOnNode: begin; node = %s" % node)
|
||||||
|
+
|
||||||
|
+ if clusterHelper.noPendingResourcesOnNode(node):
|
||||||
|
+ if len(clusterHelper.transitionSummary()) == 0:
|
||||||
|
+ ocf.logger.info("allResourcesStoppedOnNode: no pending resources on node %s and empty transition summary" % node)
|
||||||
|
+ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = True")
|
||||||
|
+ return True
|
||||||
|
+ ocf.logger.info("allResourcesStoppedOnNode: transition summary is not empty")
|
||||||
|
+ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False")
|
||||||
|
+ return False
|
||||||
|
+
|
||||||
|
+ ocf.logger.info("allResourcesStoppedOnNode: still pending resources on node %s" % node)
|
||||||
|
+ ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False")
|
||||||
|
+ return False
|
||||||
|
+
|
||||||
|
+##############################################################################
|
||||||
|
+
|
||||||
|
+AVAILABLE = 0 # Node is online and ready to handle events
|
||||||
|
+STOPPING = 1 # Standby has been triggered, but some resources are still running
|
||||||
|
+IN_EVENT = 2 # All resources are stopped, and event has been initiated via Azure Metadata Service
|
||||||
|
+ON_HOLD = 3 # Node has a pending event that cannot be started there are no other nodes available
|
||||||
|
+
|
||||||
|
+def stringToNodeState(name):
|
||||||
|
+ if type(name) == int: return name
|
||||||
|
+ if name == "STOPPING": return STOPPING
|
||||||
|
+ if name == "IN_EVENT": return IN_EVENT
|
||||||
|
+ if name == "ON_HOLD": return ON_HOLD
|
||||||
|
+ return AVAILABLE
|
||||||
|
+
|
||||||
|
+def nodeStateToString(state):
|
||||||
|
+ if state == STOPPING: return "STOPPING"
|
||||||
|
+ if state == IN_EVENT: return "IN_EVENT"
|
||||||
|
+ if state == ON_HOLD: return "ON_HOLD"
|
||||||
|
+ return "AVAILABLE"
|
||||||
|
+
|
||||||
|
+##############################################################################
|
||||||
|
+
|
||||||
|
+class Node:
|
||||||
|
+ """
|
||||||
|
+ Core class implementing logic for a cluster node
|
||||||
|
+ """
|
||||||
|
+ def __init__(self, ra):
|
||||||
|
+ self.raOwner = ra
|
||||||
|
+ self.azInfo = azHelper.getInstanceInfo()
|
||||||
|
+ self.azName = self.azInfo.name
|
||||||
|
+ self.hostName = socket.gethostname()
|
||||||
|
+ self.setAttr("azName", self.azName)
|
||||||
|
+ clusterHelper.setAttr("hostName_%s" % self.azName, self.hostName)
|
||||||
|
+
|
||||||
|
+ def getAttr(self, key):
|
||||||
|
+ """
|
||||||
|
+ Get a local attribute
|
||||||
|
+ """
|
||||||
|
+ return clusterHelper.getAttr(key, node=self.hostName)
|
||||||
|
+
|
||||||
|
+ def setAttr(self, key, value):
|
||||||
|
+ """
|
||||||
|
+ Set a local attribute
|
||||||
|
+ """
|
||||||
|
+ return clusterHelper.setAttr(key, value, node=self.hostName)
|
||||||
|
+
|
||||||
|
+ def selfOrOtherNode(self, node):
|
||||||
|
+ """
|
||||||
|
+ Helper function to distinguish self/other node
|
||||||
|
+ """
|
||||||
|
+ return node if node else self.hostName
|
||||||
|
+
|
||||||
|
+ def setState(self, state, node=None):
|
||||||
|
+ """
|
||||||
|
+ Set the state for a given node (or self)
|
||||||
|
+ """
|
||||||
|
+ node = self.selfOrOtherNode(node)
|
||||||
|
+ ocf.logger.debug("setState: begin; node = %s, state = %s" % (node, nodeStateToString(state)))
|
||||||
|
+
|
||||||
|
+ clusterHelper.setAttr(attr_curNodeState, nodeStateToString(state), node=node)
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("setState: finished")
|
||||||
|
+
|
||||||
|
+ def getState(self, node=None):
|
||||||
|
+ """
|
||||||
|
+ Get the state for a given node (or self)
|
||||||
|
+ """
|
||||||
|
+ node = self.selfOrOtherNode(node)
|
||||||
|
+ ocf.logger.debug("getState: begin; node = %s" % node)
|
||||||
|
+
|
||||||
|
+ state = clusterHelper.getAttr(attr_curNodeState, node=node)
|
||||||
|
+ ocf.logger.debug("getState: state = %s" % state)
|
||||||
|
+ ocf.logger.debug("getState: finished")
|
||||||
|
+ if not state:
|
||||||
|
+ return AVAILABLE
|
||||||
|
+ return stringToNodeState(state)
|
||||||
|
+
|
||||||
|
+ def setEventIDs(self, eventIDs, node=None):
|
||||||
|
+ """
|
||||||
|
+ Set pending EventIDs for a given node (or self)
|
||||||
|
+ """
|
||||||
|
+ node = self.selfOrOtherNode(node)
|
||||||
|
+ ocf.logger.debug("setEventIDs: begin; node = %s, eventIDs = %s" % (node, str(eventIDs)))
|
||||||
|
+
|
||||||
|
+ if eventIDs:
|
||||||
|
+ eventIDStr = ",".join(eventIDs)
|
||||||
|
+ else:
|
||||||
|
+ eventIDStr = None
|
||||||
|
+ clusterHelper.setAttr(attr_pendingEventIDs, eventIDStr, node=node)
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("setEventIDs: finished")
|
||||||
|
+ return
|
||||||
|
+
|
||||||
|
+ def getEventIDs(self, node=None):
|
||||||
|
+ """
|
||||||
|
+ Get pending EventIDs for a given node (or self)
|
||||||
|
+ """
|
||||||
|
+ node = self.selfOrOtherNode(node)
|
||||||
|
+ ocf.logger.debug("getEventIDs: begin; node = %s" % node)
|
||||||
|
+
|
||||||
|
+ eventIDStr = clusterHelper.getAttr(attr_pendingEventIDs, node=node)
|
||||||
|
+ if eventIDStr:
|
||||||
|
+ eventIDs = eventIDStr.split(",")
|
||||||
|
+ else:
|
||||||
|
+ eventIDs = None
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("getEventIDs: finished; eventIDs = %s" % str(eventIDs))
|
||||||
|
+ return eventIDs
|
||||||
|
+
|
||||||
|
+ def updateNodeStateAndEvents(self, state, eventIDs, node=None):
|
||||||
|
+ """
|
||||||
|
+ Set the state and pending EventIDs for a given node (or self)
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("updateNodeStateAndEvents: begin; node = %s, state = %s, eventIDs = %s" % (node, nodeStateToString(state), str(eventIDs)))
|
||||||
|
+
|
||||||
|
+ self.setState(state, node=node)
|
||||||
|
+ self.setEventIDs(eventIDs, node=node)
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("updateNodeStateAndEvents: finished")
|
||||||
|
+ return state
|
||||||
|
+
|
||||||
|
+ def putNodeStandby(self, node=None):
|
||||||
|
+ """
|
||||||
|
+ Put self to standby
|
||||||
|
+ """
|
||||||
|
+ node = self.selfOrOtherNode(node)
|
||||||
|
+ ocf.logger.debug("putNodeStandby: begin; node = %s" % node)
|
||||||
|
+
|
||||||
|
+ clusterHelper._exec("crm_attribute",
|
||||||
|
+ "--node", node,
|
||||||
|
+ "--name", attr_healthstate,
|
||||||
|
+ "--update", "-1000000",
|
||||||
|
+ "--lifetime=forever")
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("putNodeStandby: finished")
|
||||||
|
+
|
||||||
|
+ def isNodeInStandby(self, node=None):
|
||||||
|
+ """
|
||||||
|
+ check if node is in standby
|
||||||
|
+ """
|
||||||
|
+ node = self.selfOrOtherNode(node)
|
||||||
|
+ ocf.logger.debug("isNodeInStandby: begin; node = %s" % node)
|
||||||
|
+ isInStandy = False
|
||||||
|
+
|
||||||
|
+ healthAttributeStr = clusterHelper.getAttr(attr_healthstate, node)
|
||||||
|
+ if healthAttributeStr is not None:
|
||||||
|
+ try:
|
||||||
|
+ healthAttribute = int(healthAttributeStr)
|
||||||
|
+ isInStandy = healthAttribute < 0
|
||||||
|
+ except ValueError:
|
||||||
|
+ # Handle the exception
|
||||||
|
+ ocf.logger.warn("Health attribute %s on node %s cannot be converted to an integer value" % (healthAttributeStr, node))
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("isNodeInStandby: finished - result %s" % isInStandy)
|
||||||
|
+ return isInStandy
|
||||||
|
+
|
||||||
|
+ def putNodeOnline(self, node=None):
|
||||||
|
+ """
|
||||||
|
+ Put self back online
|
||||||
|
+ """
|
||||||
|
+ node = self.selfOrOtherNode(node)
|
||||||
|
+ ocf.logger.debug("putNodeOnline: begin; node = %s" % node)
|
||||||
|
+
|
||||||
|
+ clusterHelper._exec("crm_attribute",
|
||||||
|
+ "--node", node,
|
||||||
|
+ "--name", "#health-azure",
|
||||||
|
+ "--update", "0",
|
||||||
|
+ "--lifetime=forever")
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("putNodeOnline: finished")
|
||||||
|
+
|
||||||
|
+ def separateEvents(self, events):
|
||||||
|
+ """
|
||||||
|
+ Split own/other nodes' events
|
||||||
|
+ """
|
||||||
|
+ ocf.logger.debug("separateEvents: begin; events = %s" % str(events))
|
||||||
|
+
|
||||||
|
+ localEvents = []
|
||||||
|
+ remoteEvents = []
|
||||||
|
+ for e in events:
|
||||||
|
+ e = attrDict(e)
|
||||||
|
+ if e.EventType not in self.raOwner.relevantEventTypes:
|
||||||
|
+ continue
|
||||||
|
+ if self.azName in e.Resources:
|
||||||
|
+ localEvents.append(e)
|
||||||
|
+ else:
|
||||||
|
+ remoteEvents.append(e)
|
||||||
|
+ ocf.logger.debug("separateEvents: finished; localEvents = %s, remoteEvents = %s" % (str(localEvents), str(remoteEvents)))
|
||||||
|
+ return (localEvents, remoteEvents)
|
||||||
|
+
|
||||||
|
+##############################################################################
|
||||||
|
+
|
||||||
|
+class raAzEvents:
|
||||||
|
+ """
|
||||||
|
+ Main class for resource agent
|
||||||
|
+ """
|
||||||
|
+ def __init__(self, relevantEventTypes):
|
||||||
|
+ self.node = Node(self)
|
||||||
|
+ self.relevantEventTypes = relevantEventTypes
|
||||||
|
+
|
||||||
|
+ def monitor(self):
|
||||||
|
+ ocf.logger.debug("monitor: begin")
|
||||||
|
+
|
||||||
|
+ events = azHelper.pullScheduledEvents()
|
||||||
|
+
|
||||||
|
+ # get current document version
|
||||||
|
+ curDocVersion = events.DocumentIncarnation
|
||||||
|
+ lastDocVersion = self.node.getAttr(attr_lastDocVersion)
|
||||||
|
+ ocf.logger.debug("monitor: lastDocVersion = %s; curDocVersion = %s" % (lastDocVersion, curDocVersion))
|
||||||
|
+
|
||||||
|
+ # split events local/remote
|
||||||
|
+ (localEvents, remoteEvents) = self.node.separateEvents(events.Events)
|
||||||
|
+
|
||||||
|
+ # ensure local events are only executing once
|
||||||
|
+ if curDocVersion == lastDocVersion:
|
||||||
|
+ ocf.logger.info("monitor: already handled curDocVersion, skip")
|
||||||
|
+ return ocf.OCF_SUCCESS
|
||||||
|
+
|
||||||
|
+ localAzEventIDs = set()
|
||||||
|
+ for e in localEvents:
|
||||||
|
+ localAzEventIDs.add(e.EventId)
|
||||||
|
+
|
||||||
|
+ curState = self.node.getState()
|
||||||
|
+ clusterEventIDs = self.node.getEventIDs()
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("monitor: curDocVersion has not been handled yet")
|
||||||
|
+
|
||||||
|
+ if clusterEventIDs:
|
||||||
|
+ # there are pending events set, so our state must be STOPPING or IN_EVENT
|
||||||
|
+ i = 0; touchedEventIDs = False
|
||||||
|
+ while i < len(clusterEventIDs):
|
||||||
|
+ # clean up pending events that are already finished according to AZ
|
||||||
|
+ if clusterEventIDs[i] not in localAzEventIDs:
|
||||||
|
+ ocf.logger.info("monitor: remove finished local clusterEvent %s" % (clusterEventIDs[i]))
|
||||||
|
+ clusterEventIDs.pop(i)
|
||||||
|
+ touchedEventIDs = True
|
||||||
|
+ else:
|
||||||
|
+ i += 1
|
||||||
|
+ if len(clusterEventIDs) > 0:
|
||||||
|
+ # there are still pending events (either because we're still stopping, or because the event is still in place)
|
||||||
|
+ # either way, we need to wait
|
||||||
|
+ if touchedEventIDs:
|
||||||
|
+ ocf.logger.info("monitor: added new local clusterEvent %s" % str(clusterEventIDs))
|
||||||
|
+ self.node.setEventIDs(clusterEventIDs)
|
||||||
|
+ else:
|
||||||
|
+ ocf.logger.info("monitor: no local clusterEvents were updated")
|
||||||
|
+ else:
|
||||||
|
+ # there are no more pending events left after cleanup
|
||||||
|
+ if clusterHelper.noPendingResourcesOnNode(self.node.hostName):
|
||||||
|
+ # and no pending resources on the node -> set it back online
|
||||||
|
+ ocf.logger.info("monitor: all local events finished -> clean up, put node online and AVAILABLE")
|
||||||
|
+ curState = self.node.updateNodeStateAndEvents(AVAILABLE, None)
|
||||||
|
+ self.node.putNodeOnline()
|
||||||
|
+ clusterHelper.removeHoldFromNodes()
|
||||||
|
+ # If Azure Scheduled Events are not used for 24 hours (e.g. because the cluster was asleep), it will be disabled for a VM.
|
||||||
|
+ # When the cluster wakes up and starts using it again, the DocumentIncarnation is reset.
|
||||||
|
+ # We need to remove it during cleanup, otherwise azure-events-az will not process the event after wakeup
|
||||||
|
+ self.node.setAttr(attr_lastDocVersion, None)
|
||||||
|
+ else:
|
||||||
|
+ ocf.logger.info("monitor: all local events finished, but some resources have not completed startup yet -> wait")
|
||||||
|
+ else:
|
||||||
|
+ if curState == AVAILABLE:
|
||||||
|
+ if len(localAzEventIDs) > 0:
|
||||||
|
+ if clusterHelper.otherNodesAvailable(self.node):
|
||||||
|
+ ocf.logger.info("monitor: can handle local events %s -> set state STOPPING" % (str(localAzEventIDs)))
|
||||||
|
+ curState = self.node.updateNodeStateAndEvents(STOPPING, localAzEventIDs)
|
||||||
|
+ else:
|
||||||
|
+ ocf.logger.info("monitor: cannot handle azEvents %s (only node available) -> set state ON_HOLD" % str(localAzEventIDs))
|
||||||
|
+ self.node.setState(ON_HOLD)
|
||||||
|
+ else:
|
||||||
|
+ ocf.logger.debug("monitor: no local azEvents to handle")
|
||||||
|
+
|
||||||
|
+ if curState == STOPPING:
|
||||||
|
+ eventIDsForNode = {}
|
||||||
|
+ if clusterHelper.noPendingResourcesOnNode(self.node.hostName):
|
||||||
|
+ if not self.node.isNodeInStandby():
|
||||||
|
+ ocf.logger.info("monitor: all local resources are started properly -> put node standby and exit")
|
||||||
|
+ self.node.putNodeStandby()
|
||||||
|
+ return ocf.OCF_SUCCESS
|
||||||
|
+
|
||||||
|
+ for e in localEvents:
|
||||||
|
+ ocf.logger.info("monitor: handling remote event %s (%s; nodes = %s)" % (e.EventId, e.EventType, str(e.Resources)))
|
||||||
|
+ # before we can force an event to start, we need to ensure all nodes involved have stopped their resources
|
||||||
|
+ if e.EventStatus == "Scheduled":
|
||||||
|
+ allNodesStopped = True
|
||||||
|
+ for azName in e.Resources:
|
||||||
|
+ hostName = clusterHelper.getHostNameFromAzName(azName)
|
||||||
|
+ state = self.node.getState(node=hostName)
|
||||||
|
+ if state == STOPPING:
|
||||||
|
+ # the only way we can continue is when node state is STOPPING, but all resources have been stopped
|
||||||
|
+ if not clusterHelper.allResourcesStoppedOnNode(hostName):
|
||||||
|
+ ocf.logger.info("monitor: (at least) node %s has still resources running -> wait" % hostName)
|
||||||
|
+ allNodesStopped = False
|
||||||
|
+ break
|
||||||
|
+ elif state in (AVAILABLE, IN_EVENT, ON_HOLD):
|
||||||
|
+ ocf.logger.info("monitor: node %s is still %s -> remote event needs to be picked up locally" % (hostName, nodeStateToString(state)))
|
||||||
|
+ allNodesStopped = False
|
||||||
|
+ break
|
||||||
|
+ if allNodesStopped:
|
||||||
|
+ ocf.logger.info("monitor: nodes %s are stopped -> add remote event %s to force list" % (str(e.Resources), e.EventId))
|
||||||
|
+ for n in e.Resources:
|
||||||
|
+ hostName = clusterHelper.getHostNameFromAzName(n)
|
||||||
|
+ if hostName in eventIDsForNode:
|
||||||
|
+ eventIDsForNode[hostName].append(e.EventId)
|
||||||
|
+ else:
|
||||||
|
+ eventIDsForNode[hostName] = [e.EventId]
|
||||||
|
+ elif e.EventStatus == "Started":
|
||||||
|
+ ocf.logger.info("monitor: remote event already started")
|
||||||
|
+
|
||||||
|
+ # force the start of all events whose nodes are ready (i.e. have no more resources running)
|
||||||
|
+ if len(eventIDsForNode.keys()) > 0:
|
||||||
|
+ eventIDsToForce = set([item for sublist in eventIDsForNode.values() for item in sublist])
|
||||||
|
+ ocf.logger.info("monitor: set nodes %s to IN_EVENT; force remote events %s" % (str(eventIDsForNode.keys()), str(eventIDsToForce)))
|
||||||
|
+ for node, eventId in eventIDsForNode.items():
|
||||||
|
+ self.node.updateNodeStateAndEvents(IN_EVENT, eventId, node=node)
|
||||||
|
+ azHelper.forceEvents(eventIDsToForce)
|
||||||
|
+ self.node.setAttr(attr_lastDocVersion, curDocVersion)
|
||||||
|
+ else:
|
||||||
|
+ ocf.logger.info("monitor: some local resources are not clean yet -> wait")
|
||||||
|
+
|
||||||
|
+ ocf.logger.debug("monitor: finished")
|
||||||
|
+ return ocf.OCF_SUCCESS
|
||||||
|
+
|
||||||
|
+##############################################################################
|
||||||
|
+
|
||||||
|
+def setLoglevel(verbose):
|
||||||
|
+ # set up writing into syslog
|
||||||
|
+ loglevel = default_loglevel
|
||||||
|
+ if verbose:
|
||||||
|
+ opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1))
|
||||||
|
+ urllib2.install_opener(opener)
|
||||||
|
+ loglevel = ocf.logging.DEBUG
|
||||||
|
+ ocf.log.setLevel(loglevel)
|
||||||
|
+
|
||||||
|
+description = (
|
||||||
|
+ "Microsoft Azure Scheduled Events monitoring agent",
|
||||||
|
+ """This resource agent implements a monitor for scheduled
|
||||||
|
+(maintenance) events for a Microsoft Azure VM.
|
||||||
|
+
|
||||||
|
+If any relevant events are found, it moves all Pacemaker resources
|
||||||
|
+away from the affected node to allow for a graceful shutdown.
|
||||||
|
+
|
||||||
|
+ Usage:
|
||||||
|
+ [OCF_RESKEY_eventTypes=VAL] [OCF_RESKEY_verbose=VAL] azure-events-az ACTION
|
||||||
|
+
|
||||||
|
+ action (required): Supported values: monitor, help, meta-data
|
||||||
|
+ eventTypes (optional): List of event types to be considered
|
||||||
|
+ relevant by the resource agent (comma-separated).
|
||||||
|
+ Supported values: Freeze,Reboot,Redeploy
|
||||||
|
+ Default = Reboot,Redeploy
|
||||||
|
+/ verbose (optional): If set to true, displays debug info.
|
||||||
|
+ Default = false
|
||||||
|
+
|
||||||
|
+ Deployment:
|
||||||
|
+ crm configure primitive rsc_azure-events-az ocf:heartbeat:azure-events-az \
|
||||||
|
+ op monitor interval=10s
|
||||||
|
+ crm configure clone cln_azure-events-az rsc_azure-events-az
|
||||||
|
+
|
||||||
|
+For further information on Microsoft Azure Scheduled Events, please
|
||||||
|
+refer to the following documentation:
|
||||||
|
+https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events
|
||||||
|
+""")
|
||||||
|
+
|
||||||
|
+def monitor_action(eventTypes):
|
||||||
|
+ relevantEventTypes = set(eventTypes.split(",") if eventTypes else [])
|
||||||
|
+ ra = raAzEvents(relevantEventTypes)
|
||||||
|
+ return ra.monitor()
|
||||||
|
+
|
||||||
|
+def validate_action(eventTypes):
|
||||||
|
+ if eventTypes:
|
||||||
|
+ for event in eventTypes.split(","):
|
||||||
|
+ if event not in ("Freeze", "Reboot", "Redeploy"):
|
||||||
|
+ ocf.ocf_exit_reason("Event type not one of Freeze, Reboot, Redeploy: " + eventTypes)
|
||||||
|
+ return ocf.OCF_ERR_CONFIGURED
|
||||||
|
+ return ocf.OCF_SUCCESS
|
||||||
|
+
|
||||||
|
+def main():
|
||||||
|
+ agent = ocf.Agent("azure-events-az", shortdesc=description[0], longdesc=description[1])
|
||||||
|
+ agent.add_parameter(
|
||||||
|
+ "eventTypes",
|
||||||
|
+ shortdesc="List of resources to be considered",
|
||||||
|
+ longdesc="A comma-separated list of event types that will be handled by this resource agent. (Possible values: Freeze,Reboot,Redeploy)",
|
||||||
|
+ content_type="string",
|
||||||
|
+ default="Reboot,Redeploy")
|
||||||
|
+ agent.add_parameter(
|
||||||
|
+ "verbose",
|
||||||
|
+ shortdesc="Enable verbose agent logging",
|
||||||
|
+ longdesc="Set to true to enable verbose logging",
|
||||||
|
+ content_type="boolean",
|
||||||
|
+ default="false")
|
||||||
|
+ agent.add_action("start", timeout=10, handler=lambda: ocf.OCF_SUCCESS)
|
||||||
|
+ agent.add_action("stop", timeout=10, handler=lambda: ocf.OCF_SUCCESS)
|
||||||
|
+ agent.add_action("validate-all", timeout=20, handler=validate_action)
|
||||||
|
+ agent.add_action("monitor", timeout=240, interval=10, handler=monitor_action)
|
||||||
|
+ setLoglevel(ocf.is_true(ocf.get_parameter("verbose", "false")))
|
||||||
|
+ agent.run()
|
||||||
|
+
|
||||||
|
+if __name__ == '__main__':
|
||||||
|
+ main()
|
||||||
|
\ No newline at end of file
|
||||||
|
|
||||||
|
From a95337d882c7cc69d604b050159ad50b679f18be Mon Sep 17 00:00:00 2001
|
||||||
|
From: MSSedusch <sedusch@microsoft.com>
|
||||||
|
Date: Thu, 2 Jun 2022 14:10:33 +0200
|
||||||
|
Subject: [PATCH 2/2] Remove developer documentation
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/azure-events-az.in | 11 -----------
|
||||||
|
1 file changed, 11 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in
|
||||||
|
index 616fc8d9e..59d095306 100644
|
||||||
|
--- a/heartbeat/azure-events-az.in
|
||||||
|
+++ b/heartbeat/azure-events-az.in
|
||||||
|
@@ -723,17 +723,6 @@ description = (
|
||||||
|
If any relevant events are found, it moves all Pacemaker resources
|
||||||
|
away from the affected node to allow for a graceful shutdown.
|
||||||
|
|
||||||
|
- Usage:
|
||||||
|
- [OCF_RESKEY_eventTypes=VAL] [OCF_RESKEY_verbose=VAL] azure-events-az ACTION
|
||||||
|
-
|
||||||
|
- action (required): Supported values: monitor, help, meta-data
|
||||||
|
- eventTypes (optional): List of event types to be considered
|
||||||
|
- relevant by the resource agent (comma-separated).
|
||||||
|
- Supported values: Freeze,Reboot,Redeploy
|
||||||
|
- Default = Reboot,Redeploy
|
||||||
|
-/ verbose (optional): If set to true, displays debug info.
|
||||||
|
- Default = false
|
||||||
|
-
|
||||||
|
Deployment:
|
||||||
|
crm configure primitive rsc_azure-events-az ocf:heartbeat:azure-events-az \
|
||||||
|
op monitor interval=10s
|
24
SOURCES/bz1992661-mysql-use-ssl-mode.patch
Normal file
24
SOURCES/bz1992661-mysql-use-ssl-mode.patch
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
From ed5bc606a4db5108995df9297698cf9dc14cccb2 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Tue, 18 Jan 2022 11:32:05 +0100
|
||||||
|
Subject: [PATCH] mysql-common: fix local SSL connection by using
|
||||||
|
--ssl-mode=REQUIRED which is available on 5.7+ (--ssl is not available in
|
||||||
|
8.0)
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/mysql-common.sh | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh
|
||||||
|
index 459948b10..de8763544 100755
|
||||||
|
--- a/heartbeat/mysql-common.sh
|
||||||
|
+++ b/heartbeat/mysql-common.sh
|
||||||
|
@@ -97,7 +97,7 @@ MYSQL_BINDIR=`dirname ${OCF_RESKEY_binary}`
|
||||||
|
|
||||||
|
MYSQL=$OCF_RESKEY_client_binary
|
||||||
|
if ocf_is_true "$OCF_RESKEY_replication_require_ssl"; then
|
||||||
|
- MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="--ssl"
|
||||||
|
+ MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="--ssl-mode=REQUIRED"
|
||||||
|
else
|
||||||
|
MYSQL_OPTIONS_LOCAL_SSL_OPTIONS=""
|
||||||
|
fi
|
23
SOURCES/bz1995178-storage-mon-fix-typo.patch
Normal file
23
SOURCES/bz1995178-storage-mon-fix-typo.patch
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
From 09cde6531a87fd6a04568eaae94d5c489f36a8b6 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Mon, 6 Sep 2021 15:07:41 +0200
|
||||||
|
Subject: [PATCH] storage-mon: update metadata to suggest usage in combination
|
||||||
|
with HealthSMART agent
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/storage-mon.in | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in
|
||||||
|
index 5b289fe55..875095670 100644
|
||||||
|
--- a/heartbeat/storage-mon.in
|
||||||
|
+++ b/heartbeat/storage-mon.in
|
||||||
|
@@ -75,7 +75,7 @@ meta_data() {
|
||||||
|
<longdesc lang="en">
|
||||||
|
System health agent that checks the storage I/O status of the given drives and
|
||||||
|
updates the #health-storage attribute. Usage is highly recommended in combination
|
||||||
|
-with storage-mon monitoring agent. The agent currently support a maximum of 25
|
||||||
|
+with the HealthSMART monitoring agent. The agent currently support a maximum of 25
|
||||||
|
devices per instance.
|
||||||
|
</longdesc>
|
||||||
|
<shortdesc lang="en">storage I/O health status</shortdesc>
|
2016
SOURCES/bz2003117-all-agents-set-correct-agent-ocf-version.patch
Normal file
2016
SOURCES/bz2003117-all-agents-set-correct-agent-ocf-version.patch
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,64 @@
|
|||||||
|
From fcd2565602146c0b9317d159cecb8935e304c7ce Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Thu, 30 Sep 2021 10:23:17 +0200
|
||||||
|
Subject: [PATCH] gcp-pd-move/gcp-vpc-move-route: dont fail failed resources
|
||||||
|
instantly (caused by OCF_ERR_CONFIGURED)
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/gcp-pd-move.in | 4 ++--
|
||||||
|
heartbeat/gcp-vpc-move-route.in | 6 +++---
|
||||||
|
2 files changed, 5 insertions(+), 5 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
|
||||||
|
index e99cc71f88..cbe703c3c5 100644
|
||||||
|
--- a/heartbeat/gcp-pd-move.in
|
||||||
|
+++ b/heartbeat/gcp-pd-move.in
|
||||||
|
@@ -157,7 +157,7 @@ def populate_vars():
|
||||||
|
CONN = googleapiclient.discovery.build('compute', 'v1')
|
||||||
|
except Exception as e:
|
||||||
|
logger.error('Couldn\'t connect with google api: ' + str(e))
|
||||||
|
- sys.exit(ocf.OCF_ERR_CONFIGURED)
|
||||||
|
+ sys.exit(ocf.OCF_ERR_GENERIC)
|
||||||
|
|
||||||
|
for param in PARAMETERS:
|
||||||
|
value = os.environ.get('OCF_RESKEY_%s' % param, PARAMETERS[param])
|
||||||
|
@@ -172,7 +172,7 @@ def populate_vars():
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(
|
||||||
|
'Couldn\'t get instance name, is this running inside GCE?: ' + str(e))
|
||||||
|
- sys.exit(ocf.OCF_ERR_CONFIGURED)
|
||||||
|
+ sys.exit(ocf.OCF_ERR_GENERIC)
|
||||||
|
|
||||||
|
PROJECT = get_metadata('project/project-id')
|
||||||
|
if PARAMETERS['disk_scope'] in ['detect', 'regional']:
|
||||||
|
diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
|
||||||
|
index dac6e4ea8c..6b240c04d0 100644
|
||||||
|
--- a/heartbeat/gcp-vpc-move-route.in
|
||||||
|
+++ b/heartbeat/gcp-vpc-move-route.in
|
||||||
|
@@ -243,7 +243,7 @@ def validate(ctx):
|
||||||
|
ctx.conn = googleapiclient.discovery.build('compute', 'v1', credentials=credentials, cache_discovery=False)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error('Couldn\'t connect with google api: ' + str(e))
|
||||||
|
- sys.exit(OCF_ERR_CONFIGURED)
|
||||||
|
+ sys.exit(OCF_ERR_GENERIC)
|
||||||
|
|
||||||
|
ctx.ip = os.environ.get('OCF_RESKEY_ip')
|
||||||
|
if not ctx.ip:
|
||||||
|
@@ -258,7 +258,7 @@ def validate(ctx):
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(
|
||||||
|
'Instance information not found. Is this a GCE instance ?: %s', str(e))
|
||||||
|
- sys.exit(OCF_ERR_CONFIGURED)
|
||||||
|
+ sys.exit(OCF_ERR_GENERIC)
|
||||||
|
|
||||||
|
ctx.instance_url = '%s/projects/%s/zones/%s/instances/%s' % (
|
||||||
|
GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance)
|
||||||
|
@@ -273,7 +273,7 @@ def validate(ctx):
|
||||||
|
idxs = ctx.iproute.link_lookup(ifname=ctx.interface)
|
||||||
|
if not idxs:
|
||||||
|
logger.error('Network interface not found')
|
||||||
|
- sys.exit(OCF_ERR_CONFIGURED)
|
||||||
|
+ sys.exit(OCF_ERR_GENERIC)
|
||||||
|
ctx.iface_idx = idxs[0]
|
||||||
|
|
||||||
|
|
@ -0,0 +1,43 @@
|
|||||||
|
From 7c54e4ecda33c90a1046c0688774f5b847ab10fe Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Tue, 7 Dec 2021 10:37:24 +0100
|
||||||
|
Subject: [PATCH] Route: return OCF_NOT_RUNNING for probe action when interface
|
||||||
|
or route doesnt exist
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/Route | 15 +++++----------
|
||||||
|
1 file changed, 5 insertions(+), 10 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/Route b/heartbeat/Route
|
||||||
|
index 8b390615a..7db41d0ae 100755
|
||||||
|
--- a/heartbeat/Route
|
||||||
|
+++ b/heartbeat/Route
|
||||||
|
@@ -227,15 +227,6 @@ route_stop() {
|
||||||
|
}
|
||||||
|
|
||||||
|
route_status() {
|
||||||
|
- if [ -n "${OCF_RESKEY_device}" ]; then
|
||||||
|
- # Must check if device exists or is gone.
|
||||||
|
- # If device is gone, route is also unconfigured.
|
||||||
|
- ip link show dev ${OCF_RESKEY_device} >/dev/null 2>&1
|
||||||
|
- if [ $? -ne 0 ]; then
|
||||||
|
- # Assume device does not exist, and short-circuit here.
|
||||||
|
- return $OCF_NOT_RUNNING
|
||||||
|
- fi
|
||||||
|
- fi
|
||||||
|
show_output="$(ip $addr_family route show $(create_route_spec) 2>/dev/null)"
|
||||||
|
if [ $? -eq 0 ]; then
|
||||||
|
if [ -n "$show_output" ]; then
|
||||||
|
@@ -251,7 +242,11 @@ route_status() {
|
||||||
|
else
|
||||||
|
# "ip route show" returned an error code. Assume something
|
||||||
|
# went wrong.
|
||||||
|
- return $OCF_ERR_GENERIC
|
||||||
|
+ if ocf_is_probe; then
|
||||||
|
+ return $OCF_NOT_RUNNING
|
||||||
|
+ else
|
||||||
|
+ return $OCF_ERR_GENERIC
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
366
SOURCES/bz2014415-nfsserver-add-nfs_server_scope-parameter.patch
Normal file
366
SOURCES/bz2014415-nfsserver-add-nfs_server_scope-parameter.patch
Normal file
@ -0,0 +1,366 @@
|
|||||||
|
From 764dacb6195f8940f13b9c322b1bc8189c5619fc Mon Sep 17 00:00:00 2001
|
||||||
|
From: Lars Ellenberg <lars.ellenberg@linbit.com>
|
||||||
|
Date: Mon, 6 Sep 2021 12:13:42 +0200
|
||||||
|
Subject: [PATCH 1/6] Fix NFSv4 lock failover: set NFS Server Scope
|
||||||
|
|
||||||
|
Problem: https://github.com/ClusterLabs/resource-agents/issues/1644
|
||||||
|
RFC8881, 8.4.2.1 State Reclaim:
|
||||||
|
|
||||||
|
| If the server scope is different, the client should not attempt to
|
||||||
|
| reclaim locks. In this situation, no lock reclaim is possible.
|
||||||
|
| Any attempt to re-obtain the locks with non-reclaim operations is
|
||||||
|
| problematic since there is no guarantee that the existing
|
||||||
|
| filehandles will be recognized by the new server, or that if
|
||||||
|
| recognized, they denote the same objects. It is best to treat the
|
||||||
|
| locks as having been revoked by the reconfiguration event.
|
||||||
|
|
||||||
|
That's why for lock reclaim to even be attempted, we have to define and set
|
||||||
|
the same server scope for NFSD on all cluster nodes in the NFS failover
|
||||||
|
cluster. And in linux, that is done by setting the uts nodename for the
|
||||||
|
command that starts the nfsd kernel threads.
|
||||||
|
|
||||||
|
For "init scripts", just set it directly using unshare --uts.
|
||||||
|
For systemd units, add NFS_SERVER_SCOPE to some environment files
|
||||||
|
and inject the "unshare --uts" into the ExecStart command lines
|
||||||
|
using override drop-in files.
|
||||||
|
---
|
||||||
|
heartbeat/nfsserver | 120 +++++++++++++++++++++++++++++++++++++++++++-
|
||||||
|
1 file changed, 119 insertions(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
|
||||||
|
index 96b19abe36..0888378645 100755
|
||||||
|
--- a/heartbeat/nfsserver
|
||||||
|
+++ b/heartbeat/nfsserver
|
||||||
|
@@ -5,6 +5,18 @@
|
||||||
|
# by hxinwei@gmail.com
|
||||||
|
# License: GNU General Public License v2 (GPLv2) and later
|
||||||
|
|
||||||
|
+
|
||||||
|
+# I don't know for certain whether all services actuall _need_ this,
|
||||||
|
+# I know that at least nfs-server needs it.
|
||||||
|
+# The rgmanager resource agent in rgmanager/src/resources/nfsserver.sh.in
|
||||||
|
+# did the unshare for gssd and idmapd as well, even though it seems unclear why.
|
||||||
|
+# Let's start with just the nfs-server, and add others if/when we have clear
|
||||||
|
+# indication they need it.
|
||||||
|
+#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpcbind.service"
|
||||||
|
+NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-server.service"
|
||||||
|
+SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE=/run/sysconfig/nfs-server-scope
|
||||||
|
+SYSTEMD_UNSHARE_UTS_DROPIN=51-resource-agents-unshare-uts.conf
|
||||||
|
+
|
||||||
|
if [ -n "$OCF_DEBUG_LIBRARY" ]; then
|
||||||
|
. $OCF_DEBUG_LIBRARY
|
||||||
|
else
|
||||||
|
@@ -99,6 +111,31 @@ Specifies the length of sm-notify retry time (minutes).
|
||||||
|
<content type="integer" default="" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="nfs_server_scope" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+RFC8881, 8.4.2.1 State Reclaim:
|
||||||
|
+
|
||||||
|
+If the server scope is different, the client should not attempt to
|
||||||
|
+reclaim locks. In this situation, no lock reclaim is possible.
|
||||||
|
+Any attempt to re-obtain the locks with non-reclaim operations is
|
||||||
|
+problematic since there is no guarantee that the existing
|
||||||
|
+filehandles will be recognized by the new server, or that if
|
||||||
|
+recognized, they denote the same objects. It is best to treat the
|
||||||
|
+locks as having been revoked by the reconfiguration event.
|
||||||
|
+
|
||||||
|
+For lock reclaim to even be attempted, we have to define and set the same
|
||||||
|
+server scope for NFSD on all cluster nodes in the NFS failover cluster.
|
||||||
|
+
|
||||||
|
+This agent won't "guess" a suitable server scope name for you, you need to
|
||||||
|
+explicitly specify this. But without it, NFSv4 lock reclaim after failover
|
||||||
|
+won't work properly. Suggested value: the failover "service IP".
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">
|
||||||
|
+RFC8881 NFS server scope for (lock) state reclaim after failover.
|
||||||
|
+</shortdesc>
|
||||||
|
+<content type="string"/>
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="nfs_ip" unique="0" required="0">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Comma separated list of floating IP addresses used to access the nfs service
|
||||||
|
@@ -269,7 +306,11 @@ nfs_exec()
|
||||||
|
set_exec_mode
|
||||||
|
|
||||||
|
case $EXEC_MODE in
|
||||||
|
- 1) ${OCF_RESKEY_nfs_init_script} $cmd;;
|
||||||
|
+ 1) if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
|
||||||
|
+ ${OCF_RESKEY_nfs_init_script} $cmd
|
||||||
|
+ else
|
||||||
|
+ unshare -u /bin/sh -c "hostname ${OCF_RESKEY_nfs_server_scope}; exec ${OCF_RESKEY_nfs_init_script} $cmd"
|
||||||
|
+ fi ;;
|
||||||
|
2) if ! echo $svc | grep -q "\."; then
|
||||||
|
svc="${svc}.service"
|
||||||
|
fi
|
||||||
|
@@ -623,6 +664,74 @@ notify_locks()
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
+# Problem: https://github.com/ClusterLabs/resource-agents/issues/1644
|
||||||
|
+# RFC8881, 8.4.2.1 State Reclaim:
|
||||||
|
+#
|
||||||
|
+# | If the server scope is different, the client should not attempt to
|
||||||
|
+# | reclaim locks. In this situation, no lock reclaim is possible.
|
||||||
|
+# | Any attempt to re-obtain the locks with non-reclaim operations is
|
||||||
|
+# | problematic since there is no guarantee that the existing
|
||||||
|
+# | filehandles will be recognized by the new server, or that if
|
||||||
|
+# | recognized, they denote the same objects. It is best to treat the
|
||||||
|
+# | locks as having been revoked by the reconfiguration event.
|
||||||
|
+#
|
||||||
|
+# That's why for lock reclaim to even be attempted, we have to define and set
|
||||||
|
+# the same server scope for NFSD on all cluster nodes in the NFS failover
|
||||||
|
+# cluster. And in linux, that is done by setting the uts nodename for the
|
||||||
|
+# command that starts the nfsd kernel threads.
|
||||||
|
+#
|
||||||
|
+inject_unshare_uts_name_into_systemd_units ()
|
||||||
|
+{
|
||||||
|
+ local END_TAG="# END OF DROP-IN FOR NFS SERVER SCOPE"
|
||||||
|
+ local services
|
||||||
|
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)
|
||||||
|
+
|
||||||
|
+ local svc dir dropin edited_exec_start do_reload=false
|
||||||
|
+ for svc in $services ; do
|
||||||
|
+ dir=/run/systemd/system/$svc.d
|
||||||
|
+ dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
|
||||||
|
+ grep -sqF "$END_TAG" "$dropin" && continue
|
||||||
|
+
|
||||||
|
+ test -d "$dir" || mkdir -p "$dir"
|
||||||
|
+ test -e "$dropin" && rm -f "$dropin"
|
||||||
|
+
|
||||||
|
+ edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\(.*\\)#ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\1#p")
|
||||||
|
+ cat > "$dropin" <<___
|
||||||
|
+[Service]
|
||||||
|
+EnvironmentFile=$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE
|
||||||
|
+# reset list of exec start, then re-populate with unshared uts namespace
|
||||||
|
+ExecStart=
|
||||||
|
+$edited_exec_start
|
||||||
|
+$END_TAG
|
||||||
|
+___
|
||||||
|
+ do_reload=true
|
||||||
|
+ ocf_log debug "injected unshare --uts into $dropin"
|
||||||
|
+ done
|
||||||
|
+
|
||||||
|
+ mkdir -p "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE%/*}"
|
||||||
|
+ echo "NFS_SERVER_SCOPE=$OCF_RESKEY_nfs_server_scope" > "$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE"
|
||||||
|
+
|
||||||
|
+ $do_reload && systemctl daemon-reload
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+remove_unshare_uts_dropins ()
|
||||||
|
+{
|
||||||
|
+ local services
|
||||||
|
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE)
|
||||||
|
+
|
||||||
|
+ local svc dir dropin do_reload=false
|
||||||
|
+ for svc in $services ; do
|
||||||
|
+ dir=/run/systemd/system/$svc.d
|
||||||
|
+ dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
|
||||||
|
+ test -e "$dropin" || continue
|
||||||
|
+ rm -f "$dropin"
|
||||||
|
+ do_reload=true
|
||||||
|
+ ocf_log debug "removed unshare --uts from $svc"
|
||||||
|
+ done
|
||||||
|
+ rm -f "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE}"
|
||||||
|
+ $do_reload && systemctl daemon-reload
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
nfsserver_start ()
|
||||||
|
{
|
||||||
|
local rc;
|
||||||
|
@@ -636,6 +745,13 @@ nfsserver_start ()
|
||||||
|
is_redhat_based && set_env_args
|
||||||
|
bind_tree
|
||||||
|
prepare_directory
|
||||||
|
+ case $EXEC_MODE in [23])
|
||||||
|
+ if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
|
||||||
|
+ remove_unshare_uts_dropins
|
||||||
|
+ else
|
||||||
|
+ inject_unshare_uts_name_into_systemd_units
|
||||||
|
+ fi ;;
|
||||||
|
+ esac
|
||||||
|
|
||||||
|
if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
|
||||||
|
mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
|
||||||
|
@@ -854,6 +970,8 @@ nfsserver_stop ()
|
||||||
|
ocf_log info "NFS server stopped"
|
||||||
|
fi
|
||||||
|
|
||||||
|
+ case $EXEC_MODE in [23]) remove_unshare_uts_dropins;; esac
|
||||||
|
+
|
||||||
|
return $rc
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
From 515697b53c1614d05d39491c9af83e8d8b844b17 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Lars Ellenberg <lars.ellenberg@linbit.com>
|
||||||
|
Date: Fri, 8 Oct 2021 12:01:41 +0200
|
||||||
|
Subject: [PATCH 2/6] Fix NFSv4 lock failover: set NFS Server Scope, regardless
|
||||||
|
of EXEC_MODE
|
||||||
|
|
||||||
|
Debian (and other systems) may provide "init scripts",
|
||||||
|
which will only redirect back to systemd.
|
||||||
|
|
||||||
|
If we just unshare --uts the init script invocation,
|
||||||
|
the uts namespace is useless in that case.
|
||||||
|
|
||||||
|
If systemd is running, mangle the nfs-server.service unit,
|
||||||
|
independent of the "EXEC_MODE".
|
||||||
|
---
|
||||||
|
heartbeat/nfsserver | 18 ++++++++++++++----
|
||||||
|
1 file changed, 14 insertions(+), 4 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
|
||||||
|
index 0888378645..054aabbaf6 100755
|
||||||
|
--- a/heartbeat/nfsserver
|
||||||
|
+++ b/heartbeat/nfsserver
|
||||||
|
@@ -745,13 +745,20 @@ nfsserver_start ()
|
||||||
|
is_redhat_based && set_env_args
|
||||||
|
bind_tree
|
||||||
|
prepare_directory
|
||||||
|
- case $EXEC_MODE in [23])
|
||||||
|
+
|
||||||
|
+ # Debian (and other systems) may provide "init scripts",
|
||||||
|
+ # which will only redirect back to systemd.
|
||||||
|
+ # If we just unshare --uts the init script invocation,
|
||||||
|
+ # the uts namespace is useless in that case.
|
||||||
|
+ # If systemd is running, mangle the nfs-server.service unit,
|
||||||
|
+ # independent of the "EXEC_MODE" we detected.
|
||||||
|
+ if $systemd_is_running ; then
|
||||||
|
if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
|
||||||
|
remove_unshare_uts_dropins
|
||||||
|
else
|
||||||
|
inject_unshare_uts_name_into_systemd_units
|
||||||
|
- fi ;;
|
||||||
|
- esac
|
||||||
|
+ fi
|
||||||
|
+ fi
|
||||||
|
|
||||||
|
if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
|
||||||
|
mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
|
||||||
|
@@ -970,7 +977,9 @@ nfsserver_stop ()
|
||||||
|
ocf_log info "NFS server stopped"
|
||||||
|
fi
|
||||||
|
|
||||||
|
- case $EXEC_MODE in [23]) remove_unshare_uts_dropins;; esac
|
||||||
|
+ if $systemd_is_running; then
|
||||||
|
+ remove_unshare_uts_dropins
|
||||||
|
+ fi
|
||||||
|
|
||||||
|
return $rc
|
||||||
|
}
|
||||||
|
@@ -1008,6 +1017,7 @@ nfsserver_validate ()
|
||||||
|
}
|
||||||
|
|
||||||
|
nfsserver_validate
|
||||||
|
+systemd_is_running && systemd_is_running=true || systemd_is_running=false
|
||||||
|
|
||||||
|
case $__OCF_ACTION in
|
||||||
|
start) nfsserver_start
|
||||||
|
|
||||||
|
From e83c20d88f404f9f9d829c654883d60eb6cc9ff3 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Lars Ellenberg <lars.ellenberg@linbit.com>
|
||||||
|
Date: Fri, 8 Oct 2021 17:06:18 +0200
|
||||||
|
Subject: [PATCH 3/6] Fix NFSv4 lock failover: add missing "|cut -f1" in
|
||||||
|
remove_unshare_uts_dropins
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/nfsserver | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
|
||||||
|
index 054aabbaf6..d3db89a537 100755
|
||||||
|
--- a/heartbeat/nfsserver
|
||||||
|
+++ b/heartbeat/nfsserver
|
||||||
|
@@ -717,7 +717,7 @@ ___
|
||||||
|
remove_unshare_uts_dropins ()
|
||||||
|
{
|
||||||
|
local services
|
||||||
|
- services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE)
|
||||||
|
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)
|
||||||
|
|
||||||
|
local svc dir dropin do_reload=false
|
||||||
|
for svc in $services ; do
|
||||||
|
|
||||||
|
From b5b0e4a0b60d285af576b2d8ecfbe95e5a177a87 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Lars Ellenberg <lars.ellenberg@linbit.com>
|
||||||
|
Date: Fri, 8 Oct 2021 17:07:13 +0200
|
||||||
|
Subject: [PATCH 4/6] Fix NFSv4 lock failover: get rid of "world-inaccessible"
|
||||||
|
warning
|
||||||
|
|
||||||
|
by temporarily changing the umask before generating the dropins
|
||||||
|
---
|
||||||
|
heartbeat/nfsserver | 3 +++
|
||||||
|
1 file changed, 3 insertions(+)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
|
||||||
|
index d3db89a537..447e0302b2 100755
|
||||||
|
--- a/heartbeat/nfsserver
|
||||||
|
+++ b/heartbeat/nfsserver
|
||||||
|
@@ -687,6 +687,8 @@ inject_unshare_uts_name_into_systemd_units ()
|
||||||
|
services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)
|
||||||
|
|
||||||
|
local svc dir dropin edited_exec_start do_reload=false
|
||||||
|
+ local old_umask=$(umask)
|
||||||
|
+ umask 0022
|
||||||
|
for svc in $services ; do
|
||||||
|
dir=/run/systemd/system/$svc.d
|
||||||
|
dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
|
||||||
|
@@ -710,6 +712,7 @@ ___
|
||||||
|
|
||||||
|
mkdir -p "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE%/*}"
|
||||||
|
echo "NFS_SERVER_SCOPE=$OCF_RESKEY_nfs_server_scope" > "$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE"
|
||||||
|
+ umask $old_umask
|
||||||
|
|
||||||
|
$do_reload && systemctl daemon-reload
|
||||||
|
}
|
||||||
|
|
||||||
|
From 3c6c91ce5a00eeef9cd766389d73a0b42580a1e6 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Lars Ellenberg <lars.ellenberg@linbit.com>
|
||||||
|
Date: Fri, 8 Oct 2021 17:08:09 +0200
|
||||||
|
Subject: [PATCH 5/6] Fix NFSv4 lock failover: deal with "special executable
|
||||||
|
prefix" chars in ExecStart
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/nfsserver | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
|
||||||
|
index 447e0302b2..5326bd2c6e 100755
|
||||||
|
--- a/heartbeat/nfsserver
|
||||||
|
+++ b/heartbeat/nfsserver
|
||||||
|
@@ -697,7 +697,7 @@ inject_unshare_uts_name_into_systemd_units ()
|
||||||
|
test -d "$dir" || mkdir -p "$dir"
|
||||||
|
test -e "$dropin" && rm -f "$dropin"
|
||||||
|
|
||||||
|
- edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\(.*\\)#ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\1#p")
|
||||||
|
+ edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\([-+:!@]*\\)\\(.*\\)#ExecStart=\\1/usr/bin/unshare --uts /bin/sh -c 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\2#p")
|
||||||
|
cat > "$dropin" <<___
|
||||||
|
[Service]
|
||||||
|
EnvironmentFile=$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE
|
||||||
|
|
||||||
|
From 512fbaf61e6d24a1236ef50e323ea17a62485c36 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Lars Ellenberg <lars.ellenberg@linbit.com>
|
||||||
|
Date: Fri, 8 Oct 2021 17:08:59 +0200
|
||||||
|
Subject: [PATCH 6/6] Fix NFSv4 lock failover: add rpc-statd-notify to the
|
||||||
|
comment list of potentially interesting services
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/nfsserver | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
|
||||||
|
index 5326bd2c6e..240dd1a76c 100755
|
||||||
|
--- a/heartbeat/nfsserver
|
||||||
|
+++ b/heartbeat/nfsserver
|
||||||
|
@@ -12,7 +12,7 @@
|
||||||
|
# did the unshare for gssd and idmapd as well, even though it seems unclear why.
|
||||||
|
# Let's start with just the nfs-server, and add others if/when we have clear
|
||||||
|
# indication they need it.
|
||||||
|
-#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpcbind.service"
|
||||||
|
+#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpc-statd-notify.service rpcbind.service"
|
||||||
|
NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-server.service"
|
||||||
|
SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE=/run/sysconfig/nfs-server-scope
|
||||||
|
SYSTEMD_UNSHARE_UTS_DROPIN=51-resource-agents-unshare-uts.conf
|
29
SOURCES/bz2015789-gcp-ilb-1-fix-log_enable.patch
Normal file
29
SOURCES/bz2015789-gcp-ilb-1-fix-log_enable.patch
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
From 9a7b47f1838e9d6e3c807e9db5312097adb5c499 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Fri, 5 Nov 2021 10:30:49 +0100
|
||||||
|
Subject: [PATCH] gcp-ilb/Squid: fix issues detected by CI
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/Squid.in | 2 +-
|
||||||
|
heartbeat/gcp-ilb | 4 ++--
|
||||||
|
2 files changed, 3 insertions(+), 3 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb
|
||||||
|
index 28484b241..48dc3ac4e 100755
|
||||||
|
--- a/heartbeat/gcp-ilb
|
||||||
|
+++ b/heartbeat/gcp-ilb
|
||||||
|
@@ -53,12 +53,12 @@ pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid"
|
||||||
|
|
||||||
|
|
||||||
|
#Validate command for logging
|
||||||
|
-if $OCF_RESKEY_log_enable = "true"; then
|
||||||
|
+if [ $OCF_RESKEY_log_enable = "true" ]; then
|
||||||
|
if type $OCF_RESKEY_log_cmd > /dev/null 2>&1; then
|
||||||
|
logging_cmd="$OCF_RESKEY_log_cmd $OCF_RESKEY_log_params"
|
||||||
|
ocf_log debug "Logging command is: \'$logging_cmd\' "
|
||||||
|
else
|
||||||
|
- $OCF_RESKEY_log_enable = "false"
|
||||||
|
+ OCF_RESKEY_log_enable="false"
|
||||||
|
ocf_log err "\'$logging_cmd\' is invalid. External logging disabled."
|
||||||
|
|
||||||
|
fi;
|
@ -0,0 +1,51 @@
|
|||||||
|
From 14576f7ca02fb0abff188238ac019e88ab06e878 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Tue, 9 Nov 2021 11:49:36 +0100
|
||||||
|
Subject: [PATCH] gcp-ilb: only check if log_cmd binary is available if
|
||||||
|
log_enable is true
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/gcp-ilb | 9 +++++----
|
||||||
|
1 file changed, 5 insertions(+), 4 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb
|
||||||
|
index 48dc3ac4e..f84f373b7 100755
|
||||||
|
--- a/heartbeat/gcp-ilb
|
||||||
|
+++ b/heartbeat/gcp-ilb
|
||||||
|
@@ -37,7 +37,7 @@ if type "socat" > /dev/null 2>&1; then
|
||||||
|
OCF_RESKEY_cat_default="socat"
|
||||||
|
else
|
||||||
|
OCF_RESKEY_cat_default="nc"
|
||||||
|
-fi;
|
||||||
|
+fi
|
||||||
|
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_cat=${OCF_RESKEY_cat_default}}
|
||||||
|
@@ -53,7 +53,7 @@ pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid"
|
||||||
|
|
||||||
|
|
||||||
|
#Validate command for logging
|
||||||
|
-if [ $OCF_RESKEY_log_enable = "true" ]; then
|
||||||
|
+if ocf_is_true "$OCF_RESKEY_log_enable"; then
|
||||||
|
if type $OCF_RESKEY_log_cmd > /dev/null 2>&1; then
|
||||||
|
logging_cmd="$OCF_RESKEY_log_cmd $OCF_RESKEY_log_params"
|
||||||
|
ocf_log debug "Logging command is: \'$logging_cmd\' "
|
||||||
|
@@ -61,7 +61,7 @@ if [ $OCF_RESKEY_log_enable = "true" ]; then
|
||||||
|
OCF_RESKEY_log_enable="false"
|
||||||
|
ocf_log err "\'$logging_cmd\' is invalid. External logging disabled."
|
||||||
|
|
||||||
|
- fi;
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
@@ -285,7 +285,8 @@ ilb_stop() {
|
||||||
|
|
||||||
|
ilb_validate() {
|
||||||
|
check_binary "$OCF_RESKEY_cat"
|
||||||
|
- check_binary "$OCF_RESKEY_log_cmd"
|
||||||
|
+
|
||||||
|
+ ocf_is_true "$OCF_RESKEY_log_enable" && check_binary "$OCF_RESKEY_log_cmd"
|
||||||
|
|
||||||
|
if ! ocf_is_decimal "$OCF_RESKEY_port"; then
|
||||||
|
ocf_exit_reason "$OCF_RESKEY_port is not a valid port"
|
11
SOURCES/bz2015789-gcp-ilb-3-use-bundled-gcloud.patch
Normal file
11
SOURCES/bz2015789-gcp-ilb-3-use-bundled-gcloud.patch
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
--- a/heartbeat/gcp-ilb 2021-11-09 14:13:20.311243373 +0100
|
||||||
|
+++ b/heartbeat/gcp-ilb 2021-11-09 14:13:50.269329165 +0100
|
||||||
|
@@ -28,7 +28,7 @@
|
||||||
|
OCF_RESKEY_cat_default="socat"
|
||||||
|
OCF_RESKEY_port_default="60000"
|
||||||
|
OCF_RESKEY_log_enable_default="false"
|
||||||
|
-OCF_RESKEY_log_cmd_default="gcloud"
|
||||||
|
+OCF_RESKEY_log_cmd_default="gcloud-ra"
|
||||||
|
OCF_RESKEY_log_params_default="logging write GCPILB"
|
||||||
|
OCF_RESKEY_log_end_params_default=""
|
||||||
|
|
22
SOURCES/bz2027591-nfsnotify-fix-notify_args-default.patch
Normal file
22
SOURCES/bz2027591-nfsnotify-fix-notify_args-default.patch
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
From 1c037b3ac0288509fb2b74fb4a661a504155da15 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Thu, 26 Aug 2021 12:27:50 +0200
|
||||||
|
Subject: [PATCH] nfsnotify: fix default value for "notify_args"
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/nfsnotify.in | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/nfsnotify.in b/heartbeat/nfsnotify.in
|
||||||
|
index 851f6ad6b4..fe6d2793ba 100644
|
||||||
|
--- a/heartbeat/nfsnotify.in
|
||||||
|
+++ b/heartbeat/nfsnotify.in
|
||||||
|
@@ -33,7 +33,7 @@
|
||||||
|
# Parameter defaults
|
||||||
|
|
||||||
|
OCF_RESKEY_source_host_default=""
|
||||||
|
-OCF_RESKEY_notify_args_default="false"
|
||||||
|
+OCF_RESKEY_notify_args_default=""
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_source_host=${OCF_RESKEY_source_host_default}}
|
||||||
|
: ${OCF_RESKEY_notify_args=${OCF_RESKEY_notify_args_default}}
|
32
SOURCES/bz2029706-1-db2-crm_attribute-use-forever.patch
Normal file
32
SOURCES/bz2029706-1-db2-crm_attribute-use-forever.patch
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
From 925180da2f41feddc5aac3c249563eb179b34029 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Mon, 22 Nov 2021 16:44:48 +0100
|
||||||
|
Subject: [PATCH] db2: use -l forever instead of -t nodes -l reboot, as they
|
||||||
|
conflict with eachother
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/db2 | 4 ++--
|
||||||
|
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/db2 b/heartbeat/db2
|
||||||
|
index 03146a957..fa2a45a5d 100755
|
||||||
|
--- a/heartbeat/db2
|
||||||
|
+++ b/heartbeat/db2
|
||||||
|
@@ -274,7 +274,7 @@ db2_fal_attrib() {
|
||||||
|
while read id node member
|
||||||
|
do
|
||||||
|
[ "$member" = member -a "$node" != "$me" ] || continue
|
||||||
|
- crm_attribute -t nodes -l reboot --node=$node -n $attr -v "$3"
|
||||||
|
+ crm_attribute -l forever --node=$node -n $attr -v "$3"
|
||||||
|
rc=$?
|
||||||
|
ocf_log info "DB2 instance $instance($db2node/$db: setting attrib for FAL to $FIRST_ACTIVE_LOG @ $node"
|
||||||
|
[ $rc != 0 ] && break
|
||||||
|
@@ -282,7 +282,7 @@ db2_fal_attrib() {
|
||||||
|
;;
|
||||||
|
|
||||||
|
get)
|
||||||
|
- crm_attribute -t nodes -l reboot -n $attr -G --quiet 2>&1
|
||||||
|
+ crm_attribute -l forever -n $attr -G --quiet 2>&1
|
||||||
|
rc=$?
|
||||||
|
if [ $rc != 0 ]
|
||||||
|
then
|
32
SOURCES/bz2029706-2-db2-fixes.patch
Normal file
32
SOURCES/bz2029706-2-db2-fixes.patch
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
From 75eaf06eea8957aa3941823955d1c8fa7933ab1d Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Wed, 23 Feb 2022 16:32:21 +0100
|
||||||
|
Subject: [PATCH] db2: only warn when notify isnt set, and use
|
||||||
|
ocf_local_nodename() to get node name
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/db2 | 4 ++--
|
||||||
|
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/db2 b/heartbeat/db2
|
||||||
|
index fa2a45a5d..ea24d33fc 100755
|
||||||
|
--- a/heartbeat/db2
|
||||||
|
+++ b/heartbeat/db2
|
||||||
|
@@ -267,7 +267,7 @@ db2_fal_attrib() {
|
||||||
|
|
||||||
|
case "$2" in
|
||||||
|
set)
|
||||||
|
- me=$(uname -n)
|
||||||
|
+ me=$(ocf_local_nodename)
|
||||||
|
|
||||||
|
# loop over all member nodes and set attribute
|
||||||
|
crm_node -l |
|
||||||
|
@@ -284,7 +284,7 @@ db2_fal_attrib() {
|
||||||
|
get)
|
||||||
|
crm_attribute -l forever -n $attr -G --quiet 2>&1
|
||||||
|
rc=$?
|
||||||
|
- if [ $rc != 0 ]
|
||||||
|
+ if ! ocf_is_true "$OCF_RESKEY_CRM_meta_notify" && [ $rc != 0 ]
|
||||||
|
then
|
||||||
|
ocf_log warn "DB2 instance $instance($db2node/$db: can't retrieve attribute $attr, are you sure notifications are enabled ?"
|
||||||
|
fi
|
70
SOURCES/bz2039692-mysql-1-replication-fixes.patch
Normal file
70
SOURCES/bz2039692-mysql-1-replication-fixes.patch
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
From 706b48fd93a75a582c538013aea1418b6ed69dd0 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Thu, 9 Mar 2023 15:57:59 +0100
|
||||||
|
Subject: [PATCH] mysql: promotable fixes to avoid nodes getting bounced around
|
||||||
|
by setting -v 1/-v 2, and added OCF_CHECK_LEVEL=10 for promotable resources
|
||||||
|
to be able to distinguish between promoted and not
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/mysql | 19 +++++++++++++------
|
||||||
|
1 file changed, 13 insertions(+), 6 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/mysql b/heartbeat/mysql
|
||||||
|
index 9ab49ab20e..29ed427319 100755
|
||||||
|
--- a/heartbeat/mysql
|
||||||
|
+++ b/heartbeat/mysql
|
||||||
|
@@ -757,6 +757,10 @@ mysql_monitor() {
|
||||||
|
status_loglevel="info"
|
||||||
|
fi
|
||||||
|
|
||||||
|
+ if ocf_is_ms; then
|
||||||
|
+ OCF_CHECK_LEVEL=10
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
mysql_common_status $status_loglevel
|
||||||
|
rc=$?
|
||||||
|
|
||||||
|
@@ -777,7 +781,13 @@ mysql_monitor() {
|
||||||
|
return $rc
|
||||||
|
fi
|
||||||
|
|
||||||
|
- if [ $OCF_CHECK_LEVEL -gt 0 -a -n "$OCF_RESKEY_test_table" ]; then
|
||||||
|
+ if [ $OCF_CHECK_LEVEL -eq 10 ]; then
|
||||||
|
+ if [ -z "$OCF_RESKEY_test_table" ]; then
|
||||||
|
+ ocf_exit_reason "test_table not set"
|
||||||
|
+ return $OCF_ERR_CONFIGURED
|
||||||
|
+
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
# Check if this instance is configured as a slave, and if so
|
||||||
|
# check slave status
|
||||||
|
if is_slave; then
|
||||||
|
@@ -795,18 +805,16 @@ mysql_monitor() {
|
||||||
|
ocf_exit_reason "Failed to select from $test_table";
|
||||||
|
return $OCF_ERR_GENERIC;
|
||||||
|
fi
|
||||||
|
- else
|
||||||
|
- # In case no exnteded tests are enabled and we are in master/slave mode _always_ set the master score to 1 if we reached this point
|
||||||
|
- ocf_is_ms && $CRM_MASTER -v 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ocf_is_ms && ! get_read_only; then
|
||||||
|
ocf_log debug "MySQL monitor succeeded (master)";
|
||||||
|
# Always set master score for the master
|
||||||
|
- $CRM_MASTER -v 2
|
||||||
|
+ $CRM_MASTER -v $((${OCF_RESKEY_max_slave_lag}+1))
|
||||||
|
return $OCF_RUNNING_MASTER
|
||||||
|
else
|
||||||
|
ocf_log debug "MySQL monitor succeeded";
|
||||||
|
+ ocf_is_ms && $CRM_MASTER -v 1
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
@@ -873,7 +881,6 @@ mysql_start() {
|
||||||
|
# preference set by the administrator. We choose a low
|
||||||
|
# greater-than-zero preference.
|
||||||
|
$CRM_MASTER -v 1
|
||||||
|
-
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Initial monitor action
|
32
SOURCES/bz2039692-mysql-2-fix-demoted-score-bounce.patch
Normal file
32
SOURCES/bz2039692-mysql-2-fix-demoted-score-bounce.patch
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
From 34483f8029ea9ab25220cfee71d53adaf5aacaa0 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Wed, 14 Jun 2023 14:37:01 +0200
|
||||||
|
Subject: [PATCH] mysql: fix promotion_score bouncing between ~3600 and 1 on
|
||||||
|
demoted nodes
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/mysql | 11 -----------
|
||||||
|
1 file changed, 11 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/mysql b/heartbeat/mysql
|
||||||
|
index 29ed42731..1df2fc0f2 100755
|
||||||
|
--- a/heartbeat/mysql
|
||||||
|
+++ b/heartbeat/mysql
|
||||||
|
@@ -517,17 +517,6 @@ check_slave() {
|
||||||
|
|
||||||
|
exit $OCF_ERR_INSTALLED
|
||||||
|
fi
|
||||||
|
- elif ocf_is_ms; then
|
||||||
|
- # Even if we're not set to evict lagging slaves, we can
|
||||||
|
- # still use the seconds behind master value to set our
|
||||||
|
- # master preference.
|
||||||
|
- local master_pref
|
||||||
|
- master_pref=$((${OCF_RESKEY_max_slave_lag}-${secs_behind}))
|
||||||
|
- if [ $master_pref -lt 0 ]; then
|
||||||
|
- # Sanitize a below-zero preference to just zero
|
||||||
|
- master_pref=0
|
||||||
|
- fi
|
||||||
|
- $CRM_MASTER -v $master_pref
|
||||||
|
fi
|
||||||
|
|
||||||
|
# is the slave ok to have a VIP on it
|
@ -0,0 +1,84 @@
|
|||||||
|
From 4d87bcfe5df8a1e40ee945e095ac9e7cca147ec4 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Wed, 29 Jun 2022 10:26:25 +0200
|
||||||
|
Subject: [PATCH] IPaddr2/IPsrcaddr: add/modify table parameter to be able to
|
||||||
|
find interface while using policy based routing
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/IPaddr2 | 12 ++++++++++++
|
||||||
|
heartbeat/IPsrcaddr | 5 ++++-
|
||||||
|
heartbeat/findif.sh | 2 +-
|
||||||
|
3 files changed, 17 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
|
||||||
|
index 97a7431a2..e8384c586 100755
|
||||||
|
--- a/heartbeat/IPaddr2
|
||||||
|
+++ b/heartbeat/IPaddr2
|
||||||
|
@@ -73,6 +73,7 @@ OCF_RESKEY_ip_default=""
|
||||||
|
OCF_RESKEY_cidr_netmask_default=""
|
||||||
|
OCF_RESKEY_broadcast_default=""
|
||||||
|
OCF_RESKEY_iflabel_default=""
|
||||||
|
+OCF_RESKEY_table_default=""
|
||||||
|
OCF_RESKEY_cidr_netmask_default=""
|
||||||
|
OCF_RESKEY_lvs_support_default=false
|
||||||
|
OCF_RESKEY_lvs_ipv6_addrlabel_default=false
|
||||||
|
@@ -97,6 +98,7 @@ OCF_RESKEY_network_namespace_default=""
|
||||||
|
: ${OCF_RESKEY_cidr_netmask=${OCF_RESKEY_cidr_netmask_default}}
|
||||||
|
: ${OCF_RESKEY_broadcast=${OCF_RESKEY_broadcast_default}}
|
||||||
|
: ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}}
|
||||||
|
+: ${OCF_RESKEY_table=${OCF_RESKEY_table_default}}
|
||||||
|
: ${OCF_RESKEY_lvs_support=${OCF_RESKEY_lvs_support_default}}
|
||||||
|
: ${OCF_RESKEY_lvs_ipv6_addrlabel=${OCF_RESKEY_lvs_ipv6_addrlabel_default}}
|
||||||
|
: ${OCF_RESKEY_lvs_ipv6_addrlabel_value=${OCF_RESKEY_lvs_ipv6_addrlabel_value_default}}
|
||||||
|
@@ -239,6 +241,16 @@ If a label is specified in nic name, this parameter has no effect.
|
||||||
|
<content type="string" default="${OCF_RESKEY_iflabel_default}"/>
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="table">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Table to use to lookup which interface to use for the IP.
|
||||||
|
+
|
||||||
|
+This can be used for policy based routing. See man ip-rule(8).
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Table</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_table_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="lvs_support">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Enable support for LVS Direct Routing configurations. In case a IP
|
||||||
|
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
|
||||||
|
index 1bd41a930..cf106cc34 100755
|
||||||
|
--- a/heartbeat/IPsrcaddr
|
||||||
|
+++ b/heartbeat/IPsrcaddr
|
||||||
|
@@ -155,13 +155,16 @@ Metric. Only needed if incorrect metric value is used.
|
||||||
|
|
||||||
|
<parameter name="table">
|
||||||
|
<longdesc lang="en">
|
||||||
|
-Table to modify. E.g. "local".
|
||||||
|
+Table to modify and use for interface lookup. E.g. "local".
|
||||||
|
|
||||||
|
The table has to have a route matching the "destination" parameter.
|
||||||
|
+
|
||||||
|
+This can be used for policy based routing. See man ip-rule(8).
|
||||||
|
</longdesc>
|
||||||
|
<shortdesc lang="en">Table</shortdesc>
|
||||||
|
<content type="string" default="${OCF_RESKEY_table_default}" />
|
||||||
|
</parameter>
|
||||||
|
+
|
||||||
|
</parameters>
|
||||||
|
|
||||||
|
<actions>
|
||||||
|
diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh
|
||||||
|
index 66bc6d56a..1a40cc9a4 100644
|
||||||
|
--- a/heartbeat/findif.sh
|
||||||
|
+++ b/heartbeat/findif.sh
|
||||||
|
@@ -32,7 +32,7 @@ prefixcheck() {
|
||||||
|
getnetworkinfo()
|
||||||
|
{
|
||||||
|
local line netinfo
|
||||||
|
- ip -o -f inet route list match $OCF_RESKEY_ip table local scope host | (while read line;
|
||||||
|
+ ip -o -f inet route list match $OCF_RESKEY_ip table "${OCF_RESKEY_table=local}" scope host | (while read line;
|
||||||
|
do
|
||||||
|
netinfo=`echo $line | awk '{print $2}'`
|
||||||
|
case $netinfo in
|
@ -0,0 +1,35 @@
|
|||||||
|
From da9e8e691f39494e14f8f11173b6ab6433384396 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Tue, 20 Jun 2023 14:19:23 +0200
|
||||||
|
Subject: [PATCH] findif.sh: fix table parameter so it uses main table by
|
||||||
|
default
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/findif.sh | 6 +++---
|
||||||
|
1 file changed, 3 insertions(+), 3 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh
|
||||||
|
index 1a40cc9a4b..6c04c98c19 100644
|
||||||
|
--- a/heartbeat/findif.sh
|
||||||
|
+++ b/heartbeat/findif.sh
|
||||||
|
@@ -32,7 +32,7 @@ prefixcheck() {
|
||||||
|
getnetworkinfo()
|
||||||
|
{
|
||||||
|
local line netinfo
|
||||||
|
- ip -o -f inet route list match $OCF_RESKEY_ip table "${OCF_RESKEY_table=local}" scope host | (while read line;
|
||||||
|
+ ip -o -f inet route list match $OCF_RESKEY_ip table "${OCF_RESKEY_table:=main}" scope host | (while read line;
|
||||||
|
do
|
||||||
|
netinfo=`echo $line | awk '{print $2}'`
|
||||||
|
case $netinfo in
|
||||||
|
@@ -215,9 +215,9 @@ findif()
|
||||||
|
fi
|
||||||
|
if [ -n "$nic" ] ; then
|
||||||
|
# NIC supports more than two.
|
||||||
|
- set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
|
||||||
|
+ set -- $(ip -o -f $family route list match $match $scope table "${OCF_RESKEY_table:=main}" | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
|
||||||
|
else
|
||||||
|
- set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
|
||||||
|
+ set -- $(ip -o -f $family route list match $match $scope table "${OCF_RESKEY_table:=main}" | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
|
||||||
|
fi
|
||||||
|
if [ $# = 0 ] ; then
|
||||||
|
case $OCF_RESKEY_ip in
|
175
SOURCES/bz2049319-Filesystem-add-support-for-Amazon-EFS.patch
Normal file
175
SOURCES/bz2049319-Filesystem-add-support-for-Amazon-EFS.patch
Normal file
@ -0,0 +1,175 @@
|
|||||||
|
From cab190c737fdf58268aa5c009f6089b754862b22 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
Date: Tue, 1 Feb 2022 16:32:50 -0800
|
||||||
|
Subject: [PATCH 1/3] Filesystem: Fix OpenBSD check in fstype_supported()
|
||||||
|
|
||||||
|
fstype_supported() is supposed to skip the /proc/filesystems check if
|
||||||
|
the OS is OpenBSD. Instead, it skips the check if the OS is **not**
|
||||||
|
OpenBSD. That means the function has been a no-op for all other distros.
|
||||||
|
|
||||||
|
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
---
|
||||||
|
heartbeat/Filesystem | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
|
||||||
|
index 010c1dcfc..8b4792152 100755
|
||||||
|
--- a/heartbeat/Filesystem
|
||||||
|
+++ b/heartbeat/Filesystem
|
||||||
|
@@ -440,7 +440,7 @@ fstype_supported()
|
||||||
|
local support="$FSTYPE"
|
||||||
|
local rc
|
||||||
|
|
||||||
|
- if [ "X${HOSTOS}" != "XOpenBSD" ];then
|
||||||
|
+ if [ "X${HOSTOS}" = "XOpenBSD" ];then
|
||||||
|
# skip checking /proc/filesystems for obsd
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
fi
|
||||||
|
|
||||||
|
From 5d38b87daa9cfffa89a193df131d6ebd87cd05aa Mon Sep 17 00:00:00 2001
|
||||||
|
From: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
Date: Tue, 1 Feb 2022 18:26:32 -0800
|
||||||
|
Subject: [PATCH 2/3] Filesystem: Improve fstype_supported logs for fuse
|
||||||
|
|
||||||
|
Make it more clear when we have to use a different name to check for
|
||||||
|
support of a particular filesystem. Currently only used for fuse-type
|
||||||
|
filesystems.
|
||||||
|
|
||||||
|
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
---
|
||||||
|
heartbeat/Filesystem | 13 ++++++++++---
|
||||||
|
1 file changed, 10 insertions(+), 3 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
|
||||||
|
index 8b4792152..4d84846c1 100755
|
||||||
|
--- a/heartbeat/Filesystem
|
||||||
|
+++ b/heartbeat/Filesystem
|
||||||
|
@@ -455,6 +455,10 @@ fstype_supported()
|
||||||
|
fuse.*|glusterfs|rozofs) support="fuse";;
|
||||||
|
esac
|
||||||
|
|
||||||
|
+ if [ "$support" != "$FSTYPE" ]; then
|
||||||
|
+ ocf_log info "Checking support for $FSTYPE as \"$support\""
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
grep -w "$support"'$' /proc/filesystems >/dev/null
|
||||||
|
if [ $? -eq 0 ]; then
|
||||||
|
# found the fs type
|
||||||
|
@@ -465,7 +469,7 @@ fstype_supported()
|
||||||
|
# check the if the filesystem support exists again.
|
||||||
|
$MODPROBE $support >/dev/null
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
- ocf_exit_reason "Couldn't find filesystem $FSTYPE in /proc/filesystems and failed to load kernel module"
|
||||||
|
+ ocf_exit_reason "Couldn't find filesystem $support in /proc/filesystems and failed to load kernel module"
|
||||||
|
return $OCF_ERR_INSTALLED
|
||||||
|
fi
|
||||||
|
|
||||||
|
@@ -478,11 +482,11 @@ fstype_supported()
|
||||||
|
# yes. found the filesystem after doing the modprobe
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
fi
|
||||||
|
- ocf_log debug "Unable to find support for $FSTYPE in /proc/filesystems after modprobe, trying again"
|
||||||
|
+ ocf_log debug "Unable to find support for $support in /proc/filesystems after modprobe, trying again"
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
- ocf_exit_reason "Couldn't find filesystem $FSTYPE in /proc/filesystems"
|
||||||
|
+ ocf_exit_reason "Couldn't find filesystem $support in /proc/filesystems"
|
||||||
|
return $OCF_ERR_INSTALLED
|
||||||
|
}
|
||||||
|
|
||||||
|
@@ -837,6 +841,9 @@ Filesystem_monitor()
|
||||||
|
# VALIDATE_ALL: Are the instance parameters valid?
|
||||||
|
# FIXME!! The only part that's useful is the return code.
|
||||||
|
# This code always returns $OCF_SUCCESS (!)
|
||||||
|
+# FIXME!! Needs some tuning to match fstype_supported() (e.g., for
|
||||||
|
+# fuse). Can we just call fstype_supported() with a flag like
|
||||||
|
+# "no_modprobe" instead?
|
||||||
|
#
|
||||||
|
Filesystem_validate_all()
|
||||||
|
{
|
||||||
|
|
||||||
|
From e2174244067b02d798e0f12437f0f499c80f91fe Mon Sep 17 00:00:00 2001
|
||||||
|
From: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
Date: Tue, 1 Feb 2022 18:55:47 -0800
|
||||||
|
Subject: [PATCH 3/3] Filesystem: Add support for Amazon EFS mount helper
|
||||||
|
|
||||||
|
mount.efs, the mount helper for Amazon Elastic File System (EFS)
|
||||||
|
provided by amazon-efs-utils [1], is a wrapper for mount.nfs4. It offers
|
||||||
|
a number of AWS-specific mount options and some security improvements
|
||||||
|
like encryption of data in transit.
|
||||||
|
|
||||||
|
This commit adds support by treating an fstype=efs like fstype=nfs4 for
|
||||||
|
the most part.
|
||||||
|
|
||||||
|
Resolves: RHBZ#2049319
|
||||||
|
|
||||||
|
[1] https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html
|
||||||
|
|
||||||
|
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
---
|
||||||
|
heartbeat/Filesystem | 14 ++++++++------
|
||||||
|
1 file changed, 8 insertions(+), 6 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
|
||||||
|
index 4d84846c1..1a90d6a42 100755
|
||||||
|
--- a/heartbeat/Filesystem
|
||||||
|
+++ b/heartbeat/Filesystem
|
||||||
|
@@ -341,7 +341,7 @@ determine_blockdevice() {
|
||||||
|
# Get the current real device name, if possible.
|
||||||
|
# (specified devname could be -L or -U...)
|
||||||
|
case "$FSTYPE" in
|
||||||
|
- nfs4|nfs|smbfs|cifs|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|none|lustre)
|
||||||
|
+ nfs4|nfs|efs|smbfs|cifs|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|none|lustre)
|
||||||
|
: ;;
|
||||||
|
*)
|
||||||
|
match_string="${TAB}${CANONICALIZED_MOUNTPOINT}${TAB}"
|
||||||
|
@@ -423,7 +423,7 @@ is_fsck_needed() {
|
||||||
|
no) false;;
|
||||||
|
""|auto)
|
||||||
|
case "$FSTYPE" in
|
||||||
|
- ext4|ext4dev|ext3|reiserfs|reiser4|nss|xfs|jfs|vfat|fat|nfs4|nfs|cifs|smbfs|ocfs2|gfs2|none|lustre|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs)
|
||||||
|
+ ext4|ext4dev|ext3|reiserfs|reiser4|nss|xfs|jfs|vfat|fat|nfs4|nfs|efs|cifs|smbfs|ocfs2|gfs2|none|lustre|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs)
|
||||||
|
false;;
|
||||||
|
*)
|
||||||
|
true;;
|
||||||
|
@@ -450,9 +450,11 @@ fstype_supported()
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
fi
|
||||||
|
|
||||||
|
- # support fuse-filesystems (e.g. GlusterFS)
|
||||||
|
+ # support fuse-filesystems (e.g. GlusterFS) and Amazon Elastic File
|
||||||
|
+ # System (EFS)
|
||||||
|
case "$FSTYPE" in
|
||||||
|
fuse.*|glusterfs|rozofs) support="fuse";;
|
||||||
|
+ efs) support="nfs4";;
|
||||||
|
esac
|
||||||
|
|
||||||
|
if [ "$support" != "$FSTYPE" ]; then
|
||||||
|
@@ -701,7 +703,7 @@ Filesystem_stop()
|
||||||
|
|
||||||
|
# For networked filesystems, there's merit in trying -f:
|
||||||
|
case "$FSTYPE" in
|
||||||
|
- nfs4|nfs|cifs|smbfs) umount_force="-f" ;;
|
||||||
|
+ nfs4|nfs|efs|cifs|smbfs) umount_force="-f" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Umount all sub-filesystems mounted under $MOUNTPOINT/ too.
|
||||||
|
@@ -892,7 +894,7 @@ set_blockdevice_var() {
|
||||||
|
|
||||||
|
# these are definitely not block devices
|
||||||
|
case "$FSTYPE" in
|
||||||
|
- nfs4|nfs|smbfs|cifs|none|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|lustre) return;;
|
||||||
|
+ nfs4|nfs|efs|smbfs|cifs|none|glusterfs|ceph|tmpfs|overlay|overlayfs|rozofs|zfs|cvfs|lustre) return;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
if $(is_option "loop"); then
|
||||||
|
@@ -1013,7 +1015,7 @@ is_option "ro" &&
|
||||||
|
CLUSTERSAFE=2
|
||||||
|
|
||||||
|
case "$FSTYPE" in
|
||||||
|
-nfs4|nfs|smbfs|cifs|none|gfs2|glusterfs|ceph|ocfs2|overlay|overlayfs|tmpfs|cvfs|lustre)
|
||||||
|
+nfs4|nfs|efs|smbfs|cifs|none|gfs2|glusterfs|ceph|ocfs2|overlay|overlayfs|tmpfs|cvfs|lustre)
|
||||||
|
CLUSTERSAFE=1 # this is kind of safe too
|
||||||
|
;;
|
||||||
|
# add here CLUSTERSAFE=0 for all filesystems which are not
|
@ -0,0 +1,44 @@
|
|||||||
|
From 26de0ad2f0f975166fe79ef72ab08e2c03519eea Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Mon, 28 Mar 2022 13:25:35 +0200
|
||||||
|
Subject: [PATCH] Filesystem: fix logic for UUID/label devices with space
|
||||||
|
between parameter and UUID/label
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/Filesystem | 14 ++++++++++----
|
||||||
|
1 file changed, 10 insertions(+), 4 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
|
||||||
|
index 1a90d6a42..72a1b8623 100755
|
||||||
|
--- a/heartbeat/Filesystem
|
||||||
|
+++ b/heartbeat/Filesystem
|
||||||
|
@@ -596,11 +596,11 @@ Filesystem_start()
|
||||||
|
flushbufs "$DEVICE"
|
||||||
|
# Mount the filesystem.
|
||||||
|
case "$FSTYPE" in
|
||||||
|
- none) $MOUNT $options "$DEVICE" "$MOUNTPOINT" &&
|
||||||
|
+ none) $MOUNT $options $device_opt "$DEVICE" "$MOUNTPOINT" &&
|
||||||
|
bind_mount
|
||||||
|
;;
|
||||||
|
- "") $MOUNT $options "$DEVICE" "$MOUNTPOINT" ;;
|
||||||
|
- *) $MOUNT -t "$FSTYPE" $options "$DEVICE" "$MOUNTPOINT" ;;
|
||||||
|
+ "") $MOUNT $options $device_opt "$DEVICE" "$MOUNTPOINT" ;;
|
||||||
|
+ *) $MOUNT -t "$FSTYPE" $options $device_opt "$DEVICE" "$MOUNTPOINT" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
@@ -902,7 +902,13 @@ set_blockdevice_var() {
|
||||||
|
fi
|
||||||
|
|
||||||
|
case "$DEVICE" in
|
||||||
|
- -*) # Oh... An option to mount instead... Typically -U or -L
|
||||||
|
+ --*) # Typically --uuid or --label
|
||||||
|
+ device_opt=$(echo $DEVICE | sed -E "s/([[:blank:]]|=).*//")
|
||||||
|
+ DEVICE=$(echo $DEVICE | sed -E "s/$device_opt([[:blank:]]*|=)//")
|
||||||
|
+ ;;
|
||||||
|
+ -*) # Oh... An option to mount instead... Typically -U or -L
|
||||||
|
+ device_opt=$(echo $DEVICE | cut -c1-2)
|
||||||
|
+ DEVICE=$(echo $DEVICE | sed "s/$device_opt[[:blank:]]*//")
|
||||||
|
;;
|
||||||
|
/dev/null) # Special case for BSC
|
||||||
|
blockdevice=yes
|
@ -0,0 +1,38 @@
|
|||||||
|
From d9b46474fc19d9c57e2cfb752d60319017da8410 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Wed, 6 Apr 2022 14:14:19 +0200
|
||||||
|
Subject: [PATCH] Filesystem: improve logic for UUID/label and add note that
|
||||||
|
/dev/disk/by-{uuid,label}/ are preferred on Linux
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/Filesystem | 8 +++++---
|
||||||
|
1 file changed, 5 insertions(+), 3 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
|
||||||
|
index 72a1b8623..44270ad98 100755
|
||||||
|
--- a/heartbeat/Filesystem
|
||||||
|
+++ b/heartbeat/Filesystem
|
||||||
|
@@ -163,6 +163,8 @@ directory where the status file is to be placed.
|
||||||
|
<parameter name="device" required="1">
|
||||||
|
<longdesc lang="en">
|
||||||
|
The name of block device for the filesystem, or -U, -L options for mount, or NFS mount specification.
|
||||||
|
+
|
||||||
|
+NOTE: On Linux /dev/disk/by-{uuid,label}/ are preferred to -U/-L.
|
||||||
|
</longdesc>
|
||||||
|
<shortdesc lang="en">block device</shortdesc>
|
||||||
|
<content type="string" default="${OCF_RESKEY_device_default}" />
|
||||||
|
@@ -902,11 +904,11 @@ set_blockdevice_var() {
|
||||||
|
fi
|
||||||
|
|
||||||
|
case "$DEVICE" in
|
||||||
|
- --*) # Typically --uuid or --label
|
||||||
|
- device_opt=$(echo $DEVICE | sed -E "s/([[:blank:]]|=).*//")
|
||||||
|
+ --uuid=*|--uuid\ *|--label=*|--label\ *)
|
||||||
|
+ device_opt=$(echo $DEVICE | sed "s/\([[:blank:]]\|=\).*//")
|
||||||
|
DEVICE=$(echo $DEVICE | sed -E "s/$device_opt([[:blank:]]*|=)//")
|
||||||
|
;;
|
||||||
|
- -*) # Oh... An option to mount instead... Typically -U or -L
|
||||||
|
+ -U*|-L*) # short versions of --uuid/--label
|
||||||
|
device_opt=$(echo $DEVICE | cut -c1-2)
|
||||||
|
DEVICE=$(echo $DEVICE | sed "s/$device_opt[[:blank:]]*//")
|
||||||
|
;;
|
41
SOURCES/bz2064342-1-IPsrcaddr-dhcp-warning.patch
Normal file
41
SOURCES/bz2064342-1-IPsrcaddr-dhcp-warning.patch
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
From 6d2ed7615614ede093f097189876d0f08553a43e Mon Sep 17 00:00:00 2001
|
||||||
|
From: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
Date: Mon, 14 Feb 2022 22:23:39 -0800
|
||||||
|
Subject: [PATCH] IPsrcaddr: Add warning about DHCP
|
||||||
|
|
||||||
|
If DHCP is enabled for the interface that serves OCF_RESKEY_ipaddress,
|
||||||
|
then NetworkManager (and possibly dhclient in systems without NM;
|
||||||
|
unsure) may later re-add a route that the IPsrcaddr resource replaced.
|
||||||
|
This may cause the resource to fail or cause other unexpected behavior.
|
||||||
|
|
||||||
|
So far this has been observed with a default route, albeit with an edge
|
||||||
|
case of a configuration (OCF_RESKEY_ipaddress on a different subnet)
|
||||||
|
that may not be totally valid. There are likely to be other situations
|
||||||
|
as well where DHCP can cause conflicts with IPsrcaddr's manual updates
|
||||||
|
via iproute. The safest option is to use only static configuration for
|
||||||
|
the involved interface.
|
||||||
|
|
||||||
|
Resolves: RHBZ#1654862
|
||||||
|
|
||||||
|
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
---
|
||||||
|
heartbeat/IPsrcaddr | 6 ++++++
|
||||||
|
1 file changed, 6 insertions(+)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
|
||||||
|
index ec868409f..fd7b6f68d 100755
|
||||||
|
--- a/heartbeat/IPsrcaddr
|
||||||
|
+++ b/heartbeat/IPsrcaddr
|
||||||
|
@@ -99,6 +99,12 @@ meta_data() {
|
||||||
|
<longdesc lang="en">
|
||||||
|
Resource script for IPsrcaddr. It manages the preferred source address
|
||||||
|
modification.
|
||||||
|
+
|
||||||
|
+Note: DHCP should not be enabled for the interface serving the preferred
|
||||||
|
+source address. Enabling DHCP may result in unexpected behavior, such as
|
||||||
|
+the automatic addition of duplicate or conflicting routes. This may
|
||||||
|
+cause the IPsrcaddr resource to fail, or it may produce undesired
|
||||||
|
+behavior while the resource continues to run.
|
||||||
|
</longdesc>
|
||||||
|
<shortdesc lang="en">Manages the preferred source address for outgoing IP packets</shortdesc>
|
||||||
|
|
@ -0,0 +1,49 @@
|
|||||||
|
From 5a65f66ff803ad7ed15af958cc1efdde4d53dcb7 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
Date: Thu, 17 Feb 2022 03:53:21 -0800
|
||||||
|
Subject: [PATCH] IPsrcaddr: Better error message when no matching route found
|
||||||
|
|
||||||
|
If OCF_RESKEY_destination is not explicitly set and `ip route list`
|
||||||
|
can't find a route matching the specifications, the NETWORK variable
|
||||||
|
doesn't get set. This causes a certain failure of the start operation,
|
||||||
|
because there is no PREFIX argument to `ip route replace` (syntax
|
||||||
|
error). It may also cause unexpected behavior for stop operations (but
|
||||||
|
not in all cases). During a monitor, this event can only happen if
|
||||||
|
something has changed outside the cluster's control, and so is cause
|
||||||
|
for warning there.
|
||||||
|
|
||||||
|
Exit OCF_ERR_ARGS for start, log debug for probe, log warning for all
|
||||||
|
other ops.
|
||||||
|
|
||||||
|
Resolves: RHBZ#1654862
|
||||||
|
|
||||||
|
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
|
||||||
|
---
|
||||||
|
heartbeat/IPsrcaddr | 14 ++++++++++++++
|
||||||
|
1 file changed, 14 insertions(+)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
|
||||||
|
index fd7b6f68d..f0216722d 100755
|
||||||
|
--- a/heartbeat/IPsrcaddr
|
||||||
|
+++ b/heartbeat/IPsrcaddr
|
||||||
|
@@ -549,6 +549,20 @@ rc=$?
|
||||||
|
INTERFACE=`echo $findif_out | awk '{print $1}'`
|
||||||
|
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
|
||||||
|
NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'`
|
||||||
|
+
|
||||||
|
+ if [ -z "$NETWORK" ]; then
|
||||||
|
+ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
|
||||||
|
+ err_str="$err_str match $ipaddress' failed to find a matching route"
|
||||||
|
+
|
||||||
|
+ if [ "$__OCF_ACTION" = "start" ]; then
|
||||||
|
+ ocf_exit_reason "$err_str"
|
||||||
|
+ exit $OCF_ERR_ARGS
|
||||||
|
+ elif ! ocf_is_probe; then
|
||||||
|
+ ocf_log warn "$err_str"
|
||||||
|
+ else
|
||||||
|
+ ocf_log debug "$err_str"
|
||||||
|
+ fi
|
||||||
|
+ fi
|
||||||
|
else
|
||||||
|
NETWORK="$OCF_RESKEY_destination"
|
||||||
|
fi
|
56
SOURCES/bz2064342-3-IPsrcaddr-fix-indentation.patch
Normal file
56
SOURCES/bz2064342-3-IPsrcaddr-fix-indentation.patch
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
From 0a197f1cd227e768837dff778a0c56fc1085d434 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Mon, 21 Feb 2022 13:54:04 +0100
|
||||||
|
Subject: [PATCH] IPsrcaddr: fix indentation in better error message code
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/IPsrcaddr | 30 +++++++++++++++---------------
|
||||||
|
1 file changed, 15 insertions(+), 15 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
|
||||||
|
index f0216722d..c82adc0e9 100755
|
||||||
|
--- a/heartbeat/IPsrcaddr
|
||||||
|
+++ b/heartbeat/IPsrcaddr
|
||||||
|
@@ -542,27 +542,27 @@ fi
|
||||||
|
findif_out=`$FINDIF -C`
|
||||||
|
rc=$?
|
||||||
|
[ $rc -ne 0 ] && {
|
||||||
|
- ocf_exit_reason "[$FINDIF -C] failed"
|
||||||
|
- exit $rc
|
||||||
|
+ ocf_exit_reason "[$FINDIF -C] failed"
|
||||||
|
+ exit $rc
|
||||||
|
}
|
||||||
|
|
||||||
|
INTERFACE=`echo $findif_out | awk '{print $1}'`
|
||||||
|
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
|
||||||
|
NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'`
|
||||||
|
|
||||||
|
- if [ -z "$NETWORK" ]; then
|
||||||
|
- err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
|
||||||
|
- err_str="$err_str match $ipaddress' failed to find a matching route"
|
||||||
|
-
|
||||||
|
- if [ "$__OCF_ACTION" = "start" ]; then
|
||||||
|
- ocf_exit_reason "$err_str"
|
||||||
|
- exit $OCF_ERR_ARGS
|
||||||
|
- elif ! ocf_is_probe; then
|
||||||
|
- ocf_log warn "$err_str"
|
||||||
|
- else
|
||||||
|
- ocf_log debug "$err_str"
|
||||||
|
- fi
|
||||||
|
- fi
|
||||||
|
+ if [ -z "$NETWORK" ]; then
|
||||||
|
+ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
|
||||||
|
+ err_str="$err_str match $ipaddress' failed to find a matching route"
|
||||||
|
+
|
||||||
|
+ if [ "$__OCF_ACTION" = "start" ]; then
|
||||||
|
+ ocf_exit_reason "$err_str"
|
||||||
|
+ exit $OCF_ERR_ARGS
|
||||||
|
+ elif ! ocf_is_probe; then
|
||||||
|
+ ocf_log warn "$err_str"
|
||||||
|
+ else
|
||||||
|
+ ocf_log debug "$err_str"
|
||||||
|
+ fi
|
||||||
|
+ fi
|
||||||
|
else
|
||||||
|
NETWORK="$OCF_RESKEY_destination"
|
||||||
|
fi
|
117
SOURCES/bz2064342-4-IPsrcaddr-fixes.patch
Normal file
117
SOURCES/bz2064342-4-IPsrcaddr-fixes.patch
Normal file
@ -0,0 +1,117 @@
|
|||||||
|
From 50a596bfb977b18902dc62b99145bbd1a087690a Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Tue, 1 Mar 2022 11:06:07 +0100
|
||||||
|
Subject: [PATCH] IPsrcaddr: fixes
|
||||||
|
|
||||||
|
- use findif.sh to detect secondary interfaces
|
||||||
|
- get metric and proto to update the correct route/update it correctly
|
||||||
|
- match route using interface to fail when trying to update secondary
|
||||||
|
interfaces without specifying destination (would update default route
|
||||||
|
before)
|
||||||
|
- also use PRIMARY_IP/OPTS during stop-action for default routes (to get
|
||||||
|
back to the exact routes we started with)
|
||||||
|
- dont fail during stop-action if route doesnt exist
|
||||||
|
- use [[:blank:]] for WS to follow POSIX standard (suggested by nrwahl)
|
||||||
|
---
|
||||||
|
heartbeat/IPsrcaddr | 35 +++++++++++++++++++----------------
|
||||||
|
1 file changed, 19 insertions(+), 16 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
|
||||||
|
index c82adc0e9..7dbf65ff5 100755
|
||||||
|
--- a/heartbeat/IPsrcaddr
|
||||||
|
+++ b/heartbeat/IPsrcaddr
|
||||||
|
@@ -52,6 +52,7 @@
|
||||||
|
# Initialization:
|
||||||
|
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||||
|
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||||
|
+. ${OCF_FUNCTIONS_DIR}/findif.sh
|
||||||
|
|
||||||
|
# Defaults
|
||||||
|
OCF_RESKEY_ipaddress_default=""
|
||||||
|
@@ -181,19 +182,21 @@ errorexit() {
|
||||||
|
#
|
||||||
|
# where the src clause "src Y.Y.Y.Y" may or may not be present
|
||||||
|
|
||||||
|
-WS="[`echo -en ' \t'`]"
|
||||||
|
+WS="[[:blank:]]"
|
||||||
|
OCTET="[0-9]\{1,3\}"
|
||||||
|
IPADDR="\($OCTET\.\)\{3\}$OCTET"
|
||||||
|
SRCCLAUSE="src$WS$WS*\($IPADDR\)"
|
||||||
|
MATCHROUTE="\(.*${WS}\)\($SRCCLAUSE\)\($WS.*\|$\)"
|
||||||
|
-FINDIF=$HA_BIN/findif
|
||||||
|
+METRICCLAUSE=".*\(metric$WS[^ ]\+\)"
|
||||||
|
+PROTOCLAUSE=".*\(proto$WS[^ ]\+\)"
|
||||||
|
+FINDIF=findif
|
||||||
|
|
||||||
|
# findif needs that to be set
|
||||||
|
export OCF_RESKEY_ip=$OCF_RESKEY_ipaddress
|
||||||
|
|
||||||
|
srca_read() {
|
||||||
|
# Capture matching route - doublequotes prevent word splitting...
|
||||||
|
- ROUTE="`$CMDSHOW 2> /dev/null`" || errorexit "command '$CMDSHOW' failed"
|
||||||
|
+ ROUTE="`$CMDSHOW dev $INTERFACE 2> /dev/null`" || errorexit "command '$CMDSHOW' failed"
|
||||||
|
|
||||||
|
# ... so we can make sure there is only 1 matching route
|
||||||
|
[ 1 -eq `echo "$ROUTE" | wc -l` ] || \
|
||||||
|
@@ -201,7 +204,7 @@ srca_read() {
|
||||||
|
|
||||||
|
# But there might still be no matching route
|
||||||
|
[ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] && [ -z "$ROUTE" ] && \
|
||||||
|
- ! ocf_is_probe && errorexit "no matching route exists"
|
||||||
|
+ ! ocf_is_probe && [ "$__OCF_ACTION" != stop ] && errorexit "no matching route exists"
|
||||||
|
|
||||||
|
# Sed out the source ip address if it exists
|
||||||
|
SRCIP=`echo $ROUTE | sed -n "s/$MATCHROUTE/\3/p"`
|
||||||
|
@@ -232,8 +235,8 @@ srca_start() {
|
||||||
|
rc=$OCF_SUCCESS
|
||||||
|
ocf_log info "The ip route has been already set.($NETWORK, $INTERFACE, $ROUTE_WO_SRC)"
|
||||||
|
else
|
||||||
|
- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE src $1 || \
|
||||||
|
- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE src $1' failed"
|
||||||
|
+ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC || \
|
||||||
|
+ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC' failed"
|
||||||
|
|
||||||
|
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
|
||||||
|
$CMDCHANGE $ROUTE_WO_SRC src $1 || \
|
||||||
|
@@ -266,14 +269,11 @@ srca_stop() {
|
||||||
|
|
||||||
|
[ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address"
|
||||||
|
|
||||||
|
- OPTS=""
|
||||||
|
- if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] ;then
|
||||||
|
- PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')"
|
||||||
|
- OPTS="proto kernel scope host src $PRIMARY_IP"
|
||||||
|
- fi
|
||||||
|
+ PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')"
|
||||||
|
+ OPTS="proto kernel scope link src $PRIMARY_IP"
|
||||||
|
|
||||||
|
- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS || \
|
||||||
|
- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS' failed"
|
||||||
|
+ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC || \
|
||||||
|
+ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC' failed"
|
||||||
|
|
||||||
|
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
|
||||||
|
$CMDCHANGE $ROUTE_WO_SRC || \
|
||||||
|
@@ -539,16 +539,19 @@ if [ $rc -ne $OCF_SUCCESS ]; then
|
||||||
|
esac
|
||||||
|
fi
|
||||||
|
|
||||||
|
-findif_out=`$FINDIF -C`
|
||||||
|
+findif_out=`$FINDIF`
|
||||||
|
rc=$?
|
||||||
|
[ $rc -ne 0 ] && {
|
||||||
|
- ocf_exit_reason "[$FINDIF -C] failed"
|
||||||
|
+ ocf_exit_reason "[$FINDIF] failed"
|
||||||
|
exit $rc
|
||||||
|
}
|
||||||
|
|
||||||
|
INTERFACE=`echo $findif_out | awk '{print $1}'`
|
||||||
|
+LISTROUTE=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress`
|
||||||
|
+METRIC=`echo $LISTROUTE | sed -n "s/$METRICCLAUSE/\1/p"`
|
||||||
|
+[ -z "$PROTO" ] && PROTO=`echo $LISTROUTE | sed -n "s/$PROTOCLAUSE/\1/p"`
|
||||||
|
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
|
||||||
|
- NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'`
|
||||||
|
+ NETWORK=`echo $LISTROUTE | grep -m 1 -o '^[^ ]*'`
|
||||||
|
|
||||||
|
if [ -z "$NETWORK" ]; then
|
||||||
|
err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
|
102
SOURCES/bz2072043-LVM-activate-fix-fence-issue.patch
Normal file
102
SOURCES/bz2072043-LVM-activate-fix-fence-issue.patch
Normal file
@ -0,0 +1,102 @@
|
|||||||
|
From e651576c1b5c1ffbe0fd1b78f209be9a3f9764e7 Mon Sep 17 00:00:00 2001
|
||||||
|
From: XingWei-Liu <liuxingwei@uniontech.com>
|
||||||
|
Date: Thu, 10 Mar 2022 10:38:11 +0800
|
||||||
|
Subject: [PATCH 1/4] change lvm_status return value from ocf_not_running to
|
||||||
|
ocf_err_generic
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/LVM-activate | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
|
||||||
|
index aed672ea3..0aef76706 100755
|
||||||
|
--- a/heartbeat/LVM-activate
|
||||||
|
+++ b/heartbeat/LVM-activate
|
||||||
|
@@ -790,7 +790,7 @@ lvm_status() {
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $dm_count -eq 0 ]; then
|
||||||
|
- return $OCF_NOT_RUNNING
|
||||||
|
+ return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
case "$OCF_CHECK_LEVEL" in
|
||||||
|
|
||||||
|
From 540ae56436a4f9547bb17aa206fe0e8c7a7fea87 Mon Sep 17 00:00:00 2001
|
||||||
|
From: XingWei-Liu <liuxingwei@uniontech.com>
|
||||||
|
Date: Thu, 10 Mar 2022 16:44:25 +0800
|
||||||
|
Subject: [PATCH 2/4] add if ocf_is_probe in monitor func
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/LVM-activate | 6 +++++-
|
||||||
|
1 file changed, 5 insertions(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
|
||||||
|
index 0aef76706..c86606637 100755
|
||||||
|
--- a/heartbeat/LVM-activate
|
||||||
|
+++ b/heartbeat/LVM-activate
|
||||||
|
@@ -790,7 +790,11 @@ lvm_status() {
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $dm_count -eq 0 ]; then
|
||||||
|
- return $OCF_ERR_GENERIC
|
||||||
|
+ if ocf_is_probe ;then
|
||||||
|
+ return $OCF_NOT_RUNNING
|
||||||
|
+ else
|
||||||
|
+ return $OCF_ERR_GENERIC
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
case "$OCF_CHECK_LEVEL" in
|
||||||
|
|
||||||
|
From ae3f35d4f671f3288034a257c6dd8eff9a83447a Mon Sep 17 00:00:00 2001
|
||||||
|
From: XingWei-Liu <liuxingwei@uniontech.com>
|
||||||
|
Date: Thu, 10 Mar 2022 16:50:04 +0800
|
||||||
|
Subject: [PATCH 3/4] add if ocf_is_probe in monitor func
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/LVM-activate | 4 ++--
|
||||||
|
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
|
||||||
|
index c86606637..f345f73a9 100755
|
||||||
|
--- a/heartbeat/LVM-activate
|
||||||
|
+++ b/heartbeat/LVM-activate
|
||||||
|
@@ -791,9 +791,9 @@ lvm_status() {
|
||||||
|
|
||||||
|
if [ $dm_count -eq 0 ]; then
|
||||||
|
if ocf_is_probe ;then
|
||||||
|
- return $OCF_NOT_RUNNING
|
||||||
|
- else
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
+ else
|
||||||
|
+ return $OCF_NOT_RUNNING
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
From 1072c0490ef936a1a7dfd8411da434dce1569457 Mon Sep 17 00:00:00 2001
|
||||||
|
From: XingWei-Liu <liuxingwei@uniontech.com>
|
||||||
|
Date: Thu, 10 Mar 2022 18:10:21 +0800
|
||||||
|
Subject: [PATCH 4/4] reverse return value in monitor func
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/LVM-activate | 4 ++--
|
||||||
|
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
|
||||||
|
index f345f73a9..c86606637 100755
|
||||||
|
--- a/heartbeat/LVM-activate
|
||||||
|
+++ b/heartbeat/LVM-activate
|
||||||
|
@@ -791,9 +791,9 @@ lvm_status() {
|
||||||
|
|
||||||
|
if [ $dm_count -eq 0 ]; then
|
||||||
|
if ocf_is_probe ;then
|
||||||
|
- return $OCF_ERR_GENERIC
|
||||||
|
- else
|
||||||
|
return $OCF_NOT_RUNNING
|
||||||
|
+ else
|
||||||
|
+ return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
@ -0,0 +1,25 @@
|
|||||||
|
From b3885f7d95fe390371f806c7f3debb3ec8ad012d Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Tue, 7 Jun 2022 15:20:11 +0200
|
||||||
|
Subject: [PATCH] lvmlockd: fail when use_lvmlockd has not been set
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/lvmlockd | 5 +++++
|
||||||
|
1 file changed, 5 insertions(+)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/lvmlockd b/heartbeat/lvmlockd
|
||||||
|
index 05bb0a2e5..dc7bd2d7e 100755
|
||||||
|
--- a/heartbeat/lvmlockd
|
||||||
|
+++ b/heartbeat/lvmlockd
|
||||||
|
@@ -179,6 +179,11 @@ setup_lvm_config()
|
||||||
|
out=$(lvmconfig 'global/locking_type' 2> /dev/null)
|
||||||
|
lock_type=$(echo "$out" | cut -d'=' -f2)
|
||||||
|
|
||||||
|
+ if [ -z "$use_lvmlockd" ]; then
|
||||||
|
+ ocf_exit_reason "\"use_lvmlockd\" not set in /etc/lvm/lvm.conf ..."
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
if [ -n "$use_lvmlockd" ] && [ "$use_lvmlockd" != 1 ] ; then
|
||||||
|
ocf_log info "setting \"use_lvmlockd=1\" in /etc/lvm/lvm.conf ..."
|
||||||
|
sed -i 's,^[[:blank:]]*use_lvmlockd[[:blank:]]*=.*,\ \ \ \ use_lvmlockd = 1,g' /etc/lvm/lvm.conf
|
@ -0,0 +1,47 @@
|
|||||||
|
From 99c4f2af92a10155cf072198c72deffaed3883a5 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Wed, 3 Aug 2022 17:20:31 +0200
|
||||||
|
Subject: [PATCH] CTDB: move process to root cgroup if realtime scheduling is
|
||||||
|
enabled
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/CTDB.in | 2 ++
|
||||||
|
heartbeat/ocf-shellfuncs.in | 12 ++++++++++++
|
||||||
|
2 files changed, 14 insertions(+)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in
|
||||||
|
index d25d026ca..46f56cfac 100755
|
||||||
|
--- a/heartbeat/CTDB.in
|
||||||
|
+++ b/heartbeat/CTDB.in
|
||||||
|
@@ -709,6 +709,8 @@ EOF
|
||||||
|
invoke_ctdbd() {
|
||||||
|
local vers="$1"
|
||||||
|
|
||||||
|
+ ocf_move_to_root_cgroup_if_rt_enabled
|
||||||
|
+
|
||||||
|
ocf_version_cmp "$vers" "4.9.0"
|
||||||
|
if [ "$?" -ne "0" ]; then
|
||||||
|
# With 4.9+, all ctdbd binary parameters are provided as
|
||||||
|
diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
|
||||||
|
index 6be4e4e30..2c53a967a 100644
|
||||||
|
--- a/heartbeat/ocf-shellfuncs.in
|
||||||
|
+++ b/heartbeat/ocf-shellfuncs.in
|
||||||
|
@@ -672,6 +672,18 @@ EOF
|
||||||
|
systemctl daemon-reload
|
||||||
|
}
|
||||||
|
|
||||||
|
+# move process to root cgroup if realtime scheduling is enabled
|
||||||
|
+ocf_move_to_root_cgroup_if_rt_enabled()
|
||||||
|
+{
|
||||||
|
+ if [ -e "/sys/fs/cgroup/cpu/cpu.rt_runtime_us" ]; then
|
||||||
|
+ echo $$ >> /sys/fs/cgroup/cpu/tasks
|
||||||
|
+
|
||||||
|
+ if [ "$?" -ne "0" ]; then
|
||||||
|
+ ocf_log warn "Unable to move PID $$ to the root cgroup"
|
||||||
|
+ fi
|
||||||
|
+ fi
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
# usage: crm_mon_no_validation args...
|
||||||
|
# run crm_mon without any cib schema validation
|
||||||
|
# This is useful when an agent runs in a bundle to avoid potential
|
@ -0,0 +1,82 @@
|
|||||||
|
From 4420ef84f3172c67fc7b8b6ae41ea173de017bf4 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Petr Pavlu <petr.pavlu@suse.com>
|
||||||
|
Date: Wed, 25 May 2022 15:12:33 +0200
|
||||||
|
Subject: [PATCH] aws-vpc-move-ip: Allow to set the interface label
|
||||||
|
|
||||||
|
Add a parameter to specify an interface label to distinguish the IP
|
||||||
|
address managed by aws-vpc-move-ip, similarly as can be done with
|
||||||
|
IPaddr2. This allows to easily recognize the address from other
|
||||||
|
addresses assigned to a given interface.
|
||||||
|
---
|
||||||
|
heartbeat/aws-vpc-move-ip | 30 +++++++++++++++++++++++++++++-
|
||||||
|
1 file changed, 29 insertions(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
|
||||||
|
index 5d5204080..dee040300 100755
|
||||||
|
--- a/heartbeat/aws-vpc-move-ip
|
||||||
|
+++ b/heartbeat/aws-vpc-move-ip
|
||||||
|
@@ -43,6 +43,7 @@ OCF_RESKEY_address_default=""
|
||||||
|
OCF_RESKEY_routing_table_default=""
|
||||||
|
OCF_RESKEY_routing_table_role_default=""
|
||||||
|
OCF_RESKEY_interface_default="eth0"
|
||||||
|
+OCF_RESKEY_iflabel_default=""
|
||||||
|
OCF_RESKEY_monapi_default="false"
|
||||||
|
OCF_RESKEY_lookup_type_default="InstanceId"
|
||||||
|
|
||||||
|
@@ -54,6 +55,7 @@ OCF_RESKEY_lookup_type_default="InstanceId"
|
||||||
|
: ${OCF_RESKEY_routing_table=${OCF_RESKEY_routing_table_default}}
|
||||||
|
: ${OCF_RESKEY_routing_table_role=${OCF_RESKEY_routing_table_role_default}}
|
||||||
|
: ${OCF_RESKEY_interface=${OCF_RESKEY_interface_default}}
|
||||||
|
+: ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}}
|
||||||
|
: ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}}
|
||||||
|
: ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}}
|
||||||
|
|
||||||
|
@@ -149,6 +151,18 @@ Name of the network interface, i.e. eth0
|
||||||
|
<content type="string" default="${OCF_RESKEY_interface_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="iflabel">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+You can specify an additional label for your IP address here.
|
||||||
|
+This label is appended to your interface name.
|
||||||
|
+
|
||||||
|
+The kernel allows alphanumeric labels up to a maximum length of 15
|
||||||
|
+characters including the interface name and colon (e.g. eth0:foobar1234)
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Interface label</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_iflabel_default}"/>
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="monapi">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Enable enhanced monitoring using AWS API calls to check route table entry
|
||||||
|
@@ -215,6 +229,14 @@ ec2ip_validate() {
|
||||||
|
return $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
|
||||||
|
+ if [ -n "$OCF_RESKEY_iflabel" ]; then
|
||||||
|
+ label=${OCF_RESKEY_interface}:${OFC_RESKEY_iflabel}
|
||||||
|
+ if [ ${#label} -gt 15 ]; then
|
||||||
|
+ ocf_exit_reason "Interface label [$label] exceeds maximum character limit of 15"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
|
||||||
|
EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
|
||||||
|
|
||||||
|
@@ -363,7 +385,13 @@ ec2ip_get_and_configure() {
|
||||||
|
|
||||||
|
# Reconfigure the local ip address
|
||||||
|
ec2ip_drop
|
||||||
|
- cmd="ip addr add ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface"
|
||||||
|
+
|
||||||
|
+ extra_opts=""
|
||||||
|
+ if [ -n "$OCF_RESKEY_iflabel" ]; then
|
||||||
|
+ extra_opts="$extra_opts label $OCF_RESKEY_interface:$OCF_RESKEY_iflabel"
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ cmd="ip addr add ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface $extra_opts"
|
||||||
|
ocf_log debug "executing command: $cmd"
|
||||||
|
$cmd
|
||||||
|
rc=$?
|
39
SOURCES/bz2103370-ocf-tester-1-update.patch
Normal file
39
SOURCES/bz2103370-ocf-tester-1-update.patch
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
From 46e8d346ca4803245f51a157591c4df1126d3b49 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Tue, 12 Jul 2022 12:45:52 +0200
|
||||||
|
Subject: [PATCH] ocf-tester: use promotable terms
|
||||||
|
|
||||||
|
---
|
||||||
|
tools/ocf-tester.in | 8 ++++----
|
||||||
|
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/tools/ocf-tester.in b/tools/ocf-tester.in
|
||||||
|
index 10822a5a06..f1703ba1b7 100755
|
||||||
|
--- a/tools/ocf-tester.in
|
||||||
|
+++ b/tools/ocf-tester.in
|
||||||
|
@@ -295,10 +295,10 @@ if [ $rc -eq 3 ]; then
|
||||||
|
|
||||||
|
elif [ $rc -eq 8 ]; then
|
||||||
|
test_command demote "Cleanup, demote"
|
||||||
|
- assert $? 0 "Your agent was a master and could not be demoted" 1
|
||||||
|
+ assert $? 0 "Your agent was promoted and could not be demoted" 1
|
||||||
|
|
||||||
|
test_command stop "Cleanup, stop"
|
||||||
|
- assert $? 0 "Your agent was a master and could not be stopped" 1
|
||||||
|
+ assert $? 0 "Your agent was promoted and could not be stopped" 1
|
||||||
|
|
||||||
|
elif [ $rc -ne 7 ]; then
|
||||||
|
test_command stop
|
||||||
|
@@ -370,10 +370,10 @@ if [ $has_promote -eq 1 -a $has_demote -eq 1 ]; then
|
||||||
|
assert $? 0 "Demote failed" 1
|
||||||
|
|
||||||
|
elif [ $has_promote -eq 0 -a $has_demote -eq 0 ]; then
|
||||||
|
- info "* Your agent does not support master/slave (optional)"
|
||||||
|
+ info "* Your agent does not support promotable clones (optional)"
|
||||||
|
|
||||||
|
else
|
||||||
|
- echo "* Your agent partially supports master/slave"
|
||||||
|
+ echo "* Your agent partially supports promotable clones"
|
||||||
|
num_errors=`expr $num_errors + 1`
|
||||||
|
fi
|
||||||
|
|
@ -0,0 +1,166 @@
|
|||||||
|
From 687aa646852d5fd5d4e811b2ec562ebffa15e23d Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Thu, 14 Jul 2022 14:52:07 +0200
|
||||||
|
Subject: [PATCH] ocf-tester: remove deprecated lrmd/lrmadmin code that hasnt
|
||||||
|
worked since pre-pacemaker days
|
||||||
|
|
||||||
|
---
|
||||||
|
tools/ocf-tester.8 | 12 ++-----
|
||||||
|
tools/ocf-tester.in | 81 ---------------------------------------------
|
||||||
|
2 files changed, 2 insertions(+), 91 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/tools/ocf-tester.8 b/tools/ocf-tester.8
|
||||||
|
index 850ec0be04..3f398282d2 100644
|
||||||
|
--- a/tools/ocf-tester.8
|
||||||
|
+++ b/tools/ocf-tester.8
|
||||||
|
@@ -1,9 +1,9 @@
|
||||||
|
-.TH OCF-TESTER "8" "January 2012" "Tool for testing if a cluster resource is OCF compliant" "System Administration Utilities"
|
||||||
|
+.TH OCF-TESTER "8" "July 2022" "Tool for testing if a cluster resource is OCF compliant" "System Administration Utilities"
|
||||||
|
.SH NAME
|
||||||
|
ocf-tester \- Part of the Linux-HA project
|
||||||
|
.SH SYNOPSIS
|
||||||
|
.B ocf-tester
|
||||||
|
-[\fI-LhvqdX\fR] \fI-n resource_name \fR[\fI-o name=value\fR]\fI* /full/path/to/resource/agent\fR
|
||||||
|
+[\fI-hvqdX\fR] \fI-n resource_name \fR[\fI-o name=value\fR]\fI* /full/path/to/resource/agent\fR
|
||||||
|
.SH DESCRIPTION
|
||||||
|
Tool for testing if a cluster resource is OCF compliant
|
||||||
|
.SH OPTIONS
|
||||||
|
@@ -26,11 +26,6 @@ Name of the resource
|
||||||
|
\fB\-o\fR name=value
|
||||||
|
Name and value of any parameters required by the agent
|
||||||
|
.TP
|
||||||
|
-\fB\-L\fR
|
||||||
|
-Use lrmadmin/lrmd for tests
|
||||||
|
-.PP
|
||||||
|
-Usage: ocf\-tester [\-Lh] \fB\-n\fR resource_name [\-o name=value]* /full/path/to/resource/agent
|
||||||
|
-.TP
|
||||||
|
\fB\-h\fR
|
||||||
|
This text
|
||||||
|
.TP
|
||||||
|
@@ -51,6 +46,3 @@ Name of the resource
|
||||||
|
.TP
|
||||||
|
\fB\-o\fR name=value
|
||||||
|
Name and value of any parameters required by the agent
|
||||||
|
-.TP
|
||||||
|
-\fB\-L\fR
|
||||||
|
-Use lrmadmin/lrmd for tests
|
||||||
|
diff --git a/tools/ocf-tester.in b/tools/ocf-tester.in
|
||||||
|
index 10822a5a06..15b14e51ea 100755
|
||||||
|
--- a/tools/ocf-tester.in
|
||||||
|
+++ b/tools/ocf-tester.in
|
||||||
|
@@ -25,8 +25,6 @@
|
||||||
|
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
|
||||||
|
#
|
||||||
|
|
||||||
|
-LRMD=@libdir@/heartbeat/lrmd
|
||||||
|
-LRMADMIN=@sbindir@/lrmadmin
|
||||||
|
DATADIR=@datadir@
|
||||||
|
METADATA_LINT="xmllint --noout --valid -"
|
||||||
|
|
||||||
|
@@ -61,7 +59,6 @@ usage() {
|
||||||
|
echo " -X Turn on RA tracing (expect large output)"
|
||||||
|
echo " -n name Name of the resource"
|
||||||
|
echo " -o name=value Name and value of any parameters required by the agent"
|
||||||
|
- echo " -L Use lrmadmin/lrmd for tests"
|
||||||
|
exit $1
|
||||||
|
}
|
||||||
|
|
||||||
|
@@ -104,7 +101,6 @@ while test "$done" = "0"; do
|
||||||
|
-o) name=${2%%=*}; value=${2#*=};
|
||||||
|
lrm_ra_args="$lrm_ra_args $2";
|
||||||
|
ra_args="$ra_args OCF_RESKEY_$name='$value'"; shift; shift;;
|
||||||
|
- -L) use_lrmd=1; shift;;
|
||||||
|
-v) verbose=1; shift;;
|
||||||
|
-d) export HA_debug=1; shift;;
|
||||||
|
-X) export OCF_TRACE_RA=1; verbose=1; shift;;
|
||||||
|
@@ -140,79 +136,6 @@ stopped_rc=7
|
||||||
|
has_demote=1
|
||||||
|
has_promote=1
|
||||||
|
|
||||||
|
-start_lrmd() {
|
||||||
|
- lrmd_timeout=0
|
||||||
|
- lrmd_interval=0
|
||||||
|
- lrmd_target_rc=EVERYTIME
|
||||||
|
- lrmd_started=""
|
||||||
|
- $LRMD -s 2>/dev/null
|
||||||
|
- rc=$?
|
||||||
|
- if [ $rc -eq 3 ]; then
|
||||||
|
- lrmd_started=1
|
||||||
|
- $LRMD &
|
||||||
|
- sleep 1
|
||||||
|
- $LRMD -s 2>/dev/null
|
||||||
|
- else
|
||||||
|
- return $rc
|
||||||
|
- fi
|
||||||
|
-}
|
||||||
|
-add_resource() {
|
||||||
|
- $LRMADMIN -A $OCF_RESOURCE_INSTANCE \
|
||||||
|
- ocf \
|
||||||
|
- `basename $agent` \
|
||||||
|
- $(basename `dirname $agent`) \
|
||||||
|
- $lrm_ra_args > /dev/null
|
||||||
|
-}
|
||||||
|
-del_resource() {
|
||||||
|
- $LRMADMIN -D $OCF_RESOURCE_INSTANCE
|
||||||
|
-}
|
||||||
|
-parse_lrmadmin_output() {
|
||||||
|
- awk '
|
||||||
|
-BEGIN{ rc=1; }
|
||||||
|
-/Waiting for lrmd to callback.../ { n=1; next; }
|
||||||
|
-n==1 && /----------------operation--------------/ { n++; next; }
|
||||||
|
-n==2 && /return code:/ { rc=$0; sub("return code: *","",rc); next }
|
||||||
|
-n==2 && /---------------------------------------/ {
|
||||||
|
- n++;
|
||||||
|
- next;
|
||||||
|
-}
|
||||||
|
-END{
|
||||||
|
- if( n!=3 ) exit 1;
|
||||||
|
- else exit rc;
|
||||||
|
-}
|
||||||
|
-'
|
||||||
|
-}
|
||||||
|
-exec_resource() {
|
||||||
|
- op="$1"
|
||||||
|
- args="$2"
|
||||||
|
- $LRMADMIN -E $OCF_RESOURCE_INSTANCE \
|
||||||
|
- $op $lrmd_timeout $lrmd_interval \
|
||||||
|
- $lrmd_target_rc \
|
||||||
|
- $args | parse_lrmadmin_output
|
||||||
|
-}
|
||||||
|
-
|
||||||
|
-if [ "$use_lrmd" = 1 ]; then
|
||||||
|
- echo "Using lrmd/lrmadmin for all tests"
|
||||||
|
- start_lrmd || {
|
||||||
|
- echo "could not start lrmd" >&2
|
||||||
|
- exit 1
|
||||||
|
- }
|
||||||
|
- trap '
|
||||||
|
- [ "$lrmd_started" = 1 ] && $LRMD -k
|
||||||
|
- ' EXIT
|
||||||
|
- add_resource || {
|
||||||
|
- echo "failed to add resource to lrmd" >&2
|
||||||
|
- exit 1
|
||||||
|
- }
|
||||||
|
-fi
|
||||||
|
-
|
||||||
|
-lrm_test_command() {
|
||||||
|
- action="$1"
|
||||||
|
- msg="$2"
|
||||||
|
- debug "$msg"
|
||||||
|
- exec_resource $action "$lrm_ra_args"
|
||||||
|
-}
|
||||||
|
-
|
||||||
|
test_permissions() {
|
||||||
|
action=meta-data
|
||||||
|
debug ${1:-"Testing permissions with uid nobody"}
|
||||||
|
@@ -233,10 +156,6 @@ test_command() {
|
||||||
|
action=$1; shift
|
||||||
|
export __OCF_ACTION=$action
|
||||||
|
msg=${1:-"Testing: $action"}
|
||||||
|
- if [ "$use_lrmd" = 1 ]; then
|
||||||
|
- lrm_test_command $action "$msg"
|
||||||
|
- return $?
|
||||||
|
- fi
|
||||||
|
#echo Running: "export $ra_args; $agent $action 2>&1 > /dev/null"
|
||||||
|
if [ $verbose -eq 0 ]; then
|
||||||
|
command_output=`$agent $action 2>&1`
|
79
SOURCES/bz2109159-storage_mon-1-exit-after-help.patch
Normal file
79
SOURCES/bz2109159-storage_mon-1-exit-after-help.patch
Normal file
@ -0,0 +1,79 @@
From b3eadb8523b599af800a7c772606aa0e90cf142f Mon Sep 17 00:00:00 2001
From: Fujii Masao <fujii@postgresql.org>
Date: Tue, 19 Jul 2022 17:03:02 +0900
Subject: [PATCH 1/2] Make storage_mon -h exit just after printing help
 messages.

Previously, when -h or an invalid option was specified, storage_mon
printed the help messages, proceeded processing and then could
throw an error. This was not the behavior that, e.g., users who want
to specify -h option to see the help messages are expecting. To fix
this issue, this commit changes storage_mon so that it exits just
after printing the help messages when -h or an invalid option is
specified.
---
 tools/storage_mon.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tools/storage_mon.c b/tools/storage_mon.c
|
||||||
|
index 7b65bb419..1303371f7 100644
|
||||||
|
--- a/tools/storage_mon.c
|
||||||
|
+++ b/tools/storage_mon.c
|
||||||
|
@@ -28,7 +28,7 @@ static void usage(char *name, FILE *f)
|
||||||
|
fprintf(f, " --timeout <n> max time to wait for a device test to come back. in seconds (default %d)\n", DEFAULT_TIMEOUT);
|
||||||
|
fprintf(f, " --inject-errors-percent <n> Generate EIO errors <n>%% of the time (for testing only)\n");
|
||||||
|
fprintf(f, " --verbose emit extra output to stdout\n");
|
||||||
|
- fprintf(f, " --help print this messages\n");
|
||||||
|
+ fprintf(f, " --help print this messages, then exit\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Check one device */
|
||||||
|
@@ -178,9 +178,11 @@ int main(int argc, char *argv[])
|
||||||
|
break;
|
||||||
|
case 'h':
|
||||||
|
usage(argv[0], stdout);
|
||||||
|
+ exit(0);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
usage(argv[0], stderr);
|
||||||
|
+ exit(-1);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
From e62795f02d25a772a239e0a4f9eb9d6470c134ee Mon Sep 17 00:00:00 2001
|
||||||
|
From: Fujii Masao <fujii@postgresql.org>
|
||||||
|
Date: Tue, 19 Jul 2022 17:56:32 +0900
|
||||||
|
Subject: [PATCH 2/2] Fix typo in help message.
|
||||||
|
|
||||||
|
---
|
||||||
|
tools/storage_mon.c | 6 +++---
|
||||||
|
1 file changed, 3 insertions(+), 3 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/tools/storage_mon.c b/tools/storage_mon.c
|
||||||
|
index 1303371f7..3c82d5ee8 100644
|
||||||
|
--- a/tools/storage_mon.c
|
||||||
|
+++ b/tools/storage_mon.c
|
||||||
|
@@ -28,7 +28,7 @@ static void usage(char *name, FILE *f)
|
||||||
|
fprintf(f, " --timeout <n> max time to wait for a device test to come back. in seconds (default %d)\n", DEFAULT_TIMEOUT);
|
||||||
|
fprintf(f, " --inject-errors-percent <n> Generate EIO errors <n>%% of the time (for testing only)\n");
|
||||||
|
fprintf(f, " --verbose emit extra output to stdout\n");
|
||||||
|
- fprintf(f, " --help print this messages, then exit\n");
|
||||||
|
+ fprintf(f, " --help print this message\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Check one device */
|
||||||
|
@@ -178,11 +178,11 @@ int main(int argc, char *argv[])
|
||||||
|
break;
|
||||||
|
case 'h':
|
||||||
|
usage(argv[0], stdout);
|
||||||
|
- exit(0);
|
||||||
|
+ return 0;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
usage(argv[0], stderr);
|
||||||
|
- exit(-1);
|
||||||
|
+ return -1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
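The two commits above reduce to one rule: once usage() has been printed for -h or an unknown option, stop immediately instead of continuing into the device checks. A minimal sketch of that pattern, assuming a getopt_long() loop shaped like storage_mon's (the option names here are illustrative, not the real ones):

    #include <getopt.h>
    #include <stdio.h>

    static void usage(const char *name, FILE *f)
    {
        fprintf(f, "Usage: %s --device <dev> [--help]\n", name);
    }

    int main(int argc, char *argv[])
    {
        static struct option opts[] = {
            {"device", required_argument, NULL, 'd'},
            {"help",   no_argument,       NULL, 'h'},
            {NULL, 0, NULL, 0}
        };
        int c;

        while ((c = getopt_long(argc, argv, "d:h", opts, NULL)) != -1) {
            switch (c) {
            case 'd':
                /* remember the device for the real checks later on */
                break;
            case 'h':
                usage(argv[0], stdout);
                return 0;   /* exit right after the help text, as the patch does */
            default:
                usage(argv[0], stderr);
                return -1;  /* invalid option: help text, then error exit */
            }
        }
        return 0;
    }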
@ -0,0 +1,36 @@
From a68957e8f1e8169438acf5a4321f47ed7d8ceec1 Mon Sep 17 00:00:00 2001
From: Fujii Masao <fujii@postgresql.org>
Date: Tue, 19 Jul 2022 20:28:38 +0900
Subject: [PATCH] storage_mon: Fix bug in checking of number of specified
 scores.

Previously specifying the maximum allowed number (MAX_DEVICES, currently 25)
of devices and scores as arguments could cause storage_mon to fail unexpectedly
with the error message "too many scores, max is 25". This issue happened
because storage_mon checked whether the number of specified scores
exceeded the upper limit by using the local variable "device_count" indicating
the number of specified devices (not scores). So after the maximum number
of devices arguments were interpreted, the appearance of next score argument
caused the error even when the number of interpreted scores arguments had
not exceeded the maximum.

This patch fixes storage_mon so that it uses the local variable "score_count"
indicating the number of specified scores, to check whether arguments for
scores are specified more than the upper limit.
---
 tools/storage_mon.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/storage_mon.c b/tools/storage_mon.c
index 3c82d5ee8..c749076c2 100644
--- a/tools/storage_mon.c
+++ b/tools/storage_mon.c
@@ -154,7 +154,7 @@ int main(int argc, char *argv[])
 			}
 			break;
 		case 's':
-			if (device_count < MAX_DEVICES) {
+			if (score_count < MAX_DEVICES) {
 				int score = atoi(optarg);
 				if (score < 1 || score > 10) {
 					fprintf(stderr, "Score must be between 1 and 10 inclusive\n");
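The fix above is simply a matter of bounding each argument list with its own counter. A small, self-contained sketch of the idea (hypothetical -d/-s parsing, not the storage_mon source; the point is that score_count, not device_count, guards the score limit):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_DEVICES 25

    int main(int argc, char *argv[])
    {
        const char *devices[MAX_DEVICES];
        int scores[MAX_DEVICES];
        int device_count = 0, score_count = 0;

        for (int i = 1; i + 1 < argc; i += 2) {
            if (strcmp(argv[i], "-d") == 0 && device_count < MAX_DEVICES) {
                devices[device_count++] = argv[i + 1];
            } else if (strcmp(argv[i], "-s") == 0 && score_count < MAX_DEVICES) {
                /* bounded by score_count, so 25 devices plus 25 scores is fine */
                scores[score_count++] = atoi(argv[i + 1]);
            } else {
                fprintf(stderr, "too many devices/scores, max is %d\n", MAX_DEVICES);
                return 1;
            }
        }
        printf("%d devices, %d scores\n", device_count, score_count);
        return 0;
    }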
43
SOURCES/bz2109159-storage_mon-3-fix-child-process-exit.patch
Normal file
@ -0,0 +1,43 @@
From c6ea93fcb499c84c3d8e9aad2ced65065a3f6d51 Mon Sep 17 00:00:00 2001
From: Fujii Masao <fujii@postgresql.org>
Date: Tue, 19 Jul 2022 22:34:08 +0900
Subject: [PATCH] Fix bug in handling of child process exit.

When storage_mon detects that a child process exits with zero,
it resets the test_forks[] entry for the child process to 0, to avoid
waitpid() for the process again in the loop. But, previously,
storage_mon didn't do that when it detected that a child process
exited with non-zero. Which caused waitpid() to be called again
for the process already gone and to report an error like
"waitpid on XXX failed: No child processes" unexpectedly.
In this case, basically storage_mon should wait until all the child
processes exit and return the final score, instead.

This patch fixes this issue by making storage_mon reset test_works[]
entry even when a child process exits with non-zero.
---
 tools/storage_mon.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tools/storage_mon.c b/tools/storage_mon.c
index 3c82d5ee8..83a48ca36 100644
--- a/tools/storage_mon.c
+++ b/tools/storage_mon.c
@@ -232,13 +232,13 @@ int main(int argc, char *argv[])
 
 			if (w == test_forks[i]) {
 				if (WIFEXITED(wstatus)) {
-					if (WEXITSTATUS(wstatus) == 0) {
-						finished_count++;
-						test_forks[i] = 0;
-					} else {
+					if (WEXITSTATUS(wstatus) != 0) {
 						syslog(LOG_ERR, "Error reading from device %s", devices[i]);
 						final_score += scores[i];
 					}
+
+					finished_count++;
+					test_forks[i] = 0;
 				}
 			}
 		}
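The rule the commit above restores: whatever a child's exit status was, reap it once, clear its slot, and never wait on it again; only the score changes on failure. A generic, runnable sketch of that loop (not the storage_mon code itself):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #define CHILDREN 3

    int main(void)
    {
        pid_t kids[CHILDREN];
        int finished = 0, final_score = 0;

        for (int i = 0; i < CHILDREN; i++) {
            kids[i] = fork();
            if (kids[i] == 0)
                _exit(i == 1 ? 1 : 0);    /* pretend the second child fails */
        }

        while (finished < CHILDREN) {
            for (int i = 0; i < CHILDREN; i++) {
                int wstatus;
                if (kids[i] == 0)
                    continue;             /* already reaped */
                pid_t w = waitpid(kids[i], &wstatus, WNOHANG);
                if (w == kids[i] && WIFEXITED(wstatus)) {
                    if (WEXITSTATUS(wstatus) != 0)
                        final_score += 1; /* count the failure ...            */
                    finished++;           /* ... but always mark the slot done */
                    kids[i] = 0;          /* so waitpid() never runs on it again */
                }
            }
        }
        printf("final score: %d\n", final_score);
        return 0;
    }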
@ -0,0 +1,417 @@
From 0bb52cf9985bda47e13940761b3d8e2eaddf377c Mon Sep 17 00:00:00 2001
From: Kazunori INOUE <kazunori_inoue@newson.co.jp>
Date: Wed, 10 Aug 2022 17:35:54 +0900
Subject: [PATCH 1/4] storage_mon: Use the O_DIRECT flag in open() to eliminate
 cache effects

---
 tools/Makefile.am | 1 +
 tools/storage_mon.c | 82 +++++++++++++++++++++++++++++++++------------
 2 files changed, 61 insertions(+), 22 deletions(-)

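For context before the hunks: the point of this series is to bypass the page cache where possible. The probe tries O_RDONLY|O_DIRECT first and only falls back to a plain buffered open() when the device or filesystem rejects it with EINVAL, which is the shape the later revisions of test_device() settle on. A stand-alone sketch of just that open step:

    #define _GNU_SOURCE             /* O_DIRECT on Linux */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Try an uncached open first; fall back to a buffered open only when
     * the kernel reports that O_DIRECT is unsupported here (EINVAL). */
    static int open_for_probe(const char *device)
    {
        int fd = open(device, O_RDONLY | O_DIRECT);

        if (fd >= 0)
            return fd;
        if (errno != EINVAL) {
            fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
            return -1;
        }
        fd = open(device, O_RDONLY);
        if (fd < 0)
            fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
        return fd;
    }

    int main(int argc, char *argv[])
    {
        if (argc > 1) {
            int fd = open_for_probe(argv[1]);
            if (fd >= 0) {
                printf("opened %s (fd %d)\n", argv[1], fd);
                close(fd);
            }
        }
        return 0;
    }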
diff --git a/tools/Makefile.am b/tools/Makefile.am
|
||||||
|
index 1309223b4..08323fee3 100644
|
||||||
|
--- a/tools/Makefile.am
|
||||||
|
+++ b/tools/Makefile.am
|
||||||
|
@@ -74,6 +74,7 @@ sfex_stat_LDADD = $(GLIBLIB) -lplumb -lplumbgpl
|
||||||
|
findif_SOURCES = findif.c
|
||||||
|
|
||||||
|
storage_mon_SOURCES = storage_mon.c
|
||||||
|
+storage_mon_CFLAGS = -D_GNU_SOURCE
|
||||||
|
|
||||||
|
if BUILD_TICKLE
|
||||||
|
halib_PROGRAMS += tickle_tcp
|
||||||
|
diff --git a/tools/storage_mon.c b/tools/storage_mon.c
|
||||||
|
index 930ead41c..ba87492fc 100644
|
||||||
|
--- a/tools/storage_mon.c
|
||||||
|
+++ b/tools/storage_mon.c
|
||||||
|
@@ -31,23 +31,27 @@ static void usage(char *name, FILE *f)
|
||||||
|
fprintf(f, " --help print this message\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
-/* Check one device */
|
||||||
|
-static void *test_device(const char *device, int verbose, int inject_error_percent)
|
||||||
|
+static int open_device(const char *device, int verbose)
|
||||||
|
{
|
||||||
|
- uint64_t devsize;
|
||||||
|
int device_fd;
|
||||||
|
int res;
|
||||||
|
+ uint64_t devsize;
|
||||||
|
off_t seek_spot;
|
||||||
|
- char buffer[512];
|
||||||
|
|
||||||
|
- if (verbose) {
|
||||||
|
- printf("Testing device %s\n", device);
|
||||||
|
+#if defined(__linux__) || defined(__FreeBSD__)
|
||||||
|
+ device_fd = open(device, O_RDONLY|O_DIRECT);
|
||||||
|
+ if (device_fd >= 0) {
|
||||||
|
+ return device_fd;
|
||||||
|
+ } else if (errno != EINVAL) {
|
||||||
|
+ fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
|
||||||
|
+ return -1;
|
||||||
|
}
|
||||||
|
+#endif
|
||||||
|
|
||||||
|
device_fd = open(device, O_RDONLY);
|
||||||
|
if (device_fd < 0) {
|
||||||
|
fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
|
||||||
|
- exit(-1);
|
||||||
|
+ return -1;
|
||||||
|
}
|
||||||
|
#ifdef __FreeBSD__
|
||||||
|
res = ioctl(device_fd, DIOCGMEDIASIZE, &devsize);
|
||||||
|
@@ -57,11 +61,12 @@ static void *test_device(const char *device, int verbose, int inject_error_perce
|
||||||
|
if (res != 0) {
|
||||||
|
fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno));
|
||||||
|
close(device_fd);
|
||||||
|
- exit(-1);
|
||||||
|
+ return -1;
|
||||||
|
}
|
||||||
|
if (verbose) {
|
||||||
|
fprintf(stderr, "%s: size=%zu\n", device, devsize);
|
||||||
|
}
|
||||||
|
+
|
||||||
|
/* Don't fret about real randomness */
|
||||||
|
srand(time(NULL) + getpid());
|
||||||
|
/* Pick a random place on the device - sector aligned */
|
||||||
|
@@ -70,35 +75,64 @@ static void *test_device(const char *device, int verbose, int inject_error_perce
|
||||||
|
if (res < 0) {
|
||||||
|
fprintf(stderr, "Failed to seek %s: %s\n", device, strerror(errno));
|
||||||
|
close(device_fd);
|
||||||
|
- exit(-1);
|
||||||
|
+ return -1;
|
||||||
|
}
|
||||||
|
-
|
||||||
|
if (verbose) {
|
||||||
|
printf("%s: reading from pos %ld\n", device, seek_spot);
|
||||||
|
}
|
||||||
|
+ return device_fd;
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+/* Check one device */
|
||||||
|
+static void *test_device(const char *device, int verbose, int inject_error_percent)
|
||||||
|
+{
|
||||||
|
+ int device_fd;
|
||||||
|
+ int sec_size = 0;
|
||||||
|
+ int res;
|
||||||
|
+ void *buffer;
|
||||||
|
+
|
||||||
|
+ if (verbose) {
|
||||||
|
+ printf("Testing device %s\n", device);
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ device_fd = open_device(device, verbose);
|
||||||
|
+ if (device_fd < 0) {
|
||||||
|
+ exit(-1);
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ ioctl(device_fd, BLKSSZGET, &sec_size);
|
||||||
|
+ if (sec_size == 0) {
|
||||||
|
+ fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno));
|
||||||
|
+ goto error;
|
||||||
|
+ }
|
||||||
|
|
||||||
|
- res = read(device_fd, buffer, sizeof(buffer));
|
||||||
|
+ if (posix_memalign(&buffer, sysconf(_SC_PAGESIZE), sec_size) != 0) {
|
||||||
|
+ fprintf(stderr, "Failed to allocate aligned memory: %s\n", strerror(errno));
|
||||||
|
+ goto error;
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ res = read(device_fd, buffer, sec_size);
|
||||||
|
+ free(buffer);
|
||||||
|
if (res < 0) {
|
||||||
|
fprintf(stderr, "Failed to read %s: %s\n", device, strerror(errno));
|
||||||
|
- close(device_fd);
|
||||||
|
- exit(-1);
|
||||||
|
+ goto error;
|
||||||
|
}
|
||||||
|
- if (res < (int)sizeof(buffer)) {
|
||||||
|
- fprintf(stderr, "Failed to read %ld bytes from %s, got %d\n", sizeof(buffer), device, res);
|
||||||
|
- close(device_fd);
|
||||||
|
- exit(-1);
|
||||||
|
+ if (res < sec_size) {
|
||||||
|
+ fprintf(stderr, "Failed to read %d bytes from %s, got %d\n", sec_size, device, res);
|
||||||
|
+ goto error;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Fake an error */
|
||||||
|
- if (inject_error_percent && ((rand() % 100) < inject_error_percent)) {
|
||||||
|
- fprintf(stderr, "People, please fasten your seatbelts, injecting errors!\n");
|
||||||
|
- close(device_fd);
|
||||||
|
- exit(-1);
|
||||||
|
+ if (inject_error_percent) {
|
||||||
|
+ srand(time(NULL) + getpid());
|
||||||
|
+ if ((rand() % 100) < inject_error_percent) {
|
||||||
|
+ fprintf(stderr, "People, please fasten your seatbelts, injecting errors!\n");
|
||||||
|
+ goto error;
|
||||||
|
+ }
|
||||||
|
}
|
||||||
|
res = close(device_fd);
|
||||||
|
if (res != 0) {
|
||||||
|
fprintf(stderr, "Failed to close %s: %s\n", device, strerror(errno));
|
||||||
|
- close(device_fd);
|
||||||
|
exit(-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
@@ -106,6 +140,10 @@ static void *test_device(const char *device, int verbose, int inject_error_perce
|
||||||
|
printf("%s: done\n", device);
|
||||||
|
}
|
||||||
|
exit(0);
|
||||||
|
+
|
||||||
|
+error:
|
||||||
|
+ close(device_fd);
|
||||||
|
+ exit(-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char *argv[])
|
||||||
|
|
||||||
|
From ce4e632f29ed6b86b82a959eac5844655baed153 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Kazunori INOUE <kazunori_inoue@newson.co.jp>
|
||||||
|
Date: Mon, 15 Aug 2022 19:17:21 +0900
|
||||||
|
Subject: [PATCH 2/4] storage_mon: fix build-related issues
|
||||||
|
|
||||||
|
---
|
||||||
|
tools/storage_mon.c | 6 ++++--
|
||||||
|
1 file changed, 4 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/tools/storage_mon.c b/tools/storage_mon.c
|
||||||
|
index ba87492fc..e34d1975a 100644
|
||||||
|
--- a/tools/storage_mon.c
|
||||||
|
+++ b/tools/storage_mon.c
|
||||||
|
@@ -38,7 +38,6 @@ static int open_device(const char *device, int verbose)
|
||||||
|
uint64_t devsize;
|
||||||
|
off_t seek_spot;
|
||||||
|
|
||||||
|
-#if defined(__linux__) || defined(__FreeBSD__)
|
||||||
|
device_fd = open(device, O_RDONLY|O_DIRECT);
|
||||||
|
if (device_fd >= 0) {
|
||||||
|
return device_fd;
|
||||||
|
@@ -46,7 +45,6 @@ static int open_device(const char *device, int verbose)
|
||||||
|
fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
-#endif
|
||||||
|
|
||||||
|
device_fd = open(device, O_RDONLY);
|
||||||
|
if (device_fd < 0) {
|
||||||
|
@@ -100,7 +98,11 @@ static void *test_device(const char *device, int verbose, int inject_error_perce
|
||||||
|
exit(-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
+#ifdef __FreeBSD__
|
||||||
|
+ ioctl(device_fd, DIOCGSECTORSIZE, &sec_size);
|
||||||
|
+#else
|
||||||
|
ioctl(device_fd, BLKSSZGET, &sec_size);
|
||||||
|
+#endif
|
||||||
|
if (sec_size == 0) {
|
||||||
|
fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno));
|
||||||
|
goto error;
|
||||||
|
|
||||||
|
From 7a0aaa0dfdebeab3fae9fe9ddc412c3d1f610273 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Kazunori INOUE <kazunori_inoue@newson.co.jp>
|
||||||
|
Date: Wed, 24 Aug 2022 17:36:23 +0900
|
||||||
|
Subject: [PATCH 3/4] storage_mon: do random lseek even with O_DIRECT, etc
|
||||||
|
|
||||||
|
---
|
||||||
|
tools/storage_mon.c | 118 ++++++++++++++++++++++----------------------
|
||||||
|
1 file changed, 58 insertions(+), 60 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/tools/storage_mon.c b/tools/storage_mon.c
|
||||||
|
index e34d1975a..0bdb48649 100644
|
||||||
|
--- a/tools/storage_mon.c
|
||||||
|
+++ b/tools/storage_mon.c
|
||||||
|
@@ -31,38 +31,43 @@ static void usage(char *name, FILE *f)
|
||||||
|
fprintf(f, " --help print this message\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
-static int open_device(const char *device, int verbose)
|
||||||
|
+/* Check one device */
|
||||||
|
+static void *test_device(const char *device, int verbose, int inject_error_percent)
|
||||||
|
{
|
||||||
|
+ uint64_t devsize;
|
||||||
|
+ int flags = O_RDONLY | O_DIRECT;
|
||||||
|
int device_fd;
|
||||||
|
int res;
|
||||||
|
- uint64_t devsize;
|
||||||
|
off_t seek_spot;
|
||||||
|
|
||||||
|
- device_fd = open(device, O_RDONLY|O_DIRECT);
|
||||||
|
- if (device_fd >= 0) {
|
||||||
|
- return device_fd;
|
||||||
|
- } else if (errno != EINVAL) {
|
||||||
|
- fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
|
||||||
|
- return -1;
|
||||||
|
+ if (verbose) {
|
||||||
|
+ printf("Testing device %s\n", device);
|
||||||
|
}
|
||||||
|
|
||||||
|
- device_fd = open(device, O_RDONLY);
|
||||||
|
+ device_fd = open(device, flags);
|
||||||
|
if (device_fd < 0) {
|
||||||
|
- fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
|
||||||
|
- return -1;
|
||||||
|
+ if (errno != EINVAL) {
|
||||||
|
+ fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
|
||||||
|
+ exit(-1);
|
||||||
|
+ }
|
||||||
|
+ flags &= ~O_DIRECT;
|
||||||
|
+ device_fd = open(device, flags);
|
||||||
|
+ if (device_fd < 0) {
|
||||||
|
+ fprintf(stderr, "Failed to open %s: %s\n", device, strerror(errno));
|
||||||
|
+ exit(-1);
|
||||||
|
+ }
|
||||||
|
}
|
||||||
|
#ifdef __FreeBSD__
|
||||||
|
res = ioctl(device_fd, DIOCGMEDIASIZE, &devsize);
|
||||||
|
#else
|
||||||
|
res = ioctl(device_fd, BLKGETSIZE64, &devsize);
|
||||||
|
#endif
|
||||||
|
- if (res != 0) {
|
||||||
|
+ if (res < 0) {
|
||||||
|
fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno));
|
||||||
|
- close(device_fd);
|
||||||
|
- return -1;
|
||||||
|
+ goto error;
|
||||||
|
}
|
||||||
|
if (verbose) {
|
||||||
|
- fprintf(stderr, "%s: size=%zu\n", device, devsize);
|
||||||
|
+ printf("%s: opened %s O_DIRECT, size=%zu\n", device, (flags & O_DIRECT)?"with":"without", devsize);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Don't fret about real randomness */
|
||||||
|
@@ -72,65 +77,58 @@ static int open_device(const char *device, int verbose)
|
||||||
|
res = lseek(device_fd, seek_spot, SEEK_SET);
|
||||||
|
if (res < 0) {
|
||||||
|
fprintf(stderr, "Failed to seek %s: %s\n", device, strerror(errno));
|
||||||
|
- close(device_fd);
|
||||||
|
- return -1;
|
||||||
|
+ goto error;
|
||||||
|
}
|
||||||
|
if (verbose) {
|
||||||
|
printf("%s: reading from pos %ld\n", device, seek_spot);
|
||||||
|
}
|
||||||
|
- return device_fd;
|
||||||
|
-}
|
||||||
|
-
|
||||||
|
-/* Check one device */
|
||||||
|
-static void *test_device(const char *device, int verbose, int inject_error_percent)
|
||||||
|
-{
|
||||||
|
- int device_fd;
|
||||||
|
- int sec_size = 0;
|
||||||
|
- int res;
|
||||||
|
- void *buffer;
|
||||||
|
-
|
||||||
|
- if (verbose) {
|
||||||
|
- printf("Testing device %s\n", device);
|
||||||
|
- }
|
||||||
|
|
||||||
|
- device_fd = open_device(device, verbose);
|
||||||
|
- if (device_fd < 0) {
|
||||||
|
- exit(-1);
|
||||||
|
- }
|
||||||
|
+ if (flags & O_DIRECT) {
|
||||||
|
+ int sec_size = 0;
|
||||||
|
+ void *buffer;
|
||||||
|
|
||||||
|
#ifdef __FreeBSD__
|
||||||
|
- ioctl(device_fd, DIOCGSECTORSIZE, &sec_size);
|
||||||
|
+ res = ioctl(device_fd, DIOCGSECTORSIZE, &sec_size);
|
||||||
|
#else
|
||||||
|
- ioctl(device_fd, BLKSSZGET, &sec_size);
|
||||||
|
+ res = ioctl(device_fd, BLKSSZGET, &sec_size);
|
||||||
|
#endif
|
||||||
|
- if (sec_size == 0) {
|
||||||
|
- fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno));
|
||||||
|
- goto error;
|
||||||
|
- }
|
||||||
|
+ if (res < 0) {
|
||||||
|
+ fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno));
|
||||||
|
+ goto error;
|
||||||
|
+ }
|
||||||
|
|
||||||
|
- if (posix_memalign(&buffer, sysconf(_SC_PAGESIZE), sec_size) != 0) {
|
||||||
|
- fprintf(stderr, "Failed to allocate aligned memory: %s\n", strerror(errno));
|
||||||
|
- goto error;
|
||||||
|
- }
|
||||||
|
+ if (posix_memalign(&buffer, sysconf(_SC_PAGESIZE), sec_size) != 0) {
|
||||||
|
+ fprintf(stderr, "Failed to allocate aligned memory: %s\n", strerror(errno));
|
||||||
|
+ goto error;
|
||||||
|
+ }
|
||||||
|
+ res = read(device_fd, buffer, sec_size);
|
||||||
|
+ free(buffer);
|
||||||
|
+ if (res < 0) {
|
||||||
|
+ fprintf(stderr, "Failed to read %s: %s\n", device, strerror(errno));
|
||||||
|
+ goto error;
|
||||||
|
+ }
|
||||||
|
+ if (res < sec_size) {
|
||||||
|
+ fprintf(stderr, "Failed to read %d bytes from %s, got %d\n", sec_size, device, res);
|
||||||
|
+ goto error;
|
||||||
|
+ }
|
||||||
|
+ } else {
|
||||||
|
+ char buffer[512];
|
||||||
|
|
||||||
|
- res = read(device_fd, buffer, sec_size);
|
||||||
|
- free(buffer);
|
||||||
|
- if (res < 0) {
|
||||||
|
- fprintf(stderr, "Failed to read %s: %s\n", device, strerror(errno));
|
||||||
|
- goto error;
|
||||||
|
- }
|
||||||
|
- if (res < sec_size) {
|
||||||
|
- fprintf(stderr, "Failed to read %d bytes from %s, got %d\n", sec_size, device, res);
|
||||||
|
- goto error;
|
||||||
|
+ res = read(device_fd, buffer, sizeof(buffer));
|
||||||
|
+ if (res < 0) {
|
||||||
|
+ fprintf(stderr, "Failed to read %s: %s\n", device, strerror(errno));
|
||||||
|
+ goto error;
|
||||||
|
+ }
|
||||||
|
+ if (res < (int)sizeof(buffer)) {
|
||||||
|
+ fprintf(stderr, "Failed to read %ld bytes from %s, got %d\n", sizeof(buffer), device, res);
|
||||||
|
+ goto error;
|
||||||
|
+ }
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Fake an error */
|
||||||
|
- if (inject_error_percent) {
|
||||||
|
- srand(time(NULL) + getpid());
|
||||||
|
- if ((rand() % 100) < inject_error_percent) {
|
||||||
|
- fprintf(stderr, "People, please fasten your seatbelts, injecting errors!\n");
|
||||||
|
- goto error;
|
||||||
|
- }
|
||||||
|
+ if (inject_error_percent && ((rand() % 100) < inject_error_percent)) {
|
||||||
|
+ fprintf(stderr, "People, please fasten your seatbelts, injecting errors!\n");
|
||||||
|
+ goto error;
|
||||||
|
}
|
||||||
|
res = close(device_fd);
|
||||||
|
if (res != 0) {
|
||||||
|
|
||||||
|
From db97e055a17526cec056c595844a9d8851e3ee19 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Kazunori INOUE <kazunori_inoue@newson.co.jp>
|
||||||
|
Date: Thu, 25 Aug 2022 16:03:46 +0900
|
||||||
|
Subject: [PATCH 4/4] storage_mon: improve error messages when ioctl() fails
|
||||||
|
|
||||||
|
---
|
||||||
|
tools/storage_mon.c | 4 ++--
|
||||||
|
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/tools/storage_mon.c b/tools/storage_mon.c
|
||||||
|
index 0bdb48649..f829c5081 100644
|
||||||
|
--- a/tools/storage_mon.c
|
||||||
|
+++ b/tools/storage_mon.c
|
||||||
|
@@ -63,7 +63,7 @@ static void *test_device(const char *device, int verbose, int inject_error_perce
|
||||||
|
res = ioctl(device_fd, BLKGETSIZE64, &devsize);
|
||||||
|
#endif
|
||||||
|
if (res < 0) {
|
||||||
|
- fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno));
|
||||||
|
+ fprintf(stderr, "Failed to get device size for %s: %s\n", device, strerror(errno));
|
||||||
|
goto error;
|
||||||
|
}
|
||||||
|
if (verbose) {
|
||||||
|
@@ -93,7 +93,7 @@ static void *test_device(const char *device, int verbose, int inject_error_perce
|
||||||
|
res = ioctl(device_fd, BLKSSZGET, &sec_size);
|
||||||
|
#endif
|
||||||
|
if (res < 0) {
|
||||||
|
- fprintf(stderr, "Failed to stat %s: %s\n", device, strerror(errno));
|
||||||
|
+ fprintf(stderr, "Failed to get block device sector size for %s: %s\n", device, strerror(errno));
|
||||||
|
goto error;
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,75 @@
From 0063164d72bbaca68f12a2f0a7dbae9ccb41fa39 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 26 Jul 2022 09:08:26 +0200
Subject: [PATCH] ethmonitor/ovsmonitor/pgsql: remove ignored attrd_updater
 "-q" parameter

attrd_updater in 2.1.3 no longer ignores the -q parameter, which makes
these agents break. It never did anything in attrd_updater, and is
probably left-over from copy/paste crm_attribute code that got changed
to attrd_updater.
---
 heartbeat/ethmonitor | 2 +-
 heartbeat/ovsmonitor | 2 +-
 heartbeat/pgsql | 8 ++++----
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor
|
||||||
|
index ba8574131..451738a0b 100755
|
||||||
|
--- a/heartbeat/ethmonitor
|
||||||
|
+++ b/heartbeat/ethmonitor
|
||||||
|
@@ -464,7 +464,7 @@ END
|
||||||
|
|
||||||
|
set_cib_value() {
|
||||||
|
local score=`expr $1 \* $OCF_RESKEY_multiplier`
|
||||||
|
- attrd_updater -n $ATTRNAME -v $score -q
|
||||||
|
+ attrd_updater -n $ATTRNAME -v $score
|
||||||
|
local rc=$?
|
||||||
|
case $rc in
|
||||||
|
0) ocf_log debug "attrd_updater: Updated $ATTRNAME = $score" ;;
|
||||||
|
diff --git a/heartbeat/ovsmonitor b/heartbeat/ovsmonitor
|
||||||
|
index 872ce86eb..6765da4b9 100755
|
||||||
|
--- a/heartbeat/ovsmonitor
|
||||||
|
+++ b/heartbeat/ovsmonitor
|
||||||
|
@@ -355,7 +355,7 @@ END
|
||||||
|
|
||||||
|
set_cib_value() {
|
||||||
|
local score=`expr $1 \* $OCF_RESKEY_multiplier`
|
||||||
|
- attrd_updater -n $ATTRNAME -v $score -q
|
||||||
|
+ attrd_updater -n $ATTRNAME -v $score
|
||||||
|
local rc=$?
|
||||||
|
case $rc in
|
||||||
|
0) ocf_log debug "attrd_updater: Updated $ATTRNAME = $score" ;;
|
||||||
|
diff --git a/heartbeat/pgsql b/heartbeat/pgsql
|
||||||
|
index 94aceb324..e93d66855 100755
|
||||||
|
--- a/heartbeat/pgsql
|
||||||
|
+++ b/heartbeat/pgsql
|
||||||
|
@@ -808,7 +808,7 @@ pgsql_real_stop() {
|
||||||
|
local stop_escalate
|
||||||
|
|
||||||
|
if ocf_is_true ${OCF_RESKEY_check_wal_receiver}; then
|
||||||
|
- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -D -q
|
||||||
|
+ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -D
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! pgsql_status
|
||||||
|
@@ -937,16 +937,16 @@ pgsql_wal_receiver_status() {
|
||||||
|
receiver_parent_pids=`ps -ef | tr -s " " | grep "[w]al\s*receiver" | cut -d " " -f 3`
|
||||||
|
|
||||||
|
if echo "$receiver_parent_pids" | grep -q -w "$PID" ; then
|
||||||
|
- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal" -q
|
||||||
|
+ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $pgsql_real_monitor_status -eq "$OCF_RUNNING_MASTER" ]; then
|
||||||
|
- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal (master)" -q
|
||||||
|
+ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal (master)"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
- attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "ERROR" -q
|
||||||
|
+ attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "ERROR"
|
||||||
|
ocf_log warn "wal receiver process is not running"
|
||||||
|
return 1
|
||||||
|
}
|
298
SOURCES/bz2127117-nfsserver-nfsv4_only-parameter.patch
Normal file
@ -0,0 +1,298 @@
From 764757380af19d3a21d40f3c9624e4135ff074e1 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 2 Nov 2022 10:26:31 +0100
Subject: [PATCH] nfsserver: add nfsv4_only parameter to make it run without
 rpc-statd/rpcbind services

---
 heartbeat/nfsserver | 200 +++++++++++++++++++++++++-------------------
 1 file changed, 114 insertions(+), 86 deletions(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
|
||||||
|
index 9bbd603e5..cb2d43ab1 100755
|
||||||
|
--- a/heartbeat/nfsserver
|
||||||
|
+++ b/heartbeat/nfsserver
|
||||||
|
@@ -79,6 +79,16 @@ Init script for nfsserver
|
||||||
|
<content type="string" default="auto detected" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="nfsv4_only" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Run in NFSv4 only mode (rpc-statd and rpcbind services masked).
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">
|
||||||
|
+NFSv4 only mode.
|
||||||
|
+</shortdesc>
|
||||||
|
+<content type="boolean" default="false" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="nfs_no_notify" unique="0" required="0">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Do not send reboot notifications to NFSv3 clients during server startup.
|
||||||
|
@@ -332,7 +342,7 @@ v3locking_exec()
|
||||||
|
if [ $EXEC_MODE -eq 2 ]; then
|
||||||
|
nfs_exec $cmd nfs-lock.service
|
||||||
|
elif [ $EXEC_MODE -eq 3 ]; then
|
||||||
|
- nfs_exec $cmd rpc-statd.service
|
||||||
|
+ nfs_exec $cmd rpc-statd.service
|
||||||
|
else
|
||||||
|
case $cmd in
|
||||||
|
start) locking_start;;
|
||||||
|
@@ -348,20 +358,22 @@ nfsserver_systemd_monitor()
|
||||||
|
local rc
|
||||||
|
local fn
|
||||||
|
|
||||||
|
- ocf_log debug "Status: rpcbind"
|
||||||
|
- rpcinfo > /dev/null 2>&1
|
||||||
|
- rc=$?
|
||||||
|
- if [ "$rc" -ne "0" ]; then
|
||||||
|
- ocf_exit_reason "rpcbind is not running"
|
||||||
|
- return $OCF_NOT_RUNNING
|
||||||
|
- fi
|
||||||
|
+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
|
||||||
|
+ ocf_log debug "Status: rpcbind"
|
||||||
|
+ rpcinfo > /dev/null 2>&1
|
||||||
|
+ rc=$?
|
||||||
|
+ if [ "$rc" -ne "0" ]; then
|
||||||
|
+ ocf_exit_reason "rpcbind is not running"
|
||||||
|
+ return $OCF_NOT_RUNNING
|
||||||
|
+ fi
|
||||||
|
|
||||||
|
- ocf_log debug "Status: nfs-mountd"
|
||||||
|
- ps axww | grep -q "[r]pc.mountd"
|
||||||
|
- rc=$?
|
||||||
|
- if [ "$rc" -ne "0" ]; then
|
||||||
|
- ocf_exit_reason "nfs-mountd is not running"
|
||||||
|
- return $OCF_NOT_RUNNING
|
||||||
|
+ ocf_log debug "Status: nfs-mountd"
|
||||||
|
+ ps axww | grep -q "[r]pc.mountd"
|
||||||
|
+ rc=$?
|
||||||
|
+ if [ "$rc" -ne "0" ]; then
|
||||||
|
+ ocf_exit_reason "nfs-mountd is not running"
|
||||||
|
+ return $OCF_NOT_RUNNING
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
ocf_log debug "Status: nfs-idmapd"
|
||||||
|
@@ -375,12 +387,14 @@ nfsserver_systemd_monitor()
|
||||||
|
return $OCF_NOT_RUNNING
|
||||||
|
fi
|
||||||
|
|
||||||
|
- ocf_log debug "Status: rpc-statd"
|
||||||
|
- rpcinfo -t localhost 100024 > /dev/null 2>&1
|
||||||
|
- rc=$?
|
||||||
|
- if [ "$rc" -ne "0" ]; then
|
||||||
|
- ocf_exit_reason "rpc-statd is not running"
|
||||||
|
- return $OCF_NOT_RUNNING
|
||||||
|
+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
|
||||||
|
+ ocf_log debug "Status: rpc-statd"
|
||||||
|
+ rpcinfo -t localhost 100024 > /dev/null 2>&1
|
||||||
|
+ rc=$?
|
||||||
|
+ if [ "$rc" -ne "0" ]; then
|
||||||
|
+ ocf_exit_reason "rpc-statd is not running"
|
||||||
|
+ return $OCF_NOT_RUNNING
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
nfs_exec is-active nfs-server
|
||||||
|
@@ -424,7 +438,7 @@ nfsserver_monitor ()
|
||||||
|
if [ $rc -eq 0 ]; then
|
||||||
|
# don't report success if nfs servers are up
|
||||||
|
# without locking daemons.
|
||||||
|
- v3locking_exec "status"
|
||||||
|
+ ocf_is_true "$OCF_RESKEY_nfsv4_only" || v3locking_exec "status"
|
||||||
|
rc=$?
|
||||||
|
if [ $rc -ne 0 ]; then
|
||||||
|
ocf_exit_reason "NFS server is up, but the locking daemons are down"
|
||||||
|
@@ -786,48 +800,54 @@ nfsserver_start ()
|
||||||
|
|
||||||
|
# systemd
|
||||||
|
case $EXEC_MODE in
|
||||||
|
- [23]) nfs_exec start rpcbind
|
||||||
|
- local i=1
|
||||||
|
- while : ; do
|
||||||
|
- ocf_log info "Start: rpcbind i: $i"
|
||||||
|
- rpcinfo > /dev/null 2>&1
|
||||||
|
- rc=$?
|
||||||
|
- if [ "$rc" -eq "0" ]; then
|
||||||
|
- break;
|
||||||
|
- fi
|
||||||
|
- sleep 1
|
||||||
|
- i=$((i + 1))
|
||||||
|
- done
|
||||||
|
+ [23]) if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
|
||||||
|
+ nfs_exec start rpcbind
|
||||||
|
+ local i=1
|
||||||
|
+ while : ; do
|
||||||
|
+ ocf_log info "Start: rpcbind i: $i"
|
||||||
|
+ rpcinfo > /dev/null 2>&1
|
||||||
|
+ rc=$?
|
||||||
|
+ if [ "$rc" -eq "0" ]; then
|
||||||
|
+ break
|
||||||
|
+ fi
|
||||||
|
+ sleep 1
|
||||||
|
+ i=$((i + 1))
|
||||||
|
+ done
|
||||||
|
+ fi
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
- # check to see if we need to start rpc.statd
|
||||||
|
- v3locking_exec "status"
|
||||||
|
- if [ $? -ne $OCF_SUCCESS ]; then
|
||||||
|
- v3locking_exec "start"
|
||||||
|
- rc=$?
|
||||||
|
- if [ $rc -ne 0 ]; then
|
||||||
|
- ocf_exit_reason "Failed to start NFS server locking daemons"
|
||||||
|
- return $rc
|
||||||
|
+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
|
||||||
|
+ # check to see if we need to start rpc.statd
|
||||||
|
+ v3locking_exec "status"
|
||||||
|
+ if [ $? -ne $OCF_SUCCESS ]; then
|
||||||
|
+ v3locking_exec "start"
|
||||||
|
+ rc=$?
|
||||||
|
+ if [ $rc -ne 0 ]; then
|
||||||
|
+ ocf_exit_reason "Failed to start NFS server locking daemons"
|
||||||
|
+ return $rc
|
||||||
|
+ fi
|
||||||
|
+ else
|
||||||
|
+ ocf_log info "rpc.statd already up"
|
||||||
|
fi
|
||||||
|
- else
|
||||||
|
- ocf_log info "rpc.statd already up"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# systemd
|
||||||
|
case $EXEC_MODE in
|
||||||
|
- [23]) nfs_exec start nfs-mountd
|
||||||
|
- local i=1
|
||||||
|
- while : ; do
|
||||||
|
- ocf_log info "Start: nfs-mountd i: $i"
|
||||||
|
- ps axww | grep -q "[r]pc.mountd"
|
||||||
|
- rc=$?
|
||||||
|
- if [ "$rc" -eq "0" ]; then
|
||||||
|
- break;
|
||||||
|
- fi
|
||||||
|
- sleep 1
|
||||||
|
- i=$((i + 1))
|
||||||
|
- done
|
||||||
|
+ [23]) if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
|
||||||
|
+ nfs_exec start nfs-mountd
|
||||||
|
+ local i=1
|
||||||
|
+ while : ; do
|
||||||
|
+ ocf_log info "Start: nfs-mountd i: $i"
|
||||||
|
+ ps axww | grep -q "[r]pc.mountd"
|
||||||
|
+ rc=$?
|
||||||
|
+ if [ "$rc" -eq "0" ]; then
|
||||||
|
+ break
|
||||||
|
+ fi
|
||||||
|
+ sleep 1
|
||||||
|
+ i=$((i + 1))
|
||||||
|
+ done
|
||||||
|
+ fi
|
||||||
|
|
||||||
|
nfs_exec start nfs-idmapd
|
||||||
|
local i=1
|
||||||
|
@@ -839,24 +859,26 @@ nfsserver_start ()
|
||||||
|
ocf_log debug "$(cat $fn)"
|
||||||
|
rm -f $fn
|
||||||
|
if [ "$rc" -eq "0" ]; then
|
||||||
|
- break;
|
||||||
|
+ break
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
i=$((i + 1))
|
||||||
|
done
|
||||||
|
|
||||||
|
- nfs_exec start rpc-statd
|
||||||
|
- local i=1
|
||||||
|
- while : ; do
|
||||||
|
- ocf_log info "Start: rpc-statd i: $i"
|
||||||
|
- rpcinfo -t localhost 100024 > /dev/null 2>&1
|
||||||
|
- rc=$?
|
||||||
|
- if [ "$rc" -eq "0" ]; then
|
||||||
|
- break;
|
||||||
|
- fi
|
||||||
|
- sleep 1
|
||||||
|
- i=$((i + 1))
|
||||||
|
- done
|
||||||
|
+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
|
||||||
|
+ nfs_exec start rpc-statd
|
||||||
|
+ local i=1
|
||||||
|
+ while : ; do
|
||||||
|
+ ocf_log info "Start: rpc-statd i: $i"
|
||||||
|
+ rpcinfo -t localhost 100024 > /dev/null 2>&1
|
||||||
|
+ rc=$?
|
||||||
|
+ if [ "$rc" -eq "0" ]; then
|
||||||
|
+ break
|
||||||
|
+ fi
|
||||||
|
+ sleep 1
|
||||||
|
+ i=$((i + 1))
|
||||||
|
+ done
|
||||||
|
+ fi
|
||||||
|
esac
|
||||||
|
|
||||||
|
|
||||||
|
@@ -914,13 +936,15 @@ nfsserver_stop ()
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
- nfs_exec stop rpc-statd > /dev/null 2>&1
|
||||||
|
- ocf_log info "Stop: rpc-statd"
|
||||||
|
- rpcinfo -t localhost 100024 > /dev/null 2>&1
|
||||||
|
- rc=$?
|
||||||
|
- if [ "$rc" -eq "0" ]; then
|
||||||
|
- ocf_exit_reason "Failed to stop rpc-statd"
|
||||||
|
- return $OCF_ERR_GENERIC
|
||||||
|
+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
|
||||||
|
+ nfs_exec stop rpc-statd > /dev/null 2>&1
|
||||||
|
+ ocf_log info "Stop: rpc-statd"
|
||||||
|
+ rpcinfo -t localhost 100024 > /dev/null 2>&1
|
||||||
|
+ rc=$?
|
||||||
|
+ if [ "$rc" -eq "0" ]; then
|
||||||
|
+ ocf_exit_reason "Failed to stop rpc-statd"
|
||||||
|
+ return $OCF_ERR_GENERIC
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
nfs_exec stop nfs-idmapd > /dev/null 2>&1
|
||||||
|
@@ -935,13 +959,15 @@ nfsserver_stop ()
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
fi
|
||||||
|
|
||||||
|
- nfs_exec stop nfs-mountd > /dev/null 2>&1
|
||||||
|
- ocf_log info "Stop: nfs-mountd"
|
||||||
|
- ps axww | grep -q "[r]pc.mountd"
|
||||||
|
- rc=$?
|
||||||
|
- if [ "$rc" -eq "0" ]; then
|
||||||
|
- ocf_exit_reason "Failed to stop nfs-mountd"
|
||||||
|
- return $OCF_ERR_GENERIC
|
||||||
|
+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
|
||||||
|
+ nfs_exec stop nfs-mountd > /dev/null 2>&1
|
||||||
|
+ ocf_log info "Stop: nfs-mountd"
|
||||||
|
+ ps axww | grep -q "[r]pc.mountd"
|
||||||
|
+ rc=$?
|
||||||
|
+ if [ "$rc" -eq "0" ]; then
|
||||||
|
+ ocf_exit_reason "Failed to stop nfs-mountd"
|
||||||
|
+ return $OCF_ERR_GENERIC
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if systemctl --no-legend list-unit-files "nfsdcld*" | grep -q nfsdcld; then
|
||||||
|
@@ -960,10 +986,12 @@ nfsserver_stop ()
|
||||||
|
esac
|
||||||
|
|
||||||
|
|
||||||
|
- v3locking_exec "stop"
|
||||||
|
- if [ $? -ne 0 ]; then
|
||||||
|
- ocf_exit_reason "Failed to stop NFS locking daemons"
|
||||||
|
- rc=$OCF_ERR_GENERIC
|
||||||
|
+ if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
|
||||||
|
+ v3locking_exec "stop"
|
||||||
|
+ if [ $? -ne 0 ]; then
|
||||||
|
+ ocf_exit_reason "Failed to stop NFS locking daemons"
|
||||||
|
+ rc=$OCF_ERR_GENERIC
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# systemd
|
@ -0,0 +1,147 @@
|
|||||||
|
From 237d55120a7c8d761f839c96651e722b3bb3bc88 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Wed, 12 Oct 2022 13:57:30 +0200
|
||||||
|
Subject: [PATCH 1/4] IPsrcaddr: fix PROTO regex
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/IPsrcaddr | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
|
||||||
|
index 7dbf65ff5..24406d296 100755
|
||||||
|
--- a/heartbeat/IPsrcaddr
|
||||||
|
+++ b/heartbeat/IPsrcaddr
|
||||||
|
@@ -188,7 +188,7 @@ IPADDR="\($OCTET\.\)\{3\}$OCTET"
|
||||||
|
SRCCLAUSE="src$WS$WS*\($IPADDR\)"
|
||||||
|
MATCHROUTE="\(.*${WS}\)\($SRCCLAUSE\)\($WS.*\|$\)"
|
||||||
|
METRICCLAUSE=".*\(metric$WS[^ ]\+\)"
|
||||||
|
-PROTOCLAUSE=".*\(proto$WS[^ ]\+\)"
|
||||||
|
+PROTOCLAUSE=".*\(proto$WS[^ ]\+\).*"
|
||||||
|
FINDIF=findif
|
||||||
|
|
||||||
|
# findif needs that to be set
|
||||||
|
|
||||||
|
From c70ba457851a401cb201cb87d23bdbc5f4fcd2b3 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Wed, 12 Oct 2022 14:00:30 +0200
|
||||||
|
Subject: [PATCH 2/4] IPsrcaddr: detect metric for main table only, and allow
|
||||||
|
specifying metric if necessary
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/IPsrcaddr | 18 +++++++++++++++++-
|
||||||
|
1 file changed, 17 insertions(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
|
||||||
|
index 24406d296..4745eb8a7 100755
|
||||||
|
--- a/heartbeat/IPsrcaddr
|
||||||
|
+++ b/heartbeat/IPsrcaddr
|
||||||
|
@@ -59,12 +59,14 @@ OCF_RESKEY_ipaddress_default=""
|
||||||
|
OCF_RESKEY_cidr_netmask_default=""
|
||||||
|
OCF_RESKEY_destination_default="0.0.0.0/0"
|
||||||
|
OCF_RESKEY_proto_default=""
|
||||||
|
+OCF_RESKEY_metric_default=""
|
||||||
|
OCF_RESKEY_table_default=""
|
||||||
|
|
||||||
|
: ${OCF_RESKEY_ipaddress=${OCF_RESKEY_ipaddress_default}}
|
||||||
|
: ${OCF_RESKEY_cidr_netmask=${OCF_RESKEY_cidr_netmask_default}}
|
||||||
|
: ${OCF_RESKEY_destination=${OCF_RESKEY_destination_default}}
|
||||||
|
: ${OCF_RESKEY_proto=${OCF_RESKEY_proto_default}}
|
||||||
|
+: ${OCF_RESKEY_metric=${OCF_RESKEY_metric_default}}
|
||||||
|
: ${OCF_RESKEY_table=${OCF_RESKEY_table_default}}
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
@@ -143,6 +145,14 @@ Proto to match when finding network. E.g. "kernel".
|
||||||
|
<content type="string" default="${OCF_RESKEY_proto_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="metric">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Metric. Only needed if incorrect metric value is used.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Metric</shortdesc>
|
||||||
|
+<content type="string" default="${OCF_RESKEY_metric_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
<parameter name="table">
|
||||||
|
<longdesc lang="en">
|
||||||
|
Table to modify. E.g. "local".
|
||||||
|
@@ -548,8 +558,14 @@ rc=$?
|
||||||
|
|
||||||
|
INTERFACE=`echo $findif_out | awk '{print $1}'`
|
||||||
|
LISTROUTE=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress`
|
||||||
|
-METRIC=`echo $LISTROUTE | sed -n "s/$METRICCLAUSE/\1/p"`
|
||||||
|
[ -z "$PROTO" ] && PROTO=`echo $LISTROUTE | sed -n "s/$PROTOCLAUSE/\1/p"`
|
||||||
|
+if [ -n "$OCF_RESKEY_metric" ]; then
|
||||||
|
+ METRIC="metric $OCF_RESKEY_metric"
|
||||||
|
+elif [ -z "$TABLE" ] || [ "${TABLE#table }" = "main" ]; then
|
||||||
|
+ METRIC=`echo $LISTROUTE | sed -n "s/$METRICCLAUSE/\1/p"`
|
||||||
|
+else
|
||||||
|
+ METRIC=""
|
||||||
|
+fi
|
||||||
|
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
|
||||||
|
NETWORK=`echo $LISTROUTE | grep -m 1 -o '^[^ ]*'`
|
||||||
|
|
||||||
|
|
||||||
|
From c514f12f7a19440f475938f2a4659e5e9667fa25 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Wed, 12 Oct 2022 14:01:26 +0200
|
||||||
|
Subject: [PATCH 3/4] IPsrcaddr: use scope host when using non-main tables
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/IPsrcaddr | 8 +++++++-
|
||||||
|
1 file changed, 7 insertions(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
|
||||||
|
index 4745eb8a7..926246008 100755
|
||||||
|
--- a/heartbeat/IPsrcaddr
|
||||||
|
+++ b/heartbeat/IPsrcaddr
|
||||||
|
@@ -279,8 +279,14 @@ srca_stop() {
|
||||||
|
|
||||||
|
[ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address"
|
||||||
|
|
||||||
|
+ if [ -z "$TABLE" ] || [ "${TABLE#table }" = "main" ]; then
|
||||||
|
+ SCOPE="link"
|
||||||
|
+ else
|
||||||
|
+ SCOPE="host"
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')"
|
||||||
|
- OPTS="proto kernel scope link src $PRIMARY_IP"
|
||||||
|
+ OPTS="proto kernel scope $SCOPE src $PRIMARY_IP"
|
||||||
|
|
||||||
|
$IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC || \
|
||||||
|
errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC' failed"
|
||||||
|
|
||||||
|
From 1f387ac8017b3eee23b41eadafd58ce21a29eb21 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Thu, 13 Oct 2022 13:11:28 +0200
|
||||||
|
Subject: [PATCH 4/4] IPsrcaddr: fix monitor/status for default route not being
|
||||||
|
equal to src IP before start, and change route src correctly in stop-action
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/IPsrcaddr | 5 +++--
|
||||||
|
1 file changed, 3 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
|
||||||
|
index 926246008..1bd41a930 100755
|
||||||
|
--- a/heartbeat/IPsrcaddr
|
||||||
|
+++ b/heartbeat/IPsrcaddr
|
||||||
|
@@ -229,6 +229,7 @@ srca_read() {
|
||||||
|
|
||||||
|
[ -z "$SRCIP" ] && return 1
|
||||||
|
[ $SRCIP = $1 ] && return 0
|
||||||
|
+ [ "$__OCF_ACTION" = "monitor" ] || [ "$__OCF_ACTION" = "status" ] && [ "${ROUTE%% *}" = "default" ] && return 1
|
||||||
|
return 2
|
||||||
|
}
|
||||||
|
|
||||||
|
@@ -292,8 +293,8 @@ srca_stop() {
|
||||||
|
errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC' failed"
|
||||||
|
|
||||||
|
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
|
||||||
|
- $CMDCHANGE $ROUTE_WO_SRC || \
|
||||||
|
- errorexit "command '$CMDCHANGE $ROUTE_WO_SRC' failed"
|
||||||
|
+ $CMDCHANGE $ROUTE_WO_SRC src $PRIMARY_IP || \
|
||||||
|
+ errorexit "command '$CMDCHANGE $ROUTE_WO_SRC src $PRIMARY_IP' failed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
return $?
|
@ -0,0 +1,25 @@
From 97a05e0e662ed922c9ecd016b39ab90ee233d5c9 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 24 Nov 2022 10:36:56 +0100
Subject: [PATCH] mysql-common: return error in stop-action if kill fails to
 stop the process, so the node can get fenced

---
 heartbeat/mysql-common.sh | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh
index 34e1c6748..8104019b0 100755
--- a/heartbeat/mysql-common.sh
+++ b/heartbeat/mysql-common.sh
@@ -318,6 +318,10 @@ mysql_common_stop()
         if [ $? != $OCF_NOT_RUNNING ]; then
             ocf_log info "MySQL failed to stop after ${shutdown_timeout}s using SIGTERM. Trying SIGKILL..."
             /bin/kill -KILL $pid > /dev/null
+            mysql_common_status info $pid
+            if [ $? != $OCF_NOT_RUNNING ]; then
+                return $OCF_ERR_GENERIC
+            fi
         fi
 
         ocf_log info "MySQL stopped";
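The shell hunk above re-checks the daemon after SIGKILL and refuses to report a clean stop if it somehow survived, so Pacemaker can escalate to fencing. The same idea expressed in C, purely as an illustration (the agent itself does this with mysql_common_status; kill(pid, 0) is used here only as an existence probe for a non-child pid):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Send SIGKILL, then poll for the process actually disappearing before
     * claiming success; kill(pid, 0) only checks whether the pid exists. */
    static int force_stop(pid_t pid)
    {
        if (kill(pid, SIGKILL) < 0 && errno != ESRCH)
            return -1;              /* could not even signal it */
        for (int i = 0; i < 10; i++) {
            if (kill(pid, 0) < 0 && errno == ESRCH)
                return 0;           /* really gone: stop succeeded */
            sleep(1);
        }
        return -1;                  /* still there: report an error so the node can be fenced */
    }

    int main(int argc, char *argv[])
    {
        if (argc > 1) {
            pid_t pid = (pid_t)atoi(argv[1]);   /* pid of an unrelated daemon, as in the agent */
            printf("force_stop: %d\n", force_stop(pid));
        }
        return 0;
    }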
27
SOURCES/bz2141836-vdo-vol-dont-fail-probe-action.patch
Normal file
@ -0,0 +1,27 @@
From 739e6ce9096facd6d37dffd524c79c961e3fae38 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Fri, 11 Nov 2022 14:17:39 +0100
Subject: [PATCH] vdo-vol: dont fail probe action when the underlying device
 doesnt exist

---
 heartbeat/vdo-vol | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/heartbeat/vdo-vol b/heartbeat/vdo-vol
index 94822cb82..29bd7b8fd 100755
--- a/heartbeat/vdo-vol
+++ b/heartbeat/vdo-vol
@@ -148,6 +148,12 @@ vdo_monitor(){
 	MODE=$(vdostats --verbose ${OCF_RESKEY_volume} | grep "operating mode" | awk '{print $NF}')
 
 	case "$status" in
+		*"ERROR - vdodumpconfig: Failed to make FileLayer from"*)
+			if ocf_is_probe; then
+				return $OCF_NOT_RUNNING
+			fi
+			return $OCF_ERR_GENERIC
+			;;
 		*"Device mapper status: not available"*)
 			return $OCF_NOT_RUNNING
 			;;
42
SOURCES/bz2149970-lvmlockd-add-use_lvmlockd-if-missing.patch
Normal file
@ -0,0 +1,42 @@
From 2695888c983df331b0fee407a5c69c493a360313 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 30 Nov 2022 12:07:05 +0100
Subject: [PATCH] lvmlockd: add "use_lvmlockd = 1" if it's commented out or
 missing

---
 heartbeat/lvmlockd | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/heartbeat/lvmlockd b/heartbeat/lvmlockd
index dc7bd2d7e..f4b299f28 100755
--- a/heartbeat/lvmlockd
+++ b/heartbeat/lvmlockd
@@ -180,14 +180,23 @@ setup_lvm_config()
 	lock_type=$(echo "$out" | cut -d'=' -f2)
 
 	if [ -z "$use_lvmlockd" ]; then
-		ocf_exit_reason "\"use_lvmlockd\" not set in /etc/lvm/lvm.conf ..."
-		exit $OCF_ERR_CONFIGURED
-	fi
+		ocf_log info "adding \"use_lvmlockd=1\" to /etc/lvm/lvm.conf ..."
+		cat >> /etc/lvm/lvm.conf << EOF
+
+global {
+	use_lvmlockd = 1
+}
+EOF
 
-	if [ -n "$use_lvmlockd" ] && [ "$use_lvmlockd" != 1 ] ; then
+		if [ $? -ne 0 ]; then
+			ocf_exit_reason "unable to add \"use_lvmlockd=1\" to /etc/lvm/lvm.conf ..."
+			exit $OCF_ERR_CONFIGURED
+		fi
+	elif [ "$use_lvmlockd" != 1 ] ; then
 		ocf_log info "setting \"use_lvmlockd=1\" in /etc/lvm/lvm.conf ..."
 		sed -i 's,^[[:blank:]]*use_lvmlockd[[:blank:]]*=.*,\ \ \ \ use_lvmlockd = 1,g' /etc/lvm/lvm.conf
 	fi
+
 	if [ -n "$lock_type" ] ; then
 		# locking_type was removed from config in v2.03
 		ocf_version_cmp "$(lvmconfig --version | awk '/LVM ver/ {sub(/\(.*/, "", $3); print $3}')" "2.03"
@ -0,0 +1,24 @@
From e7a748d35fe56f2be727ecae1885a2f1366f41bf Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 15 Mar 2023 13:03:07 +0100
Subject: [PATCH] ethmonitor: dont log "Interface does not exist" for
 monitor-action

---
 heartbeat/ethmonitor | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor
index 451738a0b5..f9c9ef4bdd 100755
--- a/heartbeat/ethmonitor
+++ b/heartbeat/ethmonitor
@@ -271,6 +271,9 @@ if_init() {
 		validate-all)
 			ocf_exit_reason "Interface $NIC does not exist"
 			exit $OCF_ERR_CONFIGURED;;
+		monitor)
+			ocf_log debug "Interface $NIC does not exist"
+			;;
 		*)
 			## It might be a bond interface which is temporarily not available, therefore we want to continue here
 			ocf_log warn "Interface $NIC does not exist"
@ -0,0 +1,137 @@
From bf89ad06d5da5c05533c80a37a37c8dbbcd123aa Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 8 Dec 2022 15:40:07 +0100
Subject: [PATCH] galera/mpathpersist/sg_persist/IPsrcaddr: only check notify
 and promotable when OCF_CHECK_LEVEL=10

Pacemaker has started running validate-all action before creating the
resource. It doesnt provide notify/promotable settings while doing so,
so this patch moves these checks to OCF_CHECK_LEVEL 10 and runs the
validate action at OCF_CHECK_LEVEL 10 in the start-action.
---
 heartbeat/IPsrcaddr | 13 ++++++++-----
 heartbeat/galera.in | 9 ++++++---
 heartbeat/mpathpersist.in | 13 +++++++++----
 heartbeat/sg_persist.in | 13 +++++++++----
 4 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
|
||||||
|
index 1bd41a930..66e2ad8cd 100755
|
||||||
|
--- a/heartbeat/IPsrcaddr
|
||||||
|
+++ b/heartbeat/IPsrcaddr
|
||||||
|
@@ -510,11 +510,13 @@ srca_validate_all() {
|
||||||
|
fi
|
||||||
|
|
||||||
|
# We should serve this IP address of course
|
||||||
|
- if ip_status "$ipaddress"; then
|
||||||
|
- :
|
||||||
|
- else
|
||||||
|
- ocf_exit_reason "We are not serving [$ipaddress], hence can not make it a preferred source address"
|
||||||
|
- return $OCF_ERR_INSTALLED
|
||||||
|
+ if [ "$OCF_CHECK_LEVEL" -eq 10 ]; then
|
||||||
|
+ if ip_status "$ipaddress"; then
|
||||||
|
+ :
|
||||||
|
+ else
|
||||||
|
+ ocf_exit_reason "We are not serving [$ipaddress], hence can not make it a preferred source address"
|
||||||
|
+ return $OCF_ERR_INSTALLED
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
}
|
||||||
|
@@ -540,6 +542,7 @@ esac
|
||||||
|
|
||||||
|
ipaddress="$OCF_RESKEY_ipaddress"
|
||||||
|
|
||||||
|
+[ "$__OCF_ACTION" != "validate-all" ] && OCF_CHECK_LEVEL=10
|
||||||
|
srca_validate_all
|
||||||
|
rc=$?
|
||||||
|
if [ $rc -ne $OCF_SUCCESS ]; then
|
||||||
|
diff --git a/heartbeat/galera.in b/heartbeat/galera.in
|
||||||
|
index cd2fee7c0..6aed3e4b6 100755
|
||||||
|
--- a/heartbeat/galera.in
|
||||||
|
+++ b/heartbeat/galera.in
|
||||||
|
@@ -1015,9 +1015,11 @@ galera_stop()
|
||||||
|
|
||||||
|
galera_validate()
|
||||||
|
{
|
||||||
|
- if ! ocf_is_ms; then
|
||||||
|
- ocf_exit_reason "Galera must be configured as a multistate Master/Slave resource."
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
+ if [ "$OCF_CHECK_LEVEL" -eq 10 ]; then
|
||||||
|
+ if ! ocf_is_ms; then
|
||||||
|
+ ocf_exit_reason "Galera must be configured as a multistate Master/Slave resource."
|
||||||
|
+ return $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$OCF_RESKEY_wsrep_cluster_address" ]; then
|
||||||
|
@@ -1035,6 +1037,7 @@ case "$1" in
|
||||||
|
exit $OCF_SUCCESS;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
+[ "$__OCF_ACTION" = "start" ] && OCF_CHECK_LEVEL=10
|
||||||
|
galera_validate
|
||||||
|
rc=$?
|
||||||
|
LSB_STATUS_STOPPED=3
|
||||||
|
diff --git a/heartbeat/mpathpersist.in b/heartbeat/mpathpersist.in
|
||||||
|
index 0e2c2a4a0..8a46b9930 100644
|
||||||
|
--- a/heartbeat/mpathpersist.in
|
||||||
|
+++ b/heartbeat/mpathpersist.in
|
||||||
|
@@ -630,10 +630,11 @@ mpathpersist_action_notify() {
|
||||||
|
}
|
||||||
|
|
||||||
|
mpathpersist_action_validate_all () {
|
||||||
|
-
|
||||||
|
- if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then
|
||||||
|
- ocf_log err "Master options misconfigured."
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ if [ "$OCF_CHECK_LEVEL" -eq 10 ]; then
|
||||||
|
+ if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then
|
||||||
|
+ ocf_log err "Master options misconfigured."
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
@@ -659,6 +660,10 @@ case $ACTION in
|
||||||
|
start|promote|monitor|stop|demote)
|
||||||
|
ocf_log debug "$RESOURCE: starting action \"$ACTION\""
|
||||||
|
mpathpersist_init
|
||||||
|
+ if [ "$__OCF_ACTION" = "start" ]; then
|
||||||
|
+ OCF_CHECK_LEVEL=10
|
||||||
|
+ mpathpersist_action_validate_all
|
||||||
|
+ fi
|
||||||
|
mpathpersist_action_$ACTION
|
||||||
|
exit $?
|
||||||
|
;;
|
||||||
|
diff --git a/heartbeat/sg_persist.in b/heartbeat/sg_persist.in
|
||||||
|
index 16048ea6f..620c02f4a 100644
|
||||||
|
--- a/heartbeat/sg_persist.in
|
||||||
|
+++ b/heartbeat/sg_persist.in
|
||||||
|
@@ -643,10 +643,11 @@ sg_persist_action_notify() {
|
||||||
|
}
|
||||||
|
|
||||||
|
sg_persist_action_validate_all () {
|
||||||
|
-
|
||||||
|
- if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then
|
||||||
|
- ocf_log err "Master options misconfigured."
|
||||||
|
- exit $OCF_ERR_CONFIGURED
|
||||||
|
+ if [ "$OCF_CHECK_LEVEL" -eq 10 ]; then
|
||||||
|
+ if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then
|
||||||
|
+ ocf_log err "Master options misconfigured."
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
@@ -672,6 +673,10 @@ case $ACTION in
|
||||||
|
start|promote|monitor|stop|demote)
|
||||||
|
ocf_log debug "$RESOURCE: starting action \"$ACTION\""
|
||||||
|
sg_persist_init
|
||||||
|
+ if [ "$__OCF_ACTION" = "start" ]; then
|
||||||
|
+ OCF_CHECK_LEVEL=10
|
||||||
|
+ sg_persist_action_validate_all
|
||||||
|
+ fi
|
||||||
|
sg_persist_action_$ACTION
|
||||||
|
exit $?
|
||||||
|
;;
|
@ -0,0 +1,49 @@
From 21666c5c842b8a6028699ee78db75a1d7134fad0 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 4 Jan 2023 10:39:16 +0100
Subject: [PATCH 1/2] Filesystem: remove validate-all mountpoint warning as it
 is auto-created during start-action if it doesnt exist

---
 heartbeat/Filesystem | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index 44270ad98..65088029e 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -851,10 +851,6 @@ Filesystem_monitor()
 #
 Filesystem_validate_all()
 {
-	if [ -n "$MOUNTPOINT" ] && [ ! -d "$MOUNTPOINT" ]; then
-		ocf_log warn "Mountpoint $MOUNTPOINT does not exist"
-	fi
-
 	# Check if the $FSTYPE is workable
 	# NOTE: Without inserting the $FSTYPE module, this step may be imprecise
 	# TODO: This is Linux specific crap.

From 8a7f40b6ab93d8d39230d864ab06a57ff48d6f1f Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 5 Jan 2023 13:09:48 +0100
Subject: [PATCH 2/2] CTDB: change public_addresses validate-all warning to
 info

---
 heartbeat/CTDB.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in
index 46f56cfac..b4af66bc1 100755
--- a/heartbeat/CTDB.in
+++ b/heartbeat/CTDB.in
@@ -940,7 +940,7 @@ ctdb_validate() {
 	fi
 
 	if [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ]; then
-		ocf_log warn "CTDB file '${OCF_RESKEY_ctdb_config_dir}/public_addresses' exists - CTDB will try to manage IP failover!"
+		ocf_log info "CTDB file '${OCF_RESKEY_ctdb_config_dir}/public_addresses' exists - CTDB will try to manage IP failover!"
 	fi
 
 	if [ ! -f "$OCF_RESKEY_ctdb_config_dir/nodes" ]; then
@@ -0,0 +1,68 @@
--- a/heartbeat/pgsqlms	2023-01-04 14:42:36.093258702 +0100
+++ b/heartbeat/pgsqlms	2023-01-04 14:40:52.403994545 +0100
@@ -66,6 +66,7 @@
 my $maxlag = $ENV{'OCF_RESKEY_maxlag'} || $maxlag_default;
 my $recovery_tpl = $ENV{'OCF_RESKEY_recovery_template'}
     || "$pgdata/recovery.conf.pcmk";
+my $ocf_check_level = $ENV{'OCF_CHECK_LEVEL'} || 0;


 # PostgreSQL commands path
@@ -1304,26 +1305,28 @@
         return $OCF_ERR_INSTALLED;
     }

-    # check notify=true
-    $ans = qx{ $CRM_RESOURCE --resource "$OCF_RESOURCE_INSTANCE" \\
-        --meta --get-parameter notify 2>/dev/null };
-    chomp $ans;
-    unless ( lc($ans) =~ /^true$|^on$|^yes$|^y$|^1$/ ) {
-        ocf_exit_reason(
-            'You must set meta parameter notify=true for your master resource'
-        );
-        return $OCF_ERR_INSTALLED;
-    }
+    if ( $ocf_check_level == 10 ) {
+        # check notify=true
+        $ans = qx{ $CRM_RESOURCE --resource "$OCF_RESOURCE_INSTANCE" \\
+            --meta --get-parameter notify 2>/dev/null };
+        chomp $ans;
+        unless ( lc($ans) =~ /^true$|^on$|^yes$|^y$|^1$/ ) {
+            ocf_exit_reason(
+                'You must set meta parameter notify=true for your "master" resource'
+            );
+            return $OCF_ERR_INSTALLED;
+        }

-    # check master-max=1
-    unless (
-        defined $ENV{'OCF_RESKEY_CRM_meta_master_max'}
-        and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1'
-    ) {
-        ocf_exit_reason(
-            'You must set meta parameter master-max=1 for your master resource'
-        );
-        return $OCF_ERR_INSTALLED;
+        # check master-max=1
+        unless (
+            defined $ENV{'OCF_RESKEY_CRM_meta_master_max'}
+            and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1'
+        ) {
+            ocf_exit_reason(
+                'You must set meta parameter master-max=1 for your "master" resource'
+            );
+            return $OCF_ERR_INSTALLED;
+        }
     }

     if ( $PGVERNUM >= $PGVER_12 ) {
@@ -2242,6 +2245,9 @@
 # Set current node name.
 $nodename = ocf_local_nodename();

+if ( $__OCF_ACTION ne 'validate-all' ) {
+    $ocf_check_level = 10;
+}
 $exit_code = pgsql_validate_all();

 exit $exit_code if $exit_code != $OCF_SUCCESS or $__OCF_ACTION eq 'validate-all';
187
SOURCES/bz2157873-4-exportfs-pgsql-validate-all-fixes.patch
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
From 81f9e1a04dfd2274ccb906310b4f191485e342ab Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Wed, 11 Jan 2023 13:22:24 +0100
|
||||||
|
Subject: [PATCH 1/2] exportfs: move testdir() to start-action to avoid failing
|
||||||
|
during resource creation (validate-all) and make it create the directory if
|
||||||
|
it doesnt exist
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/exportfs | 27 +++++++++++++++------------
|
||||||
|
1 file changed, 15 insertions(+), 12 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/exportfs b/heartbeat/exportfs
|
||||||
|
index c10777fa9..2307a9e67 100755
|
||||||
|
--- a/heartbeat/exportfs
|
||||||
|
+++ b/heartbeat/exportfs
|
||||||
|
@@ -301,6 +301,16 @@ exportfs_monitor ()
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
+testdir() {
|
||||||
|
+ if [ ! -d $1 ]; then
|
||||||
|
+ mkdir -p "$1"
|
||||||
|
+ if [ $? -ne 0 ]; then
|
||||||
|
+ ocf_exit_reason "Unable to create directory $1"
|
||||||
|
+ return 1
|
||||||
|
+ fi
|
||||||
|
+ fi
|
||||||
|
+ return 0
|
||||||
|
+}
|
||||||
|
export_one() {
|
||||||
|
local dir=$1
|
||||||
|
local opts sep
|
||||||
|
@@ -331,6 +341,10 @@ export_one() {
|
||||||
|
}
|
||||||
|
exportfs_start ()
|
||||||
|
{
|
||||||
|
+ if ! forall testdir; then
|
||||||
|
+ return $OCF_ERR_INSTALLED
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
if exportfs_monitor; then
|
||||||
|
ocf_log debug "already exported"
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
@@ -428,14 +442,6 @@ exportfs_stop ()
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
-testdir() {
|
||||||
|
- if [ ! -d $1 ]; then
|
||||||
|
- ocf_is_probe ||
|
||||||
|
- ocf_log err "$1 does not exist or is not a directory"
|
||||||
|
- return 1
|
||||||
|
- fi
|
||||||
|
- return 0
|
||||||
|
-}
|
||||||
|
exportfs_validate_all ()
|
||||||
|
{
|
||||||
|
if echo "$OCF_RESKEY_fsid" | grep -q -F ','; then
|
||||||
|
@@ -447,9 +453,6 @@ exportfs_validate_all ()
|
||||||
|
ocf_exit_reason "use integer fsid when exporting multiple directories"
|
||||||
|
return $OCF_ERR_CONFIGURED
|
||||||
|
fi
|
||||||
|
- if ! forall testdir; then
|
||||||
|
- return $OCF_ERR_INSTALLED
|
||||||
|
- fi
|
||||||
|
}
|
||||||
|
|
||||||
|
for dir in $OCF_RESKEY_directory; do
|
||||||
|
@@ -466,7 +469,7 @@ for dir in $OCF_RESKEY_directory; do
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
case "$__OCF_ACTION" in
|
||||||
|
- stop|monitor)
|
||||||
|
+ stop|monitor|validate-all)
|
||||||
|
canonicalized_dir="$dir"
|
||||||
|
ocf_log debug "$dir does not exist"
|
||||||
|
;;
|
||||||
|
|
||||||
|
From 8ee41af82cda35149f8e0cfede6a8ddef3e221e1 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Wed, 11 Jan 2023 13:25:57 +0100
|
||||||
|
Subject: [PATCH 2/2] pgsql: dont run promotable and file checks that could be
|
||||||
|
on shared storage during validate-all action
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/pgsql | 53 +++++++++++++++++++++++++++++--------------------
|
||||||
|
1 file changed, 32 insertions(+), 21 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/pgsql b/heartbeat/pgsql
|
||||||
|
index aa8a13a84..532063ac5 100755
|
||||||
|
--- a/heartbeat/pgsql
|
||||||
|
+++ b/heartbeat/pgsql
|
||||||
|
@@ -1835,7 +1835,7 @@ check_config() {
|
||||||
|
|
||||||
|
if [ ! -f "$1" ]; then
|
||||||
|
if ocf_is_probe; then
|
||||||
|
- ocf_log info "Configuration file is $1 not readable during probe."
|
||||||
|
+ ocf_log info "Unable to read $1 during probe."
|
||||||
|
rc=1
|
||||||
|
else
|
||||||
|
ocf_exit_reason "Configuration file $1 doesn't exist"
|
||||||
|
@@ -1846,8 +1846,7 @@ check_config() {
|
||||||
|
return $rc
|
||||||
|
}
|
||||||
|
|
||||||
|
-# Validate most critical parameters
|
||||||
|
-pgsql_validate_all() {
|
||||||
|
+validate_ocf_check_level_10() {
|
||||||
|
local version
|
||||||
|
local check_config_rc
|
||||||
|
local rep_mode_string
|
||||||
|
@@ -1883,12 +1882,6 @@ pgsql_validate_all() {
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
- getent passwd $OCF_RESKEY_pgdba >/dev/null 2>&1
|
||||||
|
- if [ ! $? -eq 0 ]; then
|
||||||
|
- ocf_exit_reason "User $OCF_RESKEY_pgdba doesn't exist";
|
||||||
|
- return $OCF_ERR_INSTALLED;
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
if ocf_is_probe; then
|
||||||
|
ocf_log info "Don't check $OCF_RESKEY_pgdata during probe"
|
||||||
|
else
|
||||||
|
@@ -1898,18 +1891,6 @@ pgsql_validate_all() {
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
- if [ -n "$OCF_RESKEY_monitor_user" -a ! -n "$OCF_RESKEY_monitor_password" ]
|
||||||
|
- then
|
||||||
|
- ocf_exit_reason "monitor password can't be empty"
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
- if [ ! -n "$OCF_RESKEY_monitor_user" -a -n "$OCF_RESKEY_monitor_password" ]
|
||||||
|
- then
|
||||||
|
- ocf_exit_reason "monitor_user has to be set if monitor_password is set"
|
||||||
|
- return $OCF_ERR_CONFIGURED
|
||||||
|
- fi
|
||||||
|
-
|
||||||
|
if is_replication || [ "$OCF_RESKEY_rep_mode" = "slave" ]; then
|
||||||
|
if [ `printf "$version\n9.1" | sort -n | head -1` != "9.1" ]; then
|
||||||
|
ocf_exit_reason "Replication mode needs PostgreSQL 9.1 or higher."
|
||||||
|
@@ -2027,6 +2008,35 @@ pgsql_validate_all() {
|
||||||
|
return $OCF_SUCCESS
|
||||||
|
}
|
||||||
|
|
||||||
|
+# Validate most critical parameters
|
||||||
|
+pgsql_validate_all() {
|
||||||
|
+ local rc
|
||||||
|
+
|
||||||
|
+ getent passwd $OCF_RESKEY_pgdba >/dev/null 2>&1
|
||||||
|
+ if [ ! $? -eq 0 ]; then
|
||||||
|
+ ocf_exit_reason "User $OCF_RESKEY_pgdba doesn't exist";
|
||||||
|
+ return $OCF_ERR_INSTALLED;
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ if [ -n "$OCF_RESKEY_monitor_user" ] && [ -z "$OCF_RESKEY_monitor_password" ]; then
|
||||||
|
+ ocf_exit_reason "monitor password can't be empty"
|
||||||
|
+ return $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ if [ -z "$OCF_RESKEY_monitor_user" ] && [ -n "$OCF_RESKEY_monitor_password" ]; then
|
||||||
|
+ ocf_exit_reason "monitor_user has to be set if monitor_password is set"
|
||||||
|
+ return $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ if [ "$OCF_CHECK_LEVEL" -eq 10 ]; then
|
||||||
|
+ validate_ocf_check_level_10
|
||||||
|
+ rc=$?
|
||||||
|
+ [ $rc -ne "$OCF_SUCCESS" ] && exit $rc
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ return $OCF_SUCCESS
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
|
||||||
|
#
|
||||||
|
# Check if we need to create a log file
|
||||||
|
@@ -2163,6 +2173,7 @@ case "$1" in
|
||||||
|
exit $OCF_SUCCESS;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
+[ "$__OCF_ACTION" != "validate-all" ] && OCF_CHECK_LEVEL=10
|
||||||
|
pgsql_validate_all
|
||||||
|
rc=$?
|
||||||
|
|
@@ -0,0 +1,23 @@
--- ClusterLabs-resource-agents-fd0720f7/heartbeat/pgsqlms	2023-01-16 10:54:30.897188238 +0100
+++ pgsqlms	2023-01-10 14:21:19.281286242 +0100
@@ -1351,12 +1351,14 @@
         return $OCF_ERR_ARGS;
     }

-    $guc = qx{ $POSTGRES -C primary_conninfo -D "$pgdata" $start_opts};
-    unless ($guc =~ /\bapplication_name='?$nodename'?\b/) {
-        ocf_exit_reason(
-            q{Parameter "primary_conninfo" MUST contain 'application_name=%s'. }.
-            q{It is currently set to '%s'}, $nodename, $guc );
-        return $OCF_ERR_ARGS;
+    if ( $ocf_check_level == 10 ) {
+        $guc = qx{ $POSTGRES -C primary_conninfo -D "$pgdata" $start_opts};
+        unless ($guc =~ /\bapplication_name='?$nodename'?\b/) {
+            ocf_exit_reason(
+                q{Parameter "primary_conninfo" MUST contain 'application_name=%s'. }.
+                q{It is currently set to '%s'}, $nodename, $guc );
+            return $OCF_ERR_ARGS;
+        }
     }
 }
 else {
@@ -0,0 +1,54 @@
From 81bb58b05d2ddabd17fe31af39f0e857e61db3c9 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 28 Mar 2023 16:53:45 +0200
Subject: [PATCH] azure-events*: fix for no "Transition Summary" for Pacemaker
 2.1+

---
 heartbeat/azure-events-az.in | 8 ++++----
 heartbeat/azure-events.in | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in
index 59d0953061..67c02c6422 100644
--- a/heartbeat/azure-events-az.in
+++ b/heartbeat/azure-events-az.in
@@ -311,10 +311,10 @@ class clusterHelper:
 		summary = clusterHelper._exec("crm_simulate", "-Ls")
 		if not summary:
 			ocf.logger.warning("transitionSummary: could not load transition summary")
-			return False
+			return ""
 		if summary.find("Transition Summary:") < 0:
-			ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary)
-			return False
+			ocf.logger.debug("transitionSummary: no transactions: %s" % summary)
+			return ""
 		summary = summary.split("Transition Summary:")[1]
 		ret = summary.split("\n").pop(0)

@@ -768,4 +768,4 @@ def main():
 	agent.run()

 if __name__ == '__main__':
-	main()
\ No newline at end of file
+	main()
diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in
index 66e129060a..5ad658df93 100644
--- a/heartbeat/azure-events.in
+++ b/heartbeat/azure-events.in
@@ -310,10 +310,10 @@ class clusterHelper:
 		summary = clusterHelper._exec("crm_simulate", "-Ls")
 		if not summary:
 			ocf.logger.warning("transitionSummary: could not load transition summary")
-			return False
+			return ""
 		if summary.find("Transition Summary:") < 0:
-			ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary)
-			return False
+			ocf.logger.debug("transitionSummary: no transactions: %s" % summary)
+			return ""
 		summary = summary.split("Transition Summary:")[1]
 		ret = summary.split("\n").pop(0)

77
SOURCES/bz2181019-azure-events-2-improve-logic.patch
Normal file
@@ -0,0 +1,77 @@
From ff53e5c8d6867e580506d132fba6fcf6aa46b804 Mon Sep 17 00:00:00 2001
From: Peter Varkoly <varkoly@suse.com>
Date: Sat, 29 Apr 2023 08:09:11 +0200
Subject: [PATCH] Use -LS instead of -Ls as parameter to get the Transition
 Summary

---
 heartbeat/azure-events-az.in | 9 +++++----
 heartbeat/azure-events.in | 9 +++++----
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in
index 67c02c642..46d4d1f3d 100644
--- a/heartbeat/azure-events-az.in
+++ b/heartbeat/azure-events-az.in
@@ -298,7 +298,7 @@ class clusterHelper:
 		Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby)
 		"""
 		# <tniek> Is a global crm_simulate "too much"? Or would it be sufficient it there are no planned transitions for a particular node?
-		# # crm_simulate -Ls
+		# # crm_simulate -LS
 		# Transition Summary:
 		# * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1)
 		# * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0)
@@ -308,15 +308,16 @@ class clusterHelper:
 		# Transition Summary:
 		ocf.logger.debug("transitionSummary: begin")

-		summary = clusterHelper._exec("crm_simulate", "-Ls")
+		summary = clusterHelper._exec("crm_simulate", "-LS")
 		if not summary:
 			ocf.logger.warning("transitionSummary: could not load transition summary")
 			return ""
 		if summary.find("Transition Summary:") < 0:
 			ocf.logger.debug("transitionSummary: no transactions: %s" % summary)
 			return ""
-		summary = summary.split("Transition Summary:")[1]
-		ret = summary.split("\n").pop(0)
+		j=summary.find('Transition Summary:') + len('Transition Summary:')
+		l=summary.lower().find('executing cluster transition:')
+		ret = list(filter(str.strip, summary[j:l].split("\n")))

 		ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret))
 		return ret
diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in
index 5ad658df9..90acaba62 100644
--- a/heartbeat/azure-events.in
+++ b/heartbeat/azure-events.in
@@ -297,7 +297,7 @@ class clusterHelper:
 		Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby)
 		"""
 		# <tniek> Is a global crm_simulate "too much"? Or would it be sufficient it there are no planned transitions for a particular node?
-		# # crm_simulate -Ls
+		# # crm_simulate -LS
 		# Transition Summary:
 		# * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1)
 		# * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0)
@@ -307,15 +307,16 @@ class clusterHelper:
 		# Transition Summary:
 		ocf.logger.debug("transitionSummary: begin")

-		summary = clusterHelper._exec("crm_simulate", "-Ls")
+		summary = clusterHelper._exec("crm_simulate", "-LS")
 		if not summary:
 			ocf.logger.warning("transitionSummary: could not load transition summary")
 			return ""
 		if summary.find("Transition Summary:") < 0:
 			ocf.logger.debug("transitionSummary: no transactions: %s" % summary)
 			return ""
-		summary = summary.split("Transition Summary:")[1]
-		ret = summary.split("\n").pop(0)
+		j=summary.find('Transition Summary:') + len('Transition Summary:')
+		l=summary.lower().find('executing cluster transition:')
+		ret = list(filter(str.strip, summary[j:l].split("\n")))

 		ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret))
 		return ret
@@ -0,0 +1,23 @@
From b02b06c437b1d8cb1dcfe8ace47c2efc4a0e476c Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 30 Mar 2023 14:44:41 +0200
Subject: [PATCH] Filesystem: fail if AWS efs-utils not installed when
 fstype=efs

---
 heartbeat/Filesystem | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index 65088029ec..50c68f115b 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -456,7 +456,7 @@ fstype_supported()
 	# System (EFS)
 	case "$FSTYPE" in
 	fuse.*|glusterfs|rozofs) support="fuse";;
-	efs) support="nfs4";;
+	efs) check_binary "mount.efs"; support="nfs4";;
 	esac

 	if [ "$support" != "$FSTYPE" ]; then
125
SOURCES/bz2189243-Filesystem-1-improve-stop-action.patch
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
From 48ed6e6d6510f42743e4463970e27f05637e4982 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||||
|
Date: Tue, 4 Jul 2023 14:40:19 +0200
|
||||||
|
Subject: [PATCH] Filesystem: improve stop-action and allow setting term/kill
|
||||||
|
signals and signal_delay for large filesystems
|
||||||
|
|
||||||
|
---
|
||||||
|
heartbeat/Filesystem | 80 ++++++++++++++++++++++++++++++++++++++------
|
||||||
|
1 file changed, 70 insertions(+), 10 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
|
||||||
|
index 65a9dffb5..fe608ebfd 100755
|
||||||
|
--- a/heartbeat/Filesystem
|
||||||
|
+++ b/heartbeat/Filesystem
|
||||||
|
@@ -71,6 +71,9 @@ OCF_RESKEY_run_fsck_default="auto"
|
||||||
|
OCF_RESKEY_fast_stop_default="no"
|
||||||
|
OCF_RESKEY_force_clones_default="false"
|
||||||
|
OCF_RESKEY_force_unmount_default="true"
|
||||||
|
+OCF_RESKEY_term_signals_default="TERM"
|
||||||
|
+OCF_RESKEY_kill_signals_default="KILL"
|
||||||
|
+OCF_RESKEY_signal_delay_default="1"
|
||||||
|
|
||||||
|
# RHEL specific defaults
|
||||||
|
if is_redhat_based; then
|
||||||
|
@@ -104,6 +107,9 @@ if [ -z "${OCF_RESKEY_fast_stop}" ]; then
|
||||||
|
fi
|
||||||
|
: ${OCF_RESKEY_force_clones=${OCF_RESKEY_force_clones_default}}
|
||||||
|
: ${OCF_RESKEY_force_unmount=${OCF_RESKEY_force_unmount_default}}
|
||||||
|
+: ${OCF_RESKEY_term_signals=${OCF_RESKEY_term_signals_default}}
|
||||||
|
+: ${OCF_RESKEY_kill_signals=${OCF_RESKEY_kill_signals_default}}
|
||||||
|
+: ${OCF_RESKEY_signal_delay=${OCF_RESKEY_signal_delay_default}}
|
||||||
|
|
||||||
|
# Variables used by multiple methods
|
||||||
|
HOSTOS=$(uname)
|
||||||
|
@@ -266,6 +272,30 @@ block if unresponsive nfs mounts are in use on the system.
|
||||||
|
<content type="boolean" default="${OCF_RESKEY_force_unmount_default}" />
|
||||||
|
</parameter>
|
||||||
|
|
||||||
|
+<parameter name="term_signals">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Signals (names or numbers, whitespace separated) to send processes during graceful termination phase in stop-action.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Signals (names or numbers, whitespace separated) to send processes during graceful termination phase in stop-action</shortdesc>
|
||||||
|
+<content type="boolean" default="${OCF_RESKEY_term_signals_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="kill_signals">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Signals (names or numbers, whitespace separated) to send processes during forceful killing phase in stop-action.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Signals (names or numbers, whitespace separated) to send processes during forceful killing phase in stop-action</shortdesc>
|
||||||
|
+<content type="boolean" default="${OCF_RESKEY_kill_signals_default}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="signal_delay">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+How many seconds to wait after sending term/kill signals to processes in stop-action.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">How many seconds to wait after sending term/kill signals to processes in stop-action</shortdesc>
|
||||||
|
+<content type="boolean" default="${OCF_RESKEY_kill_signal_delay}" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
</parameters>
|
||||||
|
|
||||||
|
<actions>
|
||||||
|
@@ -663,19 +693,49 @@ try_umount() {
|
||||||
|
}
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
}
|
||||||
|
-fs_stop() {
|
||||||
|
- local SUB="$1" timeout=$2 sig cnt
|
||||||
|
- for sig in TERM KILL; do
|
||||||
|
- cnt=$((timeout/2)) # try half time with TERM
|
||||||
|
- while [ $cnt -gt 0 ]; do
|
||||||
|
- try_umount "$SUB" &&
|
||||||
|
- return $OCF_SUCCESS
|
||||||
|
- ocf_exit_reason "Couldn't unmount $SUB; trying cleanup with $sig"
|
||||||
|
+timeout_child() {
|
||||||
|
+ local pid="$1" timeout="$2" killer ret
|
||||||
|
+
|
||||||
|
+ # start job in the background that will KILL the given process after timeout expires
|
||||||
|
+ sleep $timeout && kill -s KILL $pid &
|
||||||
|
+ killer=$!
|
||||||
|
+
|
||||||
|
+ # block until the child process either exits on its own or gets killed by the above killer pipeline
|
||||||
|
+ wait $pid
|
||||||
|
+ ret=$?
|
||||||
|
+
|
||||||
|
+ # ret would be 127 + child exit code if the timeout expired
|
||||||
|
+ [ $ret -lt 128 ] && kill -s KILL $killer
|
||||||
|
+ return $ret
|
||||||
|
+}
|
||||||
|
+fs_stop_loop() {
|
||||||
|
+ local SUB="$1" signals="$2" sig
|
||||||
|
+ while true; do
|
||||||
|
+ for sig in $signals; do
|
||||||
|
signal_processes "$SUB" $sig
|
||||||
|
- cnt=$((cnt-1))
|
||||||
|
- sleep 1
|
||||||
|
done
|
||||||
|
+ sleep $OCF_RESKEY_signal_delay
|
||||||
|
+ try_umount "$SUB" && return $OCF_SUCCESS
|
||||||
|
done
|
||||||
|
+}
|
||||||
|
+fs_stop() {
|
||||||
|
+ local SUB="$1" timeout=$2 grace_time ret
|
||||||
|
+ grace_time=$((timeout/2))
|
||||||
|
+
|
||||||
|
+ # try gracefully terminating processes for up to half of the configured timeout
|
||||||
|
+ fs_stop_loop "$SUB" "$OCF_RESKEY_term_signals" &
|
||||||
|
+ timeout_child $! $grace_time
|
||||||
|
+ ret=$?
|
||||||
|
+ [ $ret -eq $OCF_SUCCESS ] && return $ret
|
||||||
|
+
|
||||||
|
+ # try killing them for the rest of the timeout
|
||||||
|
+ fs_stop_loop "$SUB" "$OCF_RESKEY_kill_signals" &
|
||||||
|
+ timeout_child $! $grace_time
|
||||||
|
+ ret=$?
|
||||||
|
+ [ $ret -eq $OCF_SUCCESS ] && return $ret
|
||||||
|
+
|
||||||
|
+ # timeout expired
|
||||||
|
+ ocf_exit_reason "Couldn't unmount $SUB within given timeout"
|
||||||
|
return $OCF_ERR_GENERIC
|
||||||
|
}
|
||||||
|
|
@@ -0,0 +1,49 @@
From 7056635f3f94c1bcaaa5ed5563dc3b0e9f6749e0 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 18 Jul 2023 14:12:27 +0200
Subject: [PATCH] Filesystem: dont use boolean type for non-boolean parameters

---
 heartbeat/Filesystem | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index ee55a4843..b9aae8d50 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -269,7 +269,7 @@ fuser cli tool. fuser is known to perform operations that can potentially
 block if unresponsive nfs mounts are in use on the system.
 </longdesc>
 <shortdesc lang="en">Kill processes before unmount</shortdesc>
-<content type="boolean" default="${OCF_RESKEY_force_unmount_default}" />
+<content type="string" default="${OCF_RESKEY_force_unmount_default}" />
 </parameter>

 <parameter name="term_signals">
@@ -277,7 +277,7 @@ block if unresponsive nfs mounts are in use on the system.
 Signals (names or numbers, whitespace separated) to send processes during graceful termination phase in stop-action.
 </longdesc>
 <shortdesc lang="en">Signals (names or numbers, whitespace separated) to send processes during graceful termination phase in stop-action</shortdesc>
-<content type="boolean" default="${OCF_RESKEY_term_signals_default}" />
+<content type="string" default="${OCF_RESKEY_term_signals_default}" />
 </parameter>

 <parameter name="kill_signals">
@@ -285,7 +285,7 @@ Signals (names or numbers, whitespace separated) to send processes during gracef
 Signals (names or numbers, whitespace separated) to send processes during forceful killing phase in stop-action.
 </longdesc>
 <shortdesc lang="en">Signals (names or numbers, whitespace separated) to send processes during forceful killing phase in stop-action</shortdesc>
-<content type="boolean" default="${OCF_RESKEY_kill_signals_default}" />
+<content type="string" default="${OCF_RESKEY_kill_signals_default}" />
 </parameter>

 <parameter name="signal_delay">
@@ -293,7 +293,7 @@ Signals (names or numbers, whitespace separated) to send processes during forcef
 How many seconds to wait after sending term/kill signals to processes in stop-action.
 </longdesc>
 <shortdesc lang="en">How many seconds to wait after sending term/kill signals to processes in stop-action</shortdesc>
-<content type="boolean" default="${OCF_RESKEY_kill_signal_delay}" />
+<content type="string" default="${OCF_RESKEY_kill_signal_delay}" />
 </parameter>

 </parameters>
@@ -0,0 +1,23 @@
From f779fad52e5f515ca81218da6098398bdecac286 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 20 Jul 2023 10:18:12 +0200
Subject: [PATCH] Filesystem: fix incorrect variable name for signal_delay
 default in metadata

---
 heartbeat/Filesystem | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index b9aae8d50..066562891 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -293,7 +293,7 @@ Signals (names or numbers, whitespace separated) to send processes during forcef
 How many seconds to wait after sending term/kill signals to processes in stop-action.
 </longdesc>
 <shortdesc lang="en">How many seconds to wait after sending term/kill signals to processes in stop-action</shortdesc>
-<content type="string" default="${OCF_RESKEY_kill_signal_delay}" />
+<content type="string" default="${OCF_RESKEY_signal_delay_default}" />
 </parameter>

 </parameters>
28
SOURCES/gcp-configure-skip-bundled-lib-checks.patch
Normal file
@@ -0,0 +1,28 @@
--- ClusterLabs-resource-agents-55a4e2c9/configure.ac	2021-08-19 09:37:57.000000000 +0200
+++ ClusterLabs-resource-agents-55a4e2c9/configure.ac.modif	2021-09-02 13:12:26.336044699 +0200
@@ -522,25 +522,12 @@
 AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1)

 BUILD_GCP_PD_MOVE=1
-if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then
-	BUILD_GCP_PD_MOVE=0
-	AC_MSG_WARN("Not building gcp-pd-move")
-fi
 AM_CONDITIONAL(BUILD_GCP_PD_MOVE, test $BUILD_GCP_PD_MOVE -eq 1)

 BUILD_GCP_VPC_MOVE_ROUTE=1
-if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || \
-	test "x${HAVE_PYMOD_PYROUTE2}" != xyes || test $BUILD_OCF_PY -eq 0; then
-	BUILD_GCP_VPC_MOVE_ROUTE=0
-	AC_MSG_WARN("Not building gcp-vpc-move-route")
-fi
 AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_ROUTE, test $BUILD_GCP_VPC_MOVE_ROUTE -eq 1)

 BUILD_GCP_VPC_MOVE_VIP=1
-if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then
-	BUILD_GCP_VPC_MOVE_VIP=0
-	AC_MSG_WARN("Not building gcp-vpc-move-vip")
-fi
 AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_VIP, test $BUILD_GCP_VPC_MOVE_VIP -eq 1)

 AC_PATH_PROGS(ROUTE, route)
766
SOURCES/nova-compute-wait-NovaEvacuate.patch
Normal file
@@ -0,0 +1,766 @@
|
|||||||
|
diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
|
||||||
|
--- a/doc/man/Makefile.am 2021-08-25 09:31:14.033615965 +0200
|
||||||
|
+++ b/doc/man/Makefile.am 2021-08-24 17:59:40.679372762 +0200
|
||||||
|
@@ -97,6 +97,8 @@
|
||||||
|
ocf_heartbeat_ManageRAID.7 \
|
||||||
|
ocf_heartbeat_ManageVE.7 \
|
||||||
|
ocf_heartbeat_NodeUtilization.7 \
|
||||||
|
+ ocf_heartbeat_nova-compute-wait.7 \
|
||||||
|
+ ocf_heartbeat_NovaEvacuate.7 \
|
||||||
|
ocf_heartbeat_Pure-FTPd.7 \
|
||||||
|
ocf_heartbeat_Raid1.7 \
|
||||||
|
ocf_heartbeat_Route.7 \
|
||||||
|
diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
|
||||||
|
--- a/heartbeat/Makefile.am 2021-08-25 09:31:14.034615967 +0200
|
||||||
|
+++ b/heartbeat/Makefile.am 2021-08-24 17:59:40.679372762 +0200
|
||||||
|
@@ -29,6 +29,8 @@
|
||||||
|
|
||||||
|
ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat
|
||||||
|
|
||||||
|
+ospdir = $(OCF_RA_DIR_PREFIX)/openstack
|
||||||
|
+
|
||||||
|
dtddir = $(datadir)/$(PACKAGE_NAME)
|
||||||
|
dtd_DATA = ra-api-1.dtd metadata.rng
|
||||||
|
|
||||||
|
@@ -50,6 +52,9 @@
|
||||||
|
send_ua_SOURCES = send_ua.c IPv6addr_utils.c
|
||||||
|
send_ua_LDADD = $(LIBNETLIBS)
|
||||||
|
|
||||||
|
+osp_SCRIPTS = nova-compute-wait \
|
||||||
|
+ NovaEvacuate
|
||||||
|
+
|
||||||
|
ocf_SCRIPTS = AoEtarget \
|
||||||
|
AudibleAlarm \
|
||||||
|
ClusterMon \
|
||||||
|
diff --color -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait
|
||||||
|
--- a/heartbeat/nova-compute-wait 1970-01-01 01:00:00.000000000 +0100
|
||||||
|
+++ b/heartbeat/nova-compute-wait 2021-08-24 17:59:40.678372759 +0200
|
||||||
|
@@ -0,0 +1,317 @@
|
||||||
|
+#!/bin/sh
|
||||||
|
+# Copyright 2015 Red Hat, Inc.
|
||||||
|
+#
|
||||||
|
+# Description: Manages compute daemons
|
||||||
|
+#
|
||||||
|
+# Authors: Andrew Beekhof
|
||||||
|
+#
|
||||||
|
+# Support: openstack@lists.openstack.org
|
||||||
|
+# License: Apache Software License (ASL) 2.0
|
||||||
|
+#
|
||||||
|
+
|
||||||
|
+
|
||||||
|
+#######################################################################
|
||||||
|
+# Initialization:
|
||||||
|
+
|
||||||
|
+###
|
||||||
|
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||||
|
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||||
|
+###
|
||||||
|
+
|
||||||
|
+: ${__OCF_ACTION=$1}
|
||||||
|
+
|
||||||
|
+#######################################################################
|
||||||
|
+
|
||||||
|
+meta_data() {
|
||||||
|
+ cat <<END
|
||||||
|
+<?xml version="1.0"?>
|
||||||
|
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
|
||||||
|
+<resource-agent name="nova-compute-wait" version="1.0">
|
||||||
|
+<version>1.0</version>
|
||||||
|
+
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+OpenStack Nova Compute Server.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">OpenStack Nova Compute Server</shortdesc>
|
||||||
|
+
|
||||||
|
+<parameters>
|
||||||
|
+
|
||||||
|
+<parameter name="auth_url" unique="0" required="1">
|
||||||
|
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
|
||||||
|
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="username" unique="0" required="1">
|
||||||
|
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
|
||||||
|
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="password" unique="0" required="1">
|
||||||
|
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
|
||||||
|
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="tenant_name" unique="0" required="1">
|
||||||
|
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
|
||||||
|
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="domain" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">DNS domain</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="endpoint_type" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
|
||||||
|
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="no_shared_storage" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
|
||||||
|
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
|
||||||
|
+<content type="boolean" default="0" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="evacuation_delay" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+How long to wait for nova to finish evacuating instances elsewhere
|
||||||
|
+before starting nova-compute. Only used when the agent detects
|
||||||
|
+evacuations might be in progress.
|
||||||
|
+
|
||||||
|
+You may need to increase the start timeout when increasing this value.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Delay to allow evacuations time to complete</shortdesc>
|
||||||
|
+<content type="integer" default="120" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+</parameters>
|
||||||
|
+
|
||||||
|
+<actions>
|
||||||
|
+<action name="start" timeout="600" />
|
||||||
|
+<action name="stop" timeout="300" />
|
||||||
|
+<action name="monitor" timeout="20" interval="10" depth="0"/>
|
||||||
|
+<action name="validate-all" timeout="20" />
|
||||||
|
+<action name="meta-data" timeout="5" />
|
||||||
|
+</actions>
|
||||||
|
+</resource-agent>
|
||||||
|
+END
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+#######################################################################
|
||||||
|
+
|
||||||
|
+# don't exit on TERM, to test that lrmd makes sure that we do exit
|
||||||
|
+trap sigterm_handler TERM
|
||||||
|
+sigterm_handler() {
|
||||||
|
+ ocf_log info "They use TERM to bring us down. No such luck."
|
||||||
|
+ return
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+nova_usage() {
|
||||||
|
+ cat <<END
|
||||||
|
+usage: $0 {start|stop|monitor|validate-all|meta-data}
|
||||||
|
+
|
||||||
|
+Expects to have a fully populated OCF RA-compliant environment set.
|
||||||
|
+END
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+nova_start() {
|
||||||
|
+ build_unfence_overlay
|
||||||
|
+
|
||||||
|
+ state=$(attrd_updater -p -n evacuate -N ${NOVA_HOST} | sed -e 's/.*value=//' | tr -d '"' )
|
||||||
|
+ if [ "x$state" = x ]; then
|
||||||
|
+ : never been fenced
|
||||||
|
+
|
||||||
|
+ elif [ "x$state" = xno ]; then
|
||||||
|
+ : has been evacuated, however it could have been 1s ago
|
||||||
|
+ ocf_log info "Pausing to give evacuations from ${NOVA_HOST} time to complete"
|
||||||
|
+ sleep ${OCF_RESKEY_evacuation_delay}
|
||||||
|
+
|
||||||
|
+ else
|
||||||
|
+ while [ "x$state" != "xno" ]; do
|
||||||
|
+ ocf_log info "Waiting for pending evacuations from ${NOVA_HOST}"
|
||||||
|
+ state=$(attrd_updater -p -n evacuate -N ${NOVA_HOST} | sed -e 's/.*value=//' | tr -d '"' )
|
||||||
|
+ sleep 5
|
||||||
|
+ done
|
||||||
|
+
|
||||||
|
+ ocf_log info "Pausing to give evacuations from ${NOVA_HOST} time to complete"
|
||||||
|
+ sleep ${OCF_RESKEY_evacuation_delay}
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ touch "$statefile"
|
||||||
|
+
|
||||||
|
+ return $OCF_SUCCESS
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+nova_stop() {
|
||||||
|
+ rm -f "$statefile"
|
||||||
|
+ return $OCF_SUCCESS
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+nova_monitor() {
|
||||||
|
+ if [ ! -f "$statefile" ]; then
|
||||||
|
+ return $OCF_NOT_RUNNING
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ return $OCF_SUCCESS
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+nova_notify() {
|
||||||
|
+ return $OCF_SUCCESS
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+build_unfence_overlay() {
|
||||||
|
+ fence_options=""
|
||||||
|
+
|
||||||
|
+ if [ -z "${OCF_RESKEY_auth_url}" ]; then
|
||||||
|
+ candidates=$(/usr/sbin/stonith_admin -l ${NOVA_HOST})
|
||||||
|
+ for candidate in ${candidates}; do
|
||||||
|
+ pcs stonith show $d | grep -q fence_compute
|
||||||
|
+ if [ $? = 0 ]; then
|
||||||
|
+ ocf_log info "Unfencing nova based on: $candidate"
|
||||||
|
+ fence_auth=$(pcs stonith show $candidate | grep Attributes: | sed -e s/Attributes:// -e s/-/_/g -e 's/[^ ]\+=/OCF_RESKEY_\0/g' -e s/passwd/password/g)
|
||||||
|
+ eval "export $fence_auth"
|
||||||
|
+ break
|
||||||
|
+ fi
|
||||||
|
+ done
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ # Copied from NovaEvacuate
|
||||||
|
+ if [ -z "${OCF_RESKEY_auth_url}" ]; then
|
||||||
|
+ ocf_exit_reason "auth_url not configured"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}"
|
||||||
|
+
|
||||||
|
+ if [ -z "${OCF_RESKEY_username}" ]; then
|
||||||
|
+ ocf_exit_reason "username not configured"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ fence_options="${fence_options} -l ${OCF_RESKEY_username}"
|
||||||
|
+
|
||||||
|
+ if [ -z "${OCF_RESKEY_password}" ]; then
|
||||||
|
+ ocf_exit_reason "password not configured"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ fence_options="${fence_options} -p ${OCF_RESKEY_password}"
|
||||||
|
+
|
||||||
|
+ if [ -z "${OCF_RESKEY_tenant_name}" ]; then
|
||||||
|
+ ocf_exit_reason "tenant_name not configured"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
|
||||||
|
+
|
||||||
|
+ if [ -n "${OCF_RESKEY_domain}" ]; then
|
||||||
|
+ fence_options="${fence_options} -d ${OCF_RESKEY_domain}"
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ if [ -n "${OCF_RESKEY_region_name}" ]; then
|
||||||
|
+ fence_options="${fence_options} \
|
||||||
|
+ --region-name ${OCF_RESKEY_region_name}"
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ if [ -n "${OCF_RESKEY_insecure}" ]; then
|
||||||
|
+ if ocf_is_true "${OCF_RESKEY_insecure}"; then
|
||||||
|
+ fence_options="${fence_options} --insecure"
|
||||||
|
+ fi
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
|
||||||
|
+ if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
|
||||||
|
+ fence_options="${fence_options} --no-shared-storage"
|
||||||
|
+ fi
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
|
||||||
|
+ case ${OCF_RESKEY_endpoint_type} in
|
||||||
|
+ adminURL|publicURL|internalURL)
|
||||||
|
+ ;;
|
||||||
|
+ *)
|
||||||
|
+ ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type}" \
|
||||||
|
+ "not valid. Use adminURL or publicURL or internalURL"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ ;;
|
||||||
|
+ esac
|
||||||
|
+ fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ mkdir -p /run/systemd/system/openstack-nova-compute.service.d
|
||||||
|
+ cat<<EOF>/run/systemd/system/openstack-nova-compute.service.d/unfence-20.conf
|
||||||
|
+[Service]
|
||||||
|
+ExecStartPost=/sbin/fence_compute ${fence_options} -o on -n ${NOVA_HOST}
|
||||||
|
+EOF
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+nova_validate() {
|
||||||
|
+ rc=$OCF_SUCCESS
|
||||||
|
+
|
||||||
|
+ check_binary crudini
|
||||||
|
+ check_binary nova-compute
|
||||||
|
+ check_binary fence_compute
|
||||||
|
+
|
||||||
|
+ if [ ! -f /etc/nova/nova.conf ]; then
|
||||||
|
+ ocf_exit_reason "/etc/nova/nova.conf not found"
|
||||||
|
+ exit $OCF_ERR_CONFIGURED
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ # Is the state directory writable?
|
||||||
|
+ state_dir=$(dirname $statefile)
|
||||||
|
+ touch "$state_dir/$$"
|
||||||
|
+ if [ $? != 0 ]; then
|
||||||
|
+ ocf_exit_reason "Invalid state directory: $state_dir"
|
||||||
|
+ return $OCF_ERR_ARGS
|
||||||
|
+ fi
|
||||||
|
+ rm -f "$state_dir/$$"
|
||||||
|
+
|
||||||
|
+ NOVA_HOST=$(crudini --get /etc/nova/nova.conf DEFAULT host 2>/dev/null)
|
||||||
|
+ if [ $? = 1 ]; then
|
||||||
|
+ short_host=$(uname -n | awk -F. '{print $1}')
|
||||||
|
+ if [ "x${OCF_RESKEY_domain}" != x ]; then
|
||||||
|
+ NOVA_HOST=${short_host}.${OCF_RESKEY_domain}
|
||||||
|
+ else
|
||||||
|
+ NOVA_HOST=$(uname -n)
|
||||||
|
+ fi
|
||||||
|
+ fi
|
||||||
|
+
|
||||||
|
+ if [ $rc != $OCF_SUCCESS ]; then
|
||||||
|
+ exit $rc
|
||||||
|
+ fi
|
||||||
|
+ return $rc
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
|
||||||
|
+
|
||||||
|
+: ${OCF_RESKEY_evacuation_delay=120}
|
||||||
|
+case $__OCF_ACTION in
|
||||||
|
+meta-data) meta_data
|
||||||
|
+ exit $OCF_SUCCESS
|
||||||
|
+ ;;
|
||||||
|
+usage|help) nova_usage
|
||||||
|
+ exit $OCF_SUCCESS
|
||||||
|
+ ;;
|
||||||
|
+esac
|
||||||
|
+
|
||||||
|
+case $__OCF_ACTION in
|
||||||
|
+start) nova_validate; nova_start;;
|
||||||
|
+stop) nova_stop;;
|
||||||
|
+monitor) nova_validate; nova_monitor;;
|
||||||
|
+notify) nova_notify;;
|
||||||
|
+validate-all) exit $OCF_SUCCESS;;
|
||||||
|
+*) nova_usage
|
||||||
|
+ exit $OCF_ERR_UNIMPLEMENTED
|
||||||
|
+ ;;
|
||||||
|
+esac
|
||||||
|
+rc=$?
|
||||||
|
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
|
||||||
|
+exit $rc
|
||||||
|
+
|
||||||
|
diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
|
||||||
|
--- a/heartbeat/NovaEvacuate 1970-01-01 01:00:00.000000000 +0100
|
||||||
|
+++ b/heartbeat/NovaEvacuate 2021-08-24 17:59:40.682372770 +0200
|
||||||
|
@@ -0,0 +1,407 @@
|
||||||
|
+#!/bin/bash
|
||||||
|
+#
|
||||||
|
+# Copyright 2015 Red Hat, Inc.
|
||||||
|
+#
|
||||||
|
+# Description: Manages evacuation of nodes running nova-compute
|
||||||
|
+#
|
||||||
|
+# Authors: Andrew Beekhof
|
||||||
|
+#
|
||||||
|
+# Support: openstack@lists.openstack.org
|
||||||
|
+# License: Apache Software License (ASL) 2.0
|
||||||
|
+#
|
||||||
|
+
|
||||||
|
+
|
||||||
|
+#######################################################################
|
||||||
|
+# Initialization:
|
||||||
|
+
|
||||||
|
+###
|
||||||
|
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||||
|
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||||
|
+###
|
||||||
|
+
|
||||||
|
+: ${__OCF_ACTION=$1}
|
||||||
|
+
|
||||||
|
+#######################################################################
|
||||||
|
+
|
||||||
|
+meta_data() {
|
||||||
|
+ cat <<END
|
||||||
|
+<?xml version="1.0"?>
|
||||||
|
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
|
||||||
|
+<resource-agent name="NovaEvacuate" version="1.0">
|
||||||
|
+<version>1.0</version>
|
||||||
|
+
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Facility for tacking a list of compute nodes and reliably evacuating the ones that fence_evacuate has flagged.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Evacuator for OpenStack Nova Compute Server</shortdesc>
|
||||||
|
+
|
||||||
|
+<parameters>
|
||||||
|
+
|
||||||
|
+<parameter name="auth_url" unique="0" required="1">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Authorization URL for connecting to keystone in admin context
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Authorization URL</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="username" unique="0" required="1">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Username for connecting to keystone in admin context
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Username</shortdesc>
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="password" unique="0" required="1">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Password for connecting to keystone in admin context
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Password</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="tenant_name" unique="0" required="1">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Tenant name for connecting to keystone in admin context.
|
||||||
|
+Note that with Keystone V3 tenant names are only unique within a domain.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Keystone v2 Tenant or v3 Project Name</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="user_domain" unique="0" required="1">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+User's domain name. Used when authenticating to Keystone.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Keystone v3 User Domain</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="project_domain" unique="0" required="1">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Domain name containing project. Used when authenticating to Keystone.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Keystone v3 Project Domain</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="endpoint_type" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Nova API location (internal, public or admin URL)
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Nova API location (internal, public or admin URL)</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="region_name" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Region name for connecting to nova.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Region name</shortdesc>
|
||||||
|
+<content type="string" default="" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="insecure" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Explicitly allow client to perform "insecure" TLS (https) requests.
|
||||||
|
+The server's certificate will not be verified against any certificate authorities.
|
||||||
|
+This option should be used with caution.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Allow insecure TLS requests</shortdesc>
|
||||||
|
+<content type="boolean" default="0" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="no_shared_storage" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Disable shared storage recovery for instances. Use at your own risk!
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Disable shared storage recovery for instances</shortdesc>
|
||||||
|
+<content type="boolean" default="0" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="verbose" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Enable extra logging from the evacuation process
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Enable debug logging</shortdesc>
|
||||||
|
+<content type="boolean" default="0" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+<parameter name="evacuate_delay" unique="0" required="0">
|
||||||
|
+<longdesc lang="en">
|
||||||
|
+Allows delaying the nova evacuate API call, e.g. to give a storage array time to clean
|
||||||
|
+up eventual locks/leases.
|
||||||
|
+</longdesc>
|
||||||
|
+<shortdesc lang="en">Nova evacuate delay</shortdesc>
|
||||||
|
+<content type="integer" default="0" />
|
||||||
|
+</parameter>
|
||||||
|
+
|
||||||
|
+</parameters>
|
||||||
|
+
|
||||||
|
+<actions>
|
||||||
|
+<action name="start" timeout="20" />
|
||||||
|
+<action name="stop" timeout="20" />
|
||||||
|
+<action name="monitor" timeout="600" interval="10" depth="0"/>
|
||||||
|
+<action name="validate-all" timeout="20" />
|
||||||
|
+<action name="meta-data" timeout="5" />
|
||||||
|
+</actions>
|
||||||
|
+</resource-agent>
|
||||||
|
+END
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+#######################################################################
|
||||||
|
+
|
||||||
|
+# don't exit on TERM, to test that lrmd makes sure that we do exit
|
||||||
|
+trap sigterm_handler TERM
|
||||||
|
+sigterm_handler() {
|
||||||
|
+ ocf_log info "They use TERM to bring us down. No such luck."
|
||||||
|
+ return
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+evacuate_usage() {
|
||||||
|
+ cat <<END
|
||||||
|
+usage: $0 {start|stop|monitor|validate-all|meta-data}
|
||||||
|
+
|
||||||
|
+Expects to have a fully populated OCF RA-compliant environment set.
|
||||||
|
+END
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+evacuate_stop() {
|
||||||
|
+ rm -f "$statefile"
|
||||||
|
+ return $OCF_SUCCESS
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+evacuate_start() {
|
||||||
|
+ touch "$statefile"
|
||||||
|
+ # Do not invole monitor here so that the start timeout can be low
|
||||||
|
+ return $?
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+update_evacuation() {
|
||||||
|
+ attrd_updater -p -n evacuate -Q -N ${1} -v ${2}
|
||||||
|
+ arc=$?
|
||||||
|
+ if [ ${arc} != 0 ]; then
|
||||||
|
+ ocf_log warn "Can not set evacuation state of ${1} to ${2}: ${arc}"
|
||||||
|
+ fi
|
||||||
|
+ return ${arc}
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+handle_evacuations() {
|
||||||
|
+ while [ $# -gt 0 ]; do
|
||||||
|
+ node=$1
|
||||||
|
+ state=$2
|
||||||
|
+ shift; shift;
|
||||||
|
+ need_evacuate=0
+
+ case $state in
+ "")
+ ;;
+ no)
+ ocf_log debug "$node is either fine or already handled"
+ ;;
+ yes) need_evacuate=1
+ ;;
+ *@*)
+ where=$(echo $state | awk -F@ '{print $1}')
+ when=$(echo $state | awk -F@ '{print $2}')
+ now=$(date +%s)
+
+ if [ $(($now - $when)) -gt 60 ]; then
+ ocf_log info "Processing partial evacuation of $node by $where at $when"
+ need_evacuate=1
+ else
+ # Give some time for any in-flight evacuations to either complete or fail
+ # Nova won't react well if there are two overlapping requests
+ ocf_log info "Deferring processing partial evacuation of $node by $where at $when"
+ fi
+ ;;
+ esac
+
+ if [ $need_evacuate = 1 ]; then
+ fence_agent="fence_compute"
+
+ if have_binary fence_evacuate
+ then
+ fence_agent="fence_evacuate"
+ fi
+
+ if [ ${OCF_RESKEY_evacuate_delay} != 0 ]; then
+ ocf_log info "Delaying nova evacuate by $OCF_RESKEY_evacuate_delay seconds"
+ sleep ${OCF_RESKEY_evacuate_delay}
+ fi
+
+ ocf_log notice "Initiating evacuation of $node with $fence_agent"
+ $fence_agent ${fence_options} -o status -n ${node}
+ if [ $? = 1 ]; then
+ ocf_log info "Nova does not know about ${node}"
+ # Dont mark as no because perhaps nova is unavailable right now
+ continue
+ fi
+
+ update_evacuation ${node} "$(uname -n)@$(date +%s)"
+ if [ $? != 0 ]; then
+ return $OCF_SUCCESS
+ fi
+
+ $fence_agent ${fence_options} -o off -n $node
+ rc=$?
+
+ if [ $rc = 0 ]; then
+ update_evacuation ${node} no
+ ocf_log notice "Completed evacuation of $node"
+ else
+ ocf_log warn "Evacuation of $node failed: $rc"
+ update_evacuation ${node} yes
+ fi
+ fi
+ done
+
+ return $OCF_SUCCESS
+}
+
+evacuate_monitor() {
+ if [ ! -f "$statefile" ]; then
+ return $OCF_NOT_RUNNING
+ fi
+
+ handle_evacuations $(
+ attrd_updater -n evacuate -A \
+ 2> >(grep -v "attribute does not exist" 1>&2) |
+ sed 's/ value=""/ value="no"/' |
+ tr '="' ' ' |
+ awk '{print $4" "$6}'
+ )
+ return $OCF_SUCCESS
+}
+
+evacuate_validate() {
+ rc=$OCF_SUCCESS
+ fence_options=""
+
+
+ if ! have_binary fence_evacuate; then
+ check_binary fence_compute
+ fi
+
+ # Is the state directory writable?
+ state_dir=$(dirname $statefile)
+ touch "$state_dir/$$"
+ if [ $? != 0 ]; then
+ ocf_exit_reason "Invalid state directory: $state_dir"
+ return $OCF_ERR_ARGS
+ fi
+ rm -f "$state_dir/$$"
+
+ if [ -z "${OCF_RESKEY_auth_url}" ]; then
+ ocf_exit_reason "auth_url not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}"
+
+ if [ -z "${OCF_RESKEY_username}" ]; then
+ ocf_exit_reason "username not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -l ${OCF_RESKEY_username}"
+
+ if [ -z "${OCF_RESKEY_password}" ]; then
+ ocf_exit_reason "password not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -p ${OCF_RESKEY_password}"
+
+ if [ -z "${OCF_RESKEY_tenant_name}" ]; then
+ ocf_exit_reason "tenant_name not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
+
+ if [ -n "${OCF_RESKEY_user_domain}" ]; then
+ fence_options="${fence_options} -u ${OCF_RESKEY_user_domain}"
+ fi
+
+ if [ -n "${OCF_RESKEY_project_domain}" ]; then
+ fence_options="${fence_options} -P ${OCF_RESKEY_project_domain}"
+ fi
+
+ if [ -n "${OCF_RESKEY_region_name}" ]; then
+ fence_options="${fence_options} \
+ --region-name ${OCF_RESKEY_region_name}"
+ fi
+
+ if [ -n "${OCF_RESKEY_insecure}" ]; then
+ if ocf_is_true "${OCF_RESKEY_insecure}"; then
+ fence_options="${fence_options} --insecure"
+ fi
+ fi
+
+ if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
+ if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
+ fence_options="${fence_options} --no-shared-storage"
+ fi
+ fi
+
+ if [ -n "${OCF_RESKEY_verbose}" ]; then
+ if ocf_is_true "${OCF_RESKEY_verbose}"; then
+ fence_options="${fence_options} --verbose"
+ fi
+ fi
+
+ if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
+ case ${OCF_RESKEY_endpoint_type} in
+ adminURL|publicURL|internalURL) ;;
+ *)
+ ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type} not valid. Use adminURL or publicURL or internalURL"
+ exit $OCF_ERR_CONFIGURED
+ ;;
+ esac
+ fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
+ fi
+
+ if [ -z "${OCF_RESKEY_evacuate_delay}" ]; then
+ OCF_RESKEY_evacuate_delay=0
+ fi
+
+ if [ $rc != $OCF_SUCCESS ]; then
+ exit $rc
+ fi
+ return $rc
+}
+
+statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
+
+case $__OCF_ACTION in
+ start)
+ evacuate_validate
+ evacuate_start
+ ;;
+ stop)
+ evacuate_stop
+ ;;
+ monitor)
+ evacuate_validate
+ evacuate_monitor
+ ;;
+ meta-data)
+ meta_data
+ exit $OCF_SUCCESS
+ ;;
+ usage|help)
+ evacuate_usage
+ exit $OCF_SUCCESS
+ ;;
+ validate-all)
+ exit $OCF_SUCCESS
+ ;;
+ *)
+ evacuate_usage
+ exit $OCF_ERR_UNIMPLEMENTED
+ ;;
+esac
+rc=$?
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
+exit $rc
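Note (illustration, not part of the patch above): evacuate_monitor() feeds handle_evacuations() from the per-node "evacuate" cluster attribute. A minimal sketch of how that attribute can be inspected on a cluster node is shown below; the host name in the sample output is a placeholder.

# Illustrative only: query the "evacuate" node attribute that NovaEvacuate polls.
# Output lines look like: name="evacuate" host="compute-0" value="yes",
# which the agent reduces to "<host> <value>" pairs before deciding whether to evacuate.
attrd_updater -n evacuate -A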
592
SOURCES/python3-syntax-fixes.patch
Normal file
@ -0,0 +1,592 @@
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2018-10-08 12:36:31.868765636 +0200
|
||||||
|
@@ -52,8 +52,8 @@
|
||||||
|
if not filename == None:
|
||||||
|
self.exportInstanceToFile(result,filename)
|
||||||
|
else:
|
||||||
|
- print 'Filename is needed'
|
||||||
|
- except Exception,e:
|
||||||
|
+ print('Filename is needed')
|
||||||
|
+ except Exception as e:
|
||||||
|
print(e)
|
||||||
|
def _optimizeResult(self,result):
|
||||||
|
keys = result.keys()
|
||||||
|
@@ -81,9 +81,9 @@
|
||||||
|
fp = open(fileName,'w')
|
||||||
|
try :
|
||||||
|
fp.write(json.dumps(result,indent=4))
|
||||||
|
- print "success"
|
||||||
|
+ print("success")
|
||||||
|
except IOError:
|
||||||
|
- print "Error: can\'t find file or read data"
|
||||||
|
+ print("Error: can\'t find file or read data")
|
||||||
|
finally:
|
||||||
|
fp.close()
|
||||||
|
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2018-10-08 12:36:53.882358851 +0200
|
||||||
|
@@ -16,7 +16,7 @@
|
||||||
|
if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
|
||||||
|
filename = keyValues['--filename'][0]
|
||||||
|
else:
|
||||||
|
- print "A profile is needed! please use \'--filename\' and add the profile name."
|
||||||
|
+ print("A profile is needed! please use \'--filename\' and add the profile name.")
|
||||||
|
return filename
|
||||||
|
|
||||||
|
def getInstanceCount(self,keyValues):
|
||||||
|
@@ -25,7 +25,7 @@
|
||||||
|
if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0:
|
||||||
|
count = keyValues['--instancecount'][0]
|
||||||
|
else:
|
||||||
|
- print "InstanceCount should be a positive number! The default value(1) will be used!"
|
||||||
|
+ print("InstanceCount should be a positive number! The default value(1) will be used!")
|
||||||
|
return int(count)
|
||||||
|
|
||||||
|
def getSubOperations(self,cmd,operation):
|
||||||
|
@@ -65,8 +65,8 @@
|
||||||
|
_newkeyValues["RegionId"] = newkeyValues["RegionId"]
|
||||||
|
self._handExtraOperation(cmd,extraOperation,_newkeyValues,version,secureRequest)
|
||||||
|
else:
|
||||||
|
- print "InstanceId is need!"
|
||||||
|
- except Exception,e:
|
||||||
|
+ print("InstanceId is need!")
|
||||||
|
+ except Exception as e:
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
def _handExtraOperation(self,cmd,extraOperation,keyValues,version , secureRequest = False):
|
||||||
|
@@ -81,7 +81,7 @@
|
||||||
|
response.display_response("error", result, "json")
|
||||||
|
else:
|
||||||
|
response.display_response(extraOperation, result, "json")
|
||||||
|
- except Exception,e:
|
||||||
|
+ except Exception as e:
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
|
||||||
|
@@ -127,7 +127,7 @@
|
||||||
|
'''
|
||||||
|
if data.has_key('InstanceId') and len(data['InstanceId']) > 0:
|
||||||
|
instanceId = data['InstanceId']
|
||||||
|
- except Exception,e:
|
||||||
|
+ except Exception as e:
|
||||||
|
pass
|
||||||
|
finally:
|
||||||
|
return instanceId
|
||||||
|
@@ -156,5 +156,5 @@
|
||||||
|
if __name__ == "__main__":
|
||||||
|
handler = EcsImportHandler()
|
||||||
|
handler.getKVFromJson('ttt')
|
||||||
|
- print handler.getKVFromJson('ttt')
|
||||||
|
+ print(handler.getKVFromJson('ttt'))
|
||||||
|
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-10-08 12:37:08.373091088 +0200
|
||||||
|
@@ -77,8 +77,8 @@
|
||||||
|
if not filename == None:
|
||||||
|
self.exportInstanceToFile(result,filename)
|
||||||
|
else:
|
||||||
|
- print 'Filename is needed'
|
||||||
|
- except Exception,e:
|
||||||
|
+ print('Filename is needed')
|
||||||
|
+ except Exception as e:
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
def exportInstanceToFile(self, result, filename):
|
||||||
|
@@ -96,9 +96,9 @@
|
||||||
|
fp = open(fileName,'w')
|
||||||
|
try :
|
||||||
|
fp.write(json.dumps(result,indent=4))
|
||||||
|
- print "success"
|
||||||
|
+ print("success")
|
||||||
|
except IOError:
|
||||||
|
- print "Error: can\'t find file or read data"
|
||||||
|
+ print("Error: can\'t find file or read data")
|
||||||
|
finally:
|
||||||
|
fp.close()
|
||||||
|
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-10-08 12:36:20.997966509 +0200
|
||||||
|
@@ -26,7 +26,7 @@
|
||||||
|
count = keyValues[import_count][0]
|
||||||
|
else:
|
||||||
|
pass
|
||||||
|
- # print "InstanceCount should be a positive number! The default value(1) will be used!"
|
||||||
|
+ # print("InstanceCount should be a positive number! The default value(1) will be used!")
|
||||||
|
return int(count), "InstanceCount is "+str(count)+" created."
|
||||||
|
|
||||||
|
def getSubOperations(self,cmd,operation):
|
||||||
|
@@ -46,7 +46,7 @@
|
||||||
|
if self.apiHandler.needSetDefaultRegion(cmdInstance, newkeyValues):
|
||||||
|
newkeyValues["RegionId"] = [self.extensionHandler.getUserRegion()]
|
||||||
|
newkeyValues["ClientToken"] = [self.random_str()]
|
||||||
|
- # print newkeyValues.keys()
|
||||||
|
+ # print(newkeyValues.keys())
|
||||||
|
# return
|
||||||
|
# self._setAttr(cmdInstance, newkeyValues) # set all key values in instance
|
||||||
|
# self.apiHandler.changeEndPoint(cmdInstance, newkeyValues)
|
||||||
|
@@ -58,7 +58,7 @@
|
||||||
|
response.display_response("error", result, "json")
|
||||||
|
else:
|
||||||
|
response.display_response(item, result, "json")
|
||||||
|
- except Exception,e:
|
||||||
|
+ except Exception as e:
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
def getKVFromJson(self,filename):
|
||||||
|
@@ -77,7 +77,7 @@
|
||||||
|
fp = open(fileName,'r')
|
||||||
|
data=json.loads(fp.read())
|
||||||
|
keys = data.keys()
|
||||||
|
- # print keys, type(data['Items']['DBInstanceAttribute'][0])
|
||||||
|
+ # print(keys, type(data['Items']['DBInstanceAttribute'][0]))
|
||||||
|
# instanceAttribute = data['Items']['DBInstanceAttribute'][0]
|
||||||
|
items = data['Items']['DBInstanceAttribute'][0]
|
||||||
|
keys = items.keys()
|
||||||
|
@@ -130,7 +130,7 @@
|
||||||
|
if __name__ == "__main__":
|
||||||
|
handler = RdsImportDBInstanceHandler()
|
||||||
|
# handler.getKVFromJson('ttt')
|
||||||
|
- # print handler.getKVFromJson('ttt')
|
||||||
|
- print handler.random_str()
|
||||||
|
+ # print(handler.getKVFromJson('ttt'))
|
||||||
|
+ print(handler.random_str())
|
||||||
|
|
||||||
|
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-10-08 12:11:19.743703469 +0200
|
||||||
|
@@ -24,9 +24,9 @@
|
||||||
|
_value = keyValues[ProfileCmd.name][0] # use the first value
|
||||||
|
self.extensionCliHandler.setUserProfile(_value)
|
||||||
|
else:
|
||||||
|
- print "Do your forget profile name? please use \'--name\' and add the profile name."
|
||||||
|
+ print("Do your forget profile name? please use \'--name\' and add the profile name.")
|
||||||
|
else:
|
||||||
|
- print "[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?"
|
||||||
|
+ print("[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?")
|
||||||
|
|
||||||
|
def addProfileCmd(self, cmd, keyValues):
|
||||||
|
userKey = ''
|
||||||
|
@@ -52,12 +52,12 @@
|
||||||
|
finally:
|
||||||
|
f.close()
|
||||||
|
else:
|
||||||
|
- print "[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?"
|
||||||
|
+ print("[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
handler = ProfileHandler()
|
||||||
|
handler.handleProfileCmd("useprofile", {'--name':["profile444"]})
|
||||||
|
- print handler.extensionCliHandler.getUserProfile()
|
||||||
|
+ print(handler.extensionCliHandler.getUserProfile())
|
||||||
|
handler.addProfileCmd("addProfile", {})
|
||||||
|
- handler.addProfileCmd("addProfile", {'--name':["profile2222"]})
|
||||||
|
\ No newline at end of file
|
||||||
|
+ handler.addProfileCmd("addProfile", {'--name':["profile2222"]})
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-10-08 12:12:25.602486634 +0200
|
||||||
|
@@ -24,14 +24,14 @@
|
||||||
|
self.openApiDataHandler = aliyunOpenApiData.aliyunOpenApiDataHandler()
|
||||||
|
|
||||||
|
def showUsage(self):
|
||||||
|
- print "usage: aliyuncli <command> <operation> [options and parameters]"
|
||||||
|
+ print("usage: aliyuncli <command> <operation> [options and parameters]")
|
||||||
|
|
||||||
|
def showExample(self):
|
||||||
|
- print "show example"
|
||||||
|
+ print("show example")
|
||||||
|
|
||||||
|
def showCmdError(self, cmd):
|
||||||
|
self.showUsage()
|
||||||
|
- print "<aliyuncli> the valid command as follows:\n"
|
||||||
|
+ print("<aliyuncli> the valid command as follows:\n")
|
||||||
|
cmds = self.openApiDataHandler.getApiCmds()
|
||||||
|
self.printAsFormat(cmds)
|
||||||
|
|
||||||
|
@@ -44,7 +44,7 @@
|
||||||
|
error.printInFormat("Wrong version", "The sdk version is not exit.")
|
||||||
|
return None
|
||||||
|
self.showUsage()
|
||||||
|
- print "["+cmd+"]","valid operations as follows:\n"
|
||||||
|
+ print("["+cmd+"]","valid operations as follows:\n")
|
||||||
|
operations = self.openApiDataHandler.getApiOperations(cmd, version)
|
||||||
|
extensions = self.openApiDataHandler.getExtensionOperationsFromCmd(cmd)
|
||||||
|
operations.update(extensions)
|
||||||
|
@@ -56,8 +56,8 @@
|
||||||
|
self.printAsFormat(operations)
|
||||||
|
|
||||||
|
def showParameterError(self, cmd, operation, parameterlist):
|
||||||
|
- print 'usage: aliyuncli <command> <operation> [options and parameters]'
|
||||||
|
- print '['+cmd+"."+operation+']: current operation can uses parameters as follow :\n'
|
||||||
|
+ print('usage: aliyuncli <command> <operation> [options and parameters]')
|
||||||
|
+ print('['+cmd+"."+operation+']: current operation can uses parameters as follow :\n')
|
||||||
|
self.printAsFormat(parameterlist)
|
||||||
|
pass
|
||||||
|
|
||||||
|
@@ -72,7 +72,7 @@
|
||||||
|
tmpList.append(item)
|
||||||
|
count = count+1
|
||||||
|
if len(tmpList) == 2:
|
||||||
|
- print '{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10')
|
||||||
|
+ print('{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10'))
|
||||||
|
tmpList = list()
|
||||||
|
if len(tmpList) == 1 and count == len(mlist):
|
||||||
|
- print tmpList[0]
|
||||||
|
\ No newline at end of file
|
||||||
|
+ print(tmpList[0])
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-10-08 12:12:42.799168903 +0200
|
||||||
|
@@ -91,7 +91,7 @@
|
||||||
|
keyValues["RegionId"] = [self.extensionHandler.getUserRegion()]
|
||||||
|
#check necessaryArgs as:accesskeyid accesskeysecret regionId
|
||||||
|
if not self.handler.hasNecessaryArgs(keyValues):
|
||||||
|
- print 'accesskeyid/accesskeysecret/regionId is absence'
|
||||||
|
+ print('accesskeyid/accesskeysecret/regionId is absence')
|
||||||
|
return
|
||||||
|
result = self.handler.getResponse(cmd,operation,className,cmdInstance,keyValues,secureRequest)
|
||||||
|
if result is None:
|
||||||
|
@@ -102,7 +102,7 @@
|
||||||
|
else:
|
||||||
|
response.display_response(operation, result, outPutFormat,keyValues)
|
||||||
|
else:
|
||||||
|
- print 'aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com'
|
||||||
|
+ print('aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com')
|
||||||
|
elif self.handler.isAvailableExtensionOperation(cmd, operation):
|
||||||
|
if self.args.__len__() >= 3 and self.args[2] == 'help':
|
||||||
|
import commandConfigure
|
||||||
|
@@ -125,7 +125,7 @@
|
||||||
|
def showInstanceAttribute(self, cmd, operation, classname):
|
||||||
|
if self.args.__len__() >= 3 and self.args[2] == "help":
|
||||||
|
self.helper.showParameterError(cmd, operation, self.completer._help_to_show_instance_attribute(classname))
|
||||||
|
- #print self.completer._help_to_show_instance_attribute(cmdInstance)
|
||||||
|
+ #print(self.completer._help_to_show_instance_attribute(cmdInstance))
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-10-08 12:12:54.764947819 +0200
|
||||||
|
@@ -141,7 +141,7 @@
|
||||||
|
_key = keyValues[keystr][0]
|
||||||
|
if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0:
|
||||||
|
_secret = keyValues[secretstr][0]
|
||||||
|
- #print "accesskeyid: ", _key , "accesskeysecret: ",_secret
|
||||||
|
+ #print("accesskeyid: ", _key , "accesskeysecret: ",_secret)
|
||||||
|
return _key, _secret
|
||||||
|
|
||||||
|
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-10-08 12:13:23.672413710 +0200
|
||||||
|
@@ -161,12 +161,12 @@
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
upgradeHandler = aliyunCliUpgradeHandler()
|
||||||
|
- # print upgradeHandler.getLatestTimeFromServer()
|
||||||
|
+ # print(upgradeHandler.getLatestTimeFromServer())
|
||||||
|
# flag, url = upgradeHandler.isNewVersionReady()
|
||||||
|
# if flag:
|
||||||
|
- # print url
|
||||||
|
+ # print(url)
|
||||||
|
# else:
|
||||||
|
- # print "current version is latest one"
|
||||||
|
- # print "final test:"
|
||||||
|
- print upgradeHandler.checkForUpgrade()
|
||||||
|
- print upgradeHandler.handleUserChoice("N")
|
||||||
|
+ # print("current version is latest one")
|
||||||
|
+ # print("final test:")
|
||||||
|
+ print(upgradeHandler.checkForUpgrade())
|
||||||
|
+ print(upgradeHandler.handleUserChoice("N"))
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-10-08 12:14:46.830877248 +0200
|
||||||
|
@@ -127,35 +127,35 @@
|
||||||
|
|
||||||
|
# this api will show help page when user input aliyuncli help(-h or --help)
|
||||||
|
def showAliyunCliHelp(self):
|
||||||
|
- print color.bold+"ALIYUNCLI()"+color.end
|
||||||
|
- print color.bold+"\nNAME"+color.end
|
||||||
|
- print "\taliyuncli -"
|
||||||
|
- print color.bold+"\nDESCRIPTION"+color.end
|
||||||
|
- print "\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. "
|
||||||
|
- print color.bold+"\nSYNOPSIS"+color.end
|
||||||
|
- print "\taliyuncli <command> <operation> [options and parameters]"
|
||||||
|
- print "\n\taliyuncli has supported command completion now. The detail you can check our site."
|
||||||
|
- print color.bold+"OPTIONS"+color.end
|
||||||
|
- print color.bold+"\tconfigure"+color.end
|
||||||
|
- print "\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)"
|
||||||
|
- print color.bold+"\n\t--output"+color.end+" (string)"
|
||||||
|
- print "\n\tThe formatting style for command output."
|
||||||
|
- print "\n\to json"
|
||||||
|
- print "\n\to text"
|
||||||
|
- print "\n\to table"
|
||||||
|
+ print(color.bold+"ALIYUNCLI()"+color.end)
|
||||||
|
+ print(color.bold+"\nNAME"+color.end)
|
||||||
|
+ print("\taliyuncli -")
|
||||||
|
+ print(color.bold+"\nDESCRIPTION"+color.end)
|
||||||
|
+ print("\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. ")
|
||||||
|
+ print(color.bold+"\nSYNOPSIS"+color.end)
|
||||||
|
+ print("\taliyuncli <command> <operation> [options and parameters]")
|
||||||
|
+ print("\n\taliyuncli has supported command completion now. The detail you can check our site.")
|
||||||
|
+ print(color.bold+"OPTIONS"+color.end)
|
||||||
|
+ print(color.bold+"\tconfigure"+color.end)
|
||||||
|
+ print("\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)")
|
||||||
|
+ print(color.bold+"\n\t--output"+color.end+" (string)")
|
||||||
|
+ print("\n\tThe formatting style for command output.")
|
||||||
|
+ print("\n\to json")
|
||||||
|
+ print("\n\to text")
|
||||||
|
+ print("\n\to table")
|
||||||
|
|
||||||
|
- print color.bold+"\n\t--secure"+color.end
|
||||||
|
- print "\n\tMaking secure requests(HTTPS) to service"
|
||||||
|
+ print(color.bold+"\n\t--secure"+color.end)
|
||||||
|
+ print("\n\tMaking secure requests(HTTPS) to service")
|
||||||
|
|
||||||
|
- print color.bold+"\nAVAILABLE SERVICES"+color.end
|
||||||
|
- print "\n\to ecs"
|
||||||
|
- print "\n\to ess"
|
||||||
|
- print "\n\to mts"
|
||||||
|
- print "\n\to rds"
|
||||||
|
- print "\n\to slb"
|
||||||
|
+ print(color.bold+"\nAVAILABLE SERVICES"+color.end)
|
||||||
|
+ print("\n\to ecs")
|
||||||
|
+ print("\n\to ess")
|
||||||
|
+ print("\n\to mts")
|
||||||
|
+ print("\n\to rds")
|
||||||
|
+ print("\n\to slb")
|
||||||
|
|
||||||
|
def showCurrentVersion(self):
|
||||||
|
- print self._version
|
||||||
|
+ print(self._version)
|
||||||
|
|
||||||
|
def findConfigureFilePath(self):
|
||||||
|
homePath = ""
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2018-10-08 12:16:00.008525187 +0200
|
||||||
|
@@ -39,9 +39,9 @@
|
||||||
|
|
||||||
|
|
||||||
|
def oss_notice():
|
||||||
|
- print "OSS operation in aliyuncli is not supported."
|
||||||
|
- print "Please use 'ossutil' command line tool for Alibaba Cloud OSS operation."
|
||||||
|
- print "You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n"
|
||||||
|
+ print("OSS operation in aliyuncli is not supported.")
|
||||||
|
+ print("Please use 'ossutil' command line tool for Alibaba Cloud OSS operation.")
|
||||||
|
+ print("You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n")
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
@@ -391,22 +391,22 @@
|
||||||
|
return jsonobj
|
||||||
|
|
||||||
|
except ImportError as e:
|
||||||
|
- print module, 'is not exist!'
|
||||||
|
+ print(module, 'is not exist!')
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
except ServerException as e:
|
||||||
|
error = cliError.error()
|
||||||
|
error.printInFormat(e.get_error_code(), e.get_error_msg())
|
||||||
|
- print "Detail of Server Exception:\n"
|
||||||
|
- print str(e)
|
||||||
|
+ print("Detail of Server Exception:\n")
|
||||||
|
+ print(str(e))
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
except ClientException as e:
|
||||||
|
- # print e.get_error_msg()
|
||||||
|
+ # print(e.get_error_msg())
|
||||||
|
error = cliError.error()
|
||||||
|
error.printInFormat(e.get_error_code(), e.get_error_msg())
|
||||||
|
- print "Detail of Client Exception:\n"
|
||||||
|
- print str(e)
|
||||||
|
+ print("Detail of Client Exception:\n")
|
||||||
|
+ print(str(e))
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
def getSetFuncs(self,classname):
|
||||||
|
@@ -549,6 +549,6 @@
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
handler = aliyunOpenApiDataHandler()
|
||||||
|
- print "###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance')
|
||||||
|
- print "###############",handler.isAvailableOperation('ecs', 'DescribeInstances')
|
||||||
|
- print "###############",handler.getExtensionOperationsFromCmd('ecs')
|
||||||
|
+ print("###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance'))
|
||||||
|
+ print("###############",handler.isAvailableOperation('ecs', 'DescribeInstances'))
|
||||||
|
+ print("###############",handler.getExtensionOperationsFromCmd('ecs'))
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-10-08 12:16:14.865250686 +0200
|
||||||
|
@@ -44,7 +44,7 @@
|
||||||
|
filename=self.fileName
|
||||||
|
self.writeCmdVersionToFile(cmd,version,filename)
|
||||||
|
else:
|
||||||
|
- print "A argument is needed! please use \'--version\' and add the sdk version."
|
||||||
|
+ print("A argument is needed! please use \'--version\' and add the sdk version.")
|
||||||
|
return
|
||||||
|
def showVersions(self,cmd,operation,stream=None):
|
||||||
|
configureVersion='(not configure)'
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-10-08 12:17:34.763774477 +0200
|
||||||
|
@@ -55,7 +55,7 @@
|
||||||
|
# _mlist = self.rds.extensionOptions[self.rds.exportDBInstance]
|
||||||
|
self.appendList(parameterList, self.rds.extensionOptions[self.rds.exportDBInstance])
|
||||||
|
if operation.lower() == self.rds.importDBInstance.lower():
|
||||||
|
- # print "haha", (self.rds.extensionOptions[self.rds.importDBInstance])
|
||||||
|
+ # print("haha", (self.rds.extensionOptions[self.rds.importDBInstance]))
|
||||||
|
# parameterList.append(self.rds.extensionOptions[self.rds.importDBInstance])
|
||||||
|
self.appendList(parameterList, self.rds.extensionOptions[self.rds.importDBInstance])
|
||||||
|
|
||||||
|
@@ -89,8 +89,8 @@
|
||||||
|
importInstance:['count','filename']}
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
- # print type(rds.extensionOperations)
|
||||||
|
- # print type(rds.extensionOptions)
|
||||||
|
- # print rds.extensionOptions['ll']
|
||||||
|
+ # print(type(rds.extensionOperations))
|
||||||
|
+ # print(type(rds.extensionOptions))
|
||||||
|
+ # print(rds.extensionOptions['ll'])
|
||||||
|
configure = commandConfigure()
|
||||||
|
- print configure.showExtensionOperationHelp("ecs", "ExportInstance")
|
||||||
|
+ print(configure.showExtensionOperationHelp("ecs", "ExportInstance"))
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2018-10-08 12:17:59.282322043 +0200
|
||||||
|
@@ -577,7 +577,7 @@
|
||||||
|
operation = operations[i].strip()
|
||||||
|
self._getKeyFromSection(profilename,operation)
|
||||||
|
else:
|
||||||
|
- print 'The correct usage:aliyuncli configure get key --profile profilename'
|
||||||
|
+ print('The correct usage:aliyuncli configure get key --profile profilename')
|
||||||
|
return
|
||||||
|
|
||||||
|
def _getKeyFromSection(self,profilename,key):
|
||||||
|
@@ -591,7 +591,7 @@
|
||||||
|
elif key in _WRITE_TO_CONFIG_FILE :
|
||||||
|
self._getKeyFromFile(config_filename,sectionName,key)
|
||||||
|
else:
|
||||||
|
- print key,'=','None'
|
||||||
|
+ print(key,'=','None')
|
||||||
|
def _getKeyFromFile(self,filename,section,key):
|
||||||
|
if os.path.isfile(filename):
|
||||||
|
with open(filename, 'r') as f:
|
||||||
|
@@ -600,9 +600,9 @@
|
||||||
|
start = self._configWriter.hasSectionName(section,contents)[1]
|
||||||
|
end = self._configWriter._getSectionEnd(start,contents)
|
||||||
|
value = self._configWriter._getValueInSlice(start,end,key,contents)
|
||||||
|
- print key,'=',value
|
||||||
|
+ print(key,'=',value)
|
||||||
|
else:
|
||||||
|
- print key,'=None'
|
||||||
|
+ print(key,'=None')
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py 2018-10-08 12:18:25.178844179 +0200
|
||||||
|
@@ -2,7 +2,7 @@
|
||||||
|
|
||||||
|
def handleEndPoint(cmd,operation,keyValues):
|
||||||
|
if not hasNecessaryArgs(keyValues):
|
||||||
|
- print 'RegionId/EndPoint is absence'
|
||||||
|
+ print('RegionId/EndPoint is absence')
|
||||||
|
return
|
||||||
|
if cmd is not None:
|
||||||
|
cmd = cmd.capitalize()
|
||||||
|
@@ -25,7 +25,7 @@
|
||||||
|
from aliyunsdkcore.profile.region_provider import modify_point
|
||||||
|
modify_point(cmd,regionId,endPoint)
|
||||||
|
except Exception as e:
|
||||||
|
- print e
|
||||||
|
+ print(e)
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py 2018-10-08 12:18:45.458469966 +0200
|
||||||
|
@@ -111,14 +111,14 @@
|
||||||
|
if os.path.isfile(cfgfile):
|
||||||
|
ans = raw_input('File existed. Do you wish to overwrite it?(y/n)')
|
||||||
|
if ans.lower() != 'y':
|
||||||
|
- print 'Answer is No. Quit now'
|
||||||
|
+ print('Answer is No. Quit now')
|
||||||
|
return
|
||||||
|
with open(cfgfile, 'w+') as f:
|
||||||
|
config.write(f)
|
||||||
|
- print 'Your configuration is saved to %s.' % cfgfile
|
||||||
|
+ print('Your configuration is saved to %s.' % cfgfile)
|
||||||
|
|
||||||
|
def cmd_help(args):
|
||||||
|
- print HELP
|
||||||
|
+ print(HELP)
|
||||||
|
|
||||||
|
def add_config(parser):
|
||||||
|
parser.add_argument('--host', type=str, help='service host')
|
||||||
|
@@ -161,7 +161,7 @@
|
||||||
|
return CMD_LIST.keys()
|
||||||
|
def handleOas(pars=None):
|
||||||
|
if pars is None:
|
||||||
|
- print HELP
|
||||||
|
+ print(HELP)
|
||||||
|
sys.exit(0)
|
||||||
|
parser = ArgumentParser(prog="aliyuncli oas",formatter_class=ArgumentDefaultsHelpFormatter)
|
||||||
|
|
||||||
|
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py
|
||||||
|
--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2018-01-24 04:08:33.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2018-10-08 12:18:59.713206928 +0200
|
||||||
|
@@ -61,7 +61,7 @@
|
||||||
|
data = f.read()
|
||||||
|
return data
|
||||||
|
except (OSError, IOError) as e:
|
||||||
|
- print e
|
||||||
|
+ print(e)
|
||||||
|
def _getParamFromUrl(prefix,value,mode):
|
||||||
|
|
||||||
|
req = urllib2.Request(value)
|
||||||
|
@@ -74,7 +74,7 @@
|
||||||
|
errorMsg='Get the wrong content'
|
||||||
|
errorClass.printInFormat(response.getcode(), errorMsg)
|
||||||
|
except Exception as e:
|
||||||
|
- print e
|
||||||
|
+ print(e)
|
||||||
|
|
||||||
|
PrefixMap = {'file://': _getParamFromFile,
|
||||||
|
'fileb://': _getParamFromFile
|
||||||
|
@@ -86,4 +86,4 @@
|
||||||
|
'fileb://': {'mode': 'rb'},
|
||||||
|
#'http://': {},
|
||||||
|
#'https://': {}
|
||||||
|
- }
|
||||||
|
\ No newline at end of file
|
||||||
|
+ }
|
||||||
|
diff -uNr a/bundled/aliyun/colorama/demos/demo07.py b/bundled/aliyun/colorama/demos/demo07.py
|
||||||
|
--- a/bundled/aliyun/colorama/demos/demo07.py 2015-01-06 11:41:47.000000000 +0100
|
||||||
|
+++ b/bundled/aliyun/colorama/demos/demo07.py 2018-10-08 12:20:25.598622106 +0200
|
||||||
|
@@ -16,10 +16,10 @@
|
||||||
|
3a4
|
||||||
|
"""
|
||||||
|
colorama.init()
|
||||||
|
- print "aaa"
|
||||||
|
- print "aaa"
|
||||||
|
- print "aaa"
|
||||||
|
- print forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4"
|
||||||
|
+ print("aaa")
|
||||||
|
+ print("aaa")
|
||||||
|
+ print("aaa")
|
||||||
|
+ print(forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
2170
SPECS/resource-agents.spec
Normal file
2170
SPECS/resource-agents.spec
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,6 +0,0 @@
|
|||||||
--- !Policy
|
|
||||||
product_versions:
|
|
||||||
- rhel-10
|
|
||||||
decision_context: osci_compose_gate
|
|
||||||
rules:
|
|
||||||
- !PassingTestCaseRule {test_case_name: osci.brew-build.tier0.functional}
|
|
@ -1,12 +0,0 @@
|
|||||||
diff --color -uNr a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip
|
|
||||||
--- a/heartbeat/aliyun-vpc-move-ip 2021-08-19 09:37:57.000000000 +0200
|
|
||||||
+++ b/heartbeat/aliyun-vpc-move-ip 2021-08-25 13:38:26.786626079 +0200
|
|
||||||
@@ -17,7 +17,7 @@
|
|
||||||
OCF_RESKEY_interface_default="eth0"
|
|
||||||
OCF_RESKEY_profile_default="default"
|
|
||||||
OCF_RESKEY_endpoint_default="vpc.aliyuncs.com"
|
|
||||||
-OCF_RESKEY_aliyuncli_default="detect"
|
|
||||||
+OCF_RESKEY_aliyuncli_default="/usr/lib/fence-agents/support/aliyun/aliyun-cli/aliyun"
|
|
||||||
|
|
||||||
|
|
||||||
: ${OCF_RESKEY_address=${OCF_RESKEY_address_default}}
|
|
@ -1,33 +0,0 @@
|
|||||||
diff --color -uNr a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
|
|
||||||
--- a/heartbeat/gcp-pd-move.in 2021-08-19 09:37:57.000000000 +0200
|
|
||||||
+++ b/heartbeat/gcp-pd-move.in 2021-08-25 13:50:54.461732967 +0200
|
|
||||||
@@ -32,6 +32,7 @@
|
|
||||||
from ocf import logger
|
|
||||||
|
|
||||||
try:
|
|
||||||
+ sys.path.insert(0, "/usr/lib/fence-agents/support/google/lib/python#PYTHON3_VERSION#/site-packages")
|
|
||||||
import googleapiclient.discovery
|
|
||||||
except ImportError:
|
|
||||||
pass
|
|
||||||
diff --color -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
|
|
||||||
--- a/heartbeat/gcp-vpc-move-route.in 2021-08-19 09:37:57.000000000 +0200
|
|
||||||
+++ b/heartbeat/gcp-vpc-move-route.in 2021-08-25 13:51:17.489797999 +0200
|
|
||||||
@@ -45,6 +45,7 @@
|
|
||||||
from ocf import *
|
|
||||||
|
|
||||||
try:
|
|
||||||
+ sys.path.insert(0, "/usr/lib/fence-agents/support/google/lib/python#PYTHON3_VERSION#/site-packages")
|
|
||||||
import googleapiclient.discovery
|
|
||||||
import pyroute2
|
|
||||||
try:
|
|
||||||
diff --color -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
|
|
||||||
--- a/heartbeat/gcp-vpc-move-vip.in 2021-08-19 09:37:57.000000000 +0200
|
|
||||||
+++ b/heartbeat/gcp-vpc-move-vip.in 2021-08-25 13:51:35.012847487 +0200
|
|
||||||
@@ -29,6 +29,7 @@
|
|
||||||
from ocf import *
|
|
||||||
|
|
||||||
try:
|
|
||||||
+ sys.path.insert(0, "/usr/lib/fence-agents/support/google/lib/python#PYTHON3_VERSION#/site-packages")
|
|
||||||
import googleapiclient.discovery
|
|
||||||
try:
|
|
||||||
from google.oauth2.service_account import Credentials as ServiceAccountCredentials
|
|
@ -1,584 +0,0 @@
|
|||||||
#
|
|
||||||
# All modifications and additions to the file contributed by third parties
|
|
||||||
# remain the property of their copyright owners, unless otherwise agreed
|
|
||||||
# upon. The license for this file, and modifications and additions to the
|
|
||||||
# file, is the same license as for the pristine package itself (unless the
|
|
||||||
# license for the pristine package is not an Open Source License, in which
|
|
||||||
# case the license is the MIT License). An "Open Source License" is a
|
|
||||||
# license that conforms to the Open Source Definition (Version 1.9)
|
|
||||||
# published by the Open Source Initiative.
|
|
||||||
#
|
|
||||||
|
|
||||||
# Below is the script used to generate a new source file
|
|
||||||
# from the resource-agent upstream git repo.
|
|
||||||
#
|
|
||||||
# TAG=$(git log --pretty="format:%h" -n 1)
|
|
||||||
# distdir="ClusterLabs-resource-agents-${TAG}"
|
|
||||||
# TARFILE="${distdir}.tar.gz"
|
|
||||||
# rm -rf $TARFILE $distdir
|
|
||||||
# git archive --prefix=$distdir/ HEAD | gzip > $TARFILE
|
|
||||||
#
|
|
||||||
|
|
||||||
%global upstream_prefix ClusterLabs-resource-agents
|
|
||||||
%global upstream_version 56e76b01
|
|
||||||
|
|
||||||
# Whether this platform defaults to using systemd as an init system
|
|
||||||
# (needs to be evaluated prior to BuildRequires being enumerated and
|
|
||||||
# installed as it's intended to conditionally select some of these, and
|
|
||||||
# for that there are only few indicators with varying reliability:
|
|
||||||
# - presence of systemd-defined macros (when building in a full-fledged
|
|
||||||
# environment, which is not the case with ordinary mock-based builds)
|
|
||||||
# - systemd-aware rpm as manifested with the presence of particular
|
|
||||||
# macro (rpm itself will trivially always be present when building)
|
|
||||||
# - existence of /usr/lib/os-release file, which is something heavily
|
|
||||||
# propagated by systemd project
|
|
||||||
# - when not good enough, there's always a possibility to check
|
|
||||||
# particular distro-specific macros (incl. version comparison)
|
|
||||||
%define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \
|
|
||||||
} || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \
|
|
||||||
} || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?))
|
|
||||||
|
|
||||||
# determine the ras-set to process based on configure invokation
|
|
||||||
%bcond_with rgmanager
|
|
||||||
%bcond_without linuxha
|
|
||||||
|
|
||||||
Name: resource-agents
|
|
||||||
Summary: Open Source HA Reusable Cluster Resource Scripts
|
|
||||||
Version: 4.16.0
|
|
||||||
Release: 1%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}
|
|
||||||
License: GPL-2.0-or-later AND LGPL-2.1-or-later
|
|
||||||
URL: https://github.com/ClusterLabs/resource-agents
|
|
||||||
Source0: %{upstream_prefix}-%{upstream_version}.tar.gz
|
|
||||||
Patch0: pgsqlms-ra.patch
|
|
||||||
Patch1: RHEL-66293-1-aws-agents-reuse-imds-token-until-it-expires.patch
|
|
||||||
Patch2: RHEL-66293-2-aws-agents-reuse-imds-token-improvements.patch
|
|
||||||
Patch3: RHEL-68740-awsvip-add-interface-parameter.patch
|
|
||||||
|
|
||||||
# bundled ha-cloud-support libs
|
|
||||||
Patch500: ha-cloud-support-aliyun.patch
|
|
||||||
Patch501: ha-cloud-support-gcloud.patch
|
|
||||||
|
|
||||||
Obsoletes: heartbeat-resources <= %{version}
|
|
||||||
Provides: heartbeat-resources = %{version}
|
|
||||||
|
|
||||||
# Build dependencies
|
|
||||||
BuildRequires: make
|
|
||||||
BuildRequires: automake autoconf pkgconfig gcc
|
|
||||||
BuildRequires: perl
|
|
||||||
BuildRequires: libxslt glib2-devel libqb-devel
|
|
||||||
BuildRequires: systemd
|
|
||||||
BuildRequires: which
|
|
||||||
|
|
||||||
%if 0%{?fedora} || 0%{?centos} > 7 || 0%{?rhel} > 7 || 0%{?suse_version}
|
|
||||||
BuildRequires: python3-devel
|
|
||||||
%else
|
|
||||||
BuildRequires: python-devel
|
|
||||||
%endif
|
|
||||||
|
|
||||||
# for pgsqlms
|
|
||||||
BuildRequires: perl-devel perl-English perl-FindBin
|
|
||||||
|
|
||||||
%ifarch x86_64
|
|
||||||
BuildRequires: ha-cloud-support
|
|
||||||
%endif
|
|
||||||
|
|
||||||
%if 0%{?fedora} || 0%{?centos} || 0%{?rhel}
|
|
||||||
BuildRequires: docbook-style-xsl docbook-dtds
|
|
||||||
%if 0%{?rhel} == 0
|
|
||||||
BuildRequires: libnet-devel
|
|
||||||
%endif
|
|
||||||
%endif
|
|
||||||
|
|
||||||
%if 0%{?suse_version}
|
|
||||||
BuildRequires: libnet-devel
|
|
||||||
%if 0%{?suse_version} > 1500
|
|
||||||
BuildRequires: cluster-glue-devel
|
|
||||||
%else
|
|
||||||
BuildRequires: libglue-devel
|
|
||||||
%endif
|
|
||||||
BuildRequires: libxslt docbook_4 docbook-xsl-stylesheets
|
|
||||||
%endif
|
|
||||||
|
|
||||||
# dependencies for powervs-subnet
|
|
||||||
BuildRequires: python3-requests python3-urllib3
|
|
||||||
|
|
||||||
## Runtime deps
|
|
||||||
# system tools shared by several agents
|
|
||||||
Requires: /bin/bash /usr/bin/grep /bin/sed /bin/gawk
|
|
||||||
Requires: /bin/ps /usr/bin/pkill /usr/bin/hostname /usr/bin/netstat
|
|
||||||
Requires: /bin/mount
|
|
||||||
%if 0%{?suse_version}
|
|
||||||
Requires: /usr/bin/fuser
|
|
||||||
%else
|
|
||||||
Requires: /usr/sbin/fuser
|
|
||||||
%endif
|
|
||||||
|
|
||||||
# Filesystem / fs.sh / netfs.sh
|
|
||||||
%if 0%{?fedora} > 39 || 0%{?centos} > 9 || 0%{?rhel} > 9 || 0%{?suse_version}
|
|
||||||
Requires: /usr/sbin/fsck
|
|
||||||
%else
|
|
||||||
Requires: /sbin/fsck
|
|
||||||
%endif
|
|
||||||
Requires: /usr/sbin/fsck.ext2 /usr/sbin/fsck.ext3 /usr/sbin/fsck.ext4
|
|
||||||
Requires: /usr/sbin/fsck.xfs
|
|
||||||
%if 0%{?fedora} > 40 || 0%{?centos} > 9 || 0%{?rhel} > 9 || 0%{?suse_version}
|
|
||||||
Requires: /usr/sbin/mount.nfs /usr/sbin/mount.nfs4
|
|
||||||
%else
|
|
||||||
Requires: /sbin/mount.nfs /sbin/mount.nfs4
|
|
||||||
%endif
|
|
||||||
%if (0%{?fedora} && 0%{?fedora} < 33) || (0%{?rhel} && 0%{?rhel} < 9) || (0%{?centos} && 0%{?centos} < 9) || 0%{?suse_version}
|
|
||||||
%if (0%{?rhel} && 0%{?rhel} < 8) || (0%{?centos} && 0%{?centos} < 8)
|
|
||||||
Requires: /usr/sbin/mount.cifs
|
|
||||||
%else
|
|
||||||
Recommends: /usr/sbin/mount.cifs
|
|
||||||
%endif
|
|
||||||
%endif
|
|
||||||
|
|
||||||
# IPaddr2
|
|
||||||
Requires: /sbin/ip
|
|
||||||
|
|
||||||
# LVM / lvm.sh
|
|
||||||
Requires: /usr/sbin/lvm
|
|
||||||
|
|
||||||
# nfsserver / netfs.sh
|
|
||||||
%if 0%{?fedora} > 40 || 0%{?centos} > 9 || 0%{?rhel} > 9 || 0%{?suse_version}
|
|
||||||
Requires: /usr/sbin/rpc.statd
|
|
||||||
%else
|
|
||||||
Requires: /sbin/rpc.statd
|
|
||||||
%endif
|
|
||||||
Requires: /usr/sbin/rpc.nfsd /usr/sbin/rpc.mountd
|
|
||||||
|
|
||||||
# ocf.py
|
|
||||||
Requires: python3
|
|
||||||
|
|
||||||
# ethmonitor
|
|
||||||
Requires: bc
|
|
||||||
|
|
||||||
# rgmanager
|
|
||||||
%if %{with rgmanager}
|
|
||||||
# ip.sh
|
|
||||||
Requires: /usr/sbin/ethtool
|
|
||||||
Requires: /sbin/rdisc /usr/sbin/arping /bin/ping /bin/ping6
|
|
||||||
|
|
||||||
# nfsexport.sh
|
|
||||||
%if 0%{?fedora} > 39 || 0%{?centos} > 9 || 0%{?rhel} > 9
|
|
||||||
Requires: /usr/sbin/findfs
|
|
||||||
Requires: /usr/sbin/quotaon /usr/sbin/quotacheck
|
|
||||||
%else
|
|
||||||
Requires: /sbin/findfs
|
|
||||||
Requires: /sbin/quotaon /sbin/quotacheck
|
|
||||||
%endif
|
|
||||||
%endif
|
|
||||||
|
|
||||||
%description
|
|
||||||
A set of scripts to interface with several services to operate in a
|
|
||||||
High Availability environment for both Pacemaker and rgmanager
|
|
||||||
service managers.
|
|
||||||
|
|
||||||
%ifarch x86_64
|
|
||||||
%package cloud
|
|
||||||
License: GPLv2+ and LGPLv2+
|
|
||||||
Summary: Cloud resource agents
|
|
||||||
Requires: %{name} = %{version}-%{release}
|
|
||||||
Requires: ha-cloud-support
|
|
||||||
Requires: python3-requests python3-urllib3
|
|
||||||
Requires: socat
|
|
||||||
Provides: resource-agents-aliyun
|
|
||||||
Obsoletes: resource-agents-aliyun <= %{version}
|
|
||||||
Provides: resource-agents-gcp
|
|
||||||
Obsoletes: resource-agents-gcp <= %{version}
|
|
||||||
|
|
||||||
%description cloud
|
|
||||||
Cloud resource agents allows Cloud instances to be managed
|
|
||||||
in a cluster environment.
|
|
||||||
%endif
|
|
||||||
|
|
||||||
%package paf
|
|
||||||
License: PostgreSQL
|
|
||||||
Summary: PostgreSQL Automatic Failover (PAF) resource agent
|
|
||||||
Requires: %{name} = %{version}-%{release}
|
|
||||||
Requires: perl-interpreter perl-lib perl-English perl-FindBin
|
|
||||||
|
|
||||||
%description paf
|
|
||||||
PostgreSQL Automatic Failover (PAF) resource agents allows PostgreSQL
|
|
||||||
databases to be managed in a cluster environment.
|
|
||||||
|
|
||||||
%prep
|
|
||||||
%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos} == 0 && 0%{?rhel} == 0
|
|
||||||
%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. Please install the correct build packages or define the required macros manually.}
|
|
||||||
exit 1
|
|
||||||
%endif
|
|
||||||
%setup -q -n %{upstream_prefix}-%{upstream_version}
|
|
||||||
%patch -p1 -P 0
|
|
||||||
%patch -p1 -P 1
|
|
||||||
%patch -p1 -P 2
|
|
||||||
%patch -p1 -P 3
|
|
||||||
|
|
||||||
# bundled ha-cloud-support libs
|
|
||||||
%patch -p1 -P 500
|
|
||||||
%patch -p1 -P 501
|
|
||||||
|
|
||||||
chmod 755 heartbeat/pgsqlms
|
|
||||||
|
|
||||||
%build
|
|
||||||
sed -i -e "s/#PYTHON3_VERSION#/%{python3_version}/" heartbeat/gcp*
|
|
||||||
|
|
||||||
if [ ! -f configure ]; then
|
|
||||||
./autogen.sh
|
|
||||||
fi
|
|
||||||
|
|
||||||
%if 0%{?fedora} >= 11 || 0%{?centos} > 5 || 0%{?rhel} > 5
|
|
||||||
CFLAGS="$(echo '%{optflags}')"
|
|
||||||
%global conf_opt_fatal "--enable-fatal-warnings=no"
|
|
||||||
%else
|
|
||||||
CFLAGS="${CFLAGS} ${RPM_OPT_FLAGS}"
|
|
||||||
%global conf_opt_fatal "--enable-fatal-warnings=yes"
|
|
||||||
%endif
|
|
||||||
|
|
||||||
%if %{with rgmanager}
|
|
||||||
%global rasset rgmanager
|
|
||||||
%endif
|
|
||||||
%if %{with linuxha}
|
|
||||||
%global rasset linux-ha
|
|
||||||
%endif
|
|
||||||
%if %{with rgmanager} && %{with linuxha}
|
|
||||||
%global rasset all
|
|
||||||
%endif
|
|
||||||
|
|
||||||
export CFLAGS
|
|
||||||
|
|
||||||
%configure \
|
|
||||||
%if 0%{?fedora} || 0%{?centos} > 7 || 0%{?rhel} > 7 || 0%{?suse_version}
|
|
||||||
PYTHON="%{__python3}" \
|
|
||||||
%endif
|
|
||||||
%ifarch x86_64
|
|
||||||
PYTHONPATH="%{_usr}/lib/fence-agents/support/google/lib/python%{python3_version}/site-packages" \
|
|
||||||
%endif
|
|
||||||
%{conf_opt_fatal} \
|
|
||||||
%if %{defined _unitdir}
|
|
||||||
SYSTEMD_UNIT_DIR=%{_unitdir} \
|
|
||||||
%endif
|
|
||||||
%if %{defined _tmpfilesdir}
|
|
||||||
SYSTEMD_TMPFILES_DIR=%{_tmpfilesdir} \
|
|
||||||
--with-rsctmpdir=/run/resource-agents \
|
|
||||||
%endif
|
|
||||||
--with-pkg-name=%{name} \
|
|
||||||
--with-ras-set=%{rasset}
|
|
||||||
|
|
||||||
make %{_smp_mflags}
|
|
||||||
|
|
||||||
%install
|
|
||||||
rm -rf %{buildroot}
|
|
||||||
make install DESTDIR=%{buildroot}
|
|
||||||
|
|
||||||
## tree fixup
|
|
||||||
# remove docs (there is only one and they should come from doc sections in files)
|
|
||||||
rm -rf %{buildroot}/usr/share/doc/resource-agents
|
|
||||||
|
|
||||||
%files
|
|
||||||
%doc AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog
|
|
||||||
%if %{with linuxha}
|
|
||||||
%doc heartbeat/README.galera
|
|
||||||
%doc doc/README.webapps
|
|
||||||
%doc %{_datadir}/%{name}/ra-api-1.dtd
|
|
||||||
%doc %{_datadir}/%{name}/metadata.rng
|
|
||||||
%endif
|
|
||||||
|
|
||||||
%if %{with rgmanager}
|
|
||||||
%{_datadir}/cluster
|
|
||||||
%{_sbindir}/rhev-check.sh
|
|
||||||
%endif
|
|
||||||
|
|
||||||
%if %{with linuxha}
|
|
||||||
%dir %{_usr}/lib/ocf
|
|
||||||
%dir %{_usr}/lib/ocf/resource.d
|
|
||||||
%dir %{_usr}/lib/ocf/lib
|
|
||||||
|
|
||||||
%{_usr}/lib/ocf/lib/heartbeat
|
|
||||||
|
|
||||||
%{_usr}/lib/ocf/resource.d/heartbeat
|
|
||||||
|
|
||||||
%{_datadir}/pkgconfig/%{name}.pc
|
|
||||||
|
|
||||||
%if %{defined _unitdir}
|
|
||||||
%{_unitdir}/resource-agents-deps.target
|
|
||||||
%endif
|
|
||||||
%if %{defined _tmpfilesdir}
|
|
||||||
%{_tmpfilesdir}/%{name}.conf
|
|
||||||
%endif
|
|
||||||
|
|
||||||
%dir %{_datadir}/%{name}
|
|
||||||
%dir %{_datadir}/%{name}/ocft
|
|
||||||
%{_datadir}/%{name}/ocft/configs
|
|
||||||
%{_datadir}/%{name}/ocft/caselib
|
|
||||||
%{_datadir}/%{name}/ocft/README
|
|
||||||
%{_datadir}/%{name}/ocft/README.zh_CN
|
|
||||||
%{_datadir}/%{name}/ocft/helpers.sh
|
|
||||||
%exclude %{_datadir}/%{name}/ocft/runocft
|
|
||||||
%exclude %{_datadir}/%{name}/ocft/runocft.prereq
|
|
||||||
|
|
||||||
%{_sbindir}/ocf-tester
|
|
||||||
%{_sbindir}/ocft
|
|
||||||
|
|
||||||
%{_includedir}/heartbeat
|
|
||||||
|
|
||||||
%if %{defined _tmpfilesdir}
|
|
||||||
%dir %attr (1755, root, root) /run/resource-agents
|
|
||||||
%else
|
|
||||||
%dir %attr (1755, root, root) %{_var}/run/resource-agents
|
|
||||||
%endif
|
|
||||||
|
|
||||||
%{_mandir}/man7/*.7*
|
|
||||||
%{_mandir}/man8/ocf-tester.8*
|
|
||||||
|
|
||||||
###
|
|
||||||
# Supported, but in another sub package
|
|
||||||
###
|
|
||||||
%exclude /usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip*
|
|
||||||
%exclude /usr/lib/ocf/resource.d/heartbeat/aws*
|
|
||||||
%exclude /usr/lib/ocf/resource.d/heartbeat/azure-*
|
|
||||||
%exclude %{_mandir}/man7/*aliyun-vpc-move-ip*
|
|
||||||
%exclude /usr/lib/ocf/resource.d/heartbeat/gcp*
|
|
||||||
%exclude %{_mandir}/man7/*gcp*
|
|
||||||
%exclude /usr/lib/ocf/resource.d/heartbeat/powervs-subnet
|
|
||||||
%exclude %{_mandir}/man7/*powervs-subnet*
|
|
||||||
%exclude /usr/lib/ocf/resource.d/heartbeat/pgsqlms
|
|
||||||
%exclude %{_mandir}/man7/*pgsqlms*
|
|
||||||
%exclude %{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
|
|
||||||
|
|
||||||
###
|
|
||||||
# Moved to separate packages
|
|
||||||
###
|
|
||||||
%exclude /usr/lib/ocf/resource.d/heartbeat/SAP*
|
|
||||||
%exclude /usr/lib/ocf/lib/heartbeat/sap*
|
|
||||||
%exclude %{_mandir}/man7/*SAP*
|
|
||||||
|
|
||||||
###
|
|
||||||
# Unsupported
|
|
||||||
###
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AoEtarget
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AudibleAlarm
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ClusterMon
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/EvmsSCC
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Evmsd
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ICP
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/IPaddr
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LVM
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LinuxSCSI
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageRAID
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageVE
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Pure-FTPd
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Raid1
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ServeRAID
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SphinxSearchDaemon
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Stateful
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SysInfo
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/VIPArip
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS6
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WinPopup
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Xen
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ZFS
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/anything
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/asterisk
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/clvm
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dnsupdate
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/docker*
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dovecot
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dummypy
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/eDir88
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/fio
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ids
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iface-bridge
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ipsec
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iscsi
|
|
||||||
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jboss
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jira
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/kamailio
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ldirectord
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxc
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxd-info
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/machine-info
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mariadb
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mdraid
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/minio
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mpathpersist
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mysql-proxy
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/nvmet-*
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ovsmonitor
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pgagent
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pingd
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pound
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/proftpd
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rkt
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rsyslog
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/scsi2reservation
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sfex
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sg_persist
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/smb-share
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/syslog-ng
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/varnish
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vmware
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vsftpd
%exclude %{_usr}/lib/ocf/resource.d/heartbeat/zabbixserver
%exclude %{_mandir}/man7/ocf_heartbeat_AoEtarget.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_AudibleAlarm.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_ClusterMon.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_EvmsSCC.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_Evmsd.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_ICP.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_IPaddr.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_LVM.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_LinuxSCSI.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_ManageRAID.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_ManageVE.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_Pure-FTPd.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_Raid1.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_ServeRAID.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_SphinxSearchDaemon.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_Stateful.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_SysInfo.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_VIPArip.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_WAS.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_WAS6.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_WinPopup.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_Xen.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_ZFS.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_anything.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_asterisk.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_clvm.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_dnsupdate.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_docker*.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_dovecot.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_dummypy.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_eDir88.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_fio.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_ids.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_iface-bridge.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_ipsec.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_iscsi.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_jboss.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_jira.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_kamailio.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_lxc.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_lxd-info.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_machine-info.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_mariadb.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_mdraid.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_minio.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_mpathpersist.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_mysql-proxy.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_nvmet-*.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_ovsmonitor.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_pgagent.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_pingd.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_pound.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_proftpd.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_rkt.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_rsyslog.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_scsi2reservation.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_sfex.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_sg_persist.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_smb-share.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_syslog-ng.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_varnish.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_vmware.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_vsftpd.7.gz
%exclude %{_mandir}/man7/ocf_heartbeat_zabbixserver.7.gz

###
# Other excluded files.
###
# ldirectord is not supported
%exclude /etc/ha.d/resource.d/ldirectord
%exclude %{_sysconfdir}/rc.d/init.d/ldirectord
%exclude %{_unitdir}/ldirectord.service
%exclude /etc/logrotate.d/ldirectord
%exclude /usr/sbin/ldirectord
%exclude %{_mandir}/man8/ldirectord.8.gz

# For compatibility with pre-existing agents
%dir %{_sysconfdir}/ha.d
%{_sysconfdir}/ha.d/shellfuncs

%{_libexecdir}/heartbeat
%endif

%ifarch x86_64
%files cloud
/usr/lib/ocf/resource.d/heartbeat/aliyun-*
%{_mandir}/man7/*aliyun-*
/usr/lib/ocf/resource.d/heartbeat/aws*
%{_mandir}/man7/*aws*
/usr/lib/ocf/resource.d/heartbeat/azure-*
%{_mandir}/man7/*azure-*
/usr/lib/ocf/resource.d/heartbeat/gcp-*
%{_mandir}/man7/*gcp-*
/usr/lib/ocf/resource.d/heartbeat/powervs-subnet
%{_mandir}/man7/*powervs-subnet*
%exclude /usr/lib/ocf/resource.d/heartbeat/azure-events
%exclude %{_mandir}/man7/*azure-events.7*
%exclude /usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-ip
%exclude %{_mandir}/man7/*gcp-vpc-move-ip*
%endif

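On an installed x86_64 host, what the cloud subpackage actually ships can be checked with rpm; a minimal sketch, assuming the subpackage name resource-agents-cloud used by the test playbook further down:

    rpm -ql resource-agents-cloud | grep 'resource.d/heartbeat/'
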
%files paf
%doc paf_README.md
%license paf_LICENSE
%defattr(-,root,root)
%{_usr}/lib/ocf/resource.d/heartbeat/pgsqlms
%{_mandir}/man7/*pgsqlms*
%{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm

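The paf subpackage can be inspected the same way; a sketch, assuming pcs is installed as the test playbook below requires:

    rpm -ql resource-agents-paf
    pcs resource describe ocf:heartbeat:pgsqlms
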
%changelog
* Tue Nov 26 2024 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.16.0-1
- Rebase to resource-agents 4.16.0 upstream release
- AWS agents: reuse IMDS token until it expires
- awsvip: add interface parameter
- ethmonitor: add bc dependency
- build: use /usr/sbin path for nfs-utils dependencies

Resolves: RHEL-65331, RHEL-66293, RHEL-68740, RHEL-53615, RHEL-68840

* Tue Oct 29 2024 Troy Dawson <tdawson@redhat.com> - 4.15.1-1.1
- Bump release for October 2024 mass rebuild:
  Resolves: RHEL-64018

* Fri Jul 26 2024 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.15.1-1
- Rebase to resource-agents 4.15.1 upstream release
- IPaddr2: change default for lvs_ipv6_addrlabel to true to avoid
  last added IP becoming src IP
- powervs-subnet: new resource agent

Resolves: RHEL-50378, RHEL-46557, RHEL-50380

* Thu Jun 27 2024 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.13.0-6
- apache: prefer curl due to wget2 issues, and dont use -L for wget2

Resolves: RHEL-40720

* Mon Jun 24 2024 Troy Dawson <tdawson@redhat.com> - 4.13.0-4.1
- Bump release for June 2024 mass rebuild

* Wed Jun 12 2024 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.13.0-4
- cloud agents: set support library path
- pgsqlms: add to -paf subpackage

* Tue Jan 30 2024 Zbigniew Jedrzejewski-Szmek <zbyszek@in.waw.pl> - 4.13.0-2.3
- Replace /sbin by /usr/sbin in some paths so that the package remains
  installable without full filepath metadata (rhbz#2229951)

* Fri Jan 26 2024 Fedora Release Engineering <releng@fedoraproject.org> - 4.13.0-2.2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild

* Mon Jan 22 2024 Fedora Release Engineering <releng@fedoraproject.org> - 4.13.0-2.1
- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild

* Wed Jan 10 2024 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.13.0-2
- configure: fix "C preprocessor "gcc -E" fails sanity check" error
  with autoconf 2.72+

Resolves: rhbz#2256836

* Wed Oct 11 2023 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.13.0-1
- Rebase to resource-agents 4.13.0 upstream release.
1 sources
@ -1 +0,0 @@
SHA512 (ClusterLabs-resource-agents-56e76b01.tar.gz) = 14fded6bebcc9ada33c0c3db4fee45d6efc5d33865c52c1cf76b4485180cf9a76bae3456d84ba32635a2214ed1ee4010ca8704c844cebf4093ebc0843081919e
@ -1,26 +0,0 @@
#!/bin/sh

err=0

agents=$(pcs resource list ocf:heartbeat 2>&1)
if [ $(echo "$agents" | wc -l) -lt 2 ]; then
	echo -e "ERROR: pcs: agents available:\n$agents"
	err=$((err+1))
else
	echo "INFO: pcs: agents available..."
fi

for bin in "/usr/lib/fence-agents/support/aliyun/aliyun-cli/aliyun version"; do
	output="$($bin 2>&1)"
	if [ $? -ne 0 ]; then
		echo -e "ERROR: $bin failed:\n$output"
		err=$((err+1))
	else
		echo "INFO: $bin works..."
	fi
done

if [ $err -ge 1 ]; then
	echo -e "\nERROR: $err tests FAILED..."
	exit 1
fi
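The smoke test above can also be run by hand on a provisioned node; a rough sketch, with the package names taken from the playbook below and run_tests.sh assumed to sit in the current test directory:

    dnf install -y resource-agents-cloud resource-agents-paf pcs
    sh ./run_tests.sh
    echo $?    # non-zero if any of the checks above failed
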
@ -1,15 +0,0 @@
- hosts: localhost
  roles:
  - role: standard-test-basic
    tags:
    - classic
    - container
    required_packages:
    - resource-agents-cloud
    - resource-agents-paf
    - pcs

    tests:
    - subpackage_tests:
        dir: .
        run: ./run_tests.sh
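Locally, the playbook can be exercised roughly the way the CI does; a sketch, assuming the standard-test-roles package (which provides the standard-test-basic role) and ansible-core are available:

    dnf install -y standard-test-roles ansible-core
    ansible-playbook --tags classic tests.yml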