Compare commits
No commits in common. "c9-beta" and "c8" have entirely different histories.
.gitignore (vendored, 13 changes)
@@ -1 +1,12 @@
-SOURCES/ClusterLabs-resource-agents-fd0720f7.tar.gz
+SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz
+SOURCES/aliyun-cli-2.1.10.tar.gz
+SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz
+SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz
+SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz
+SOURCES/colorama-0.3.3.tar.gz
+SOURCES/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz
+SOURCES/httplib2-0.20.4.tar.gz
+SOURCES/pycryptodome-3.20.0.tar.gz
+SOURCES/pyparsing-2.4.7-py2.py3-none-any.whl
+SOURCES/pyroute2-0.4.13.tar.gz
+SOURCES/urllib3-1.26.18.tar.gz
@@ -1 +1,12 @@
-3b517ecdbe2103df77813050e5c998e102c5de7e SOURCES/ClusterLabs-resource-agents-fd0720f7.tar.gz
+dfc65f4cac3f95026b2f5674019814a527333004 SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz
+306e131d8908ca794276bfe3a0b55ccc3bbd482f SOURCES/aliyun-cli-2.1.10.tar.gz
+0a56f6d9ed2014a363486d33b63eca094379be06 SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz
+c2a98b9a1562d223a76514f05028488ca000c395 SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz
+f14647a4d37a9a254c4e711b95a7654fc418e41e SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz
+0fe5bd8bca54dd71223778a1e0bcca9af324abb1 SOURCES/colorama-0.3.3.tar.gz
+81f039cf075e9c8b70d5af99c189296a9e031de3 SOURCES/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz
+7caf4412d9473bf17352316249a8133fa70b7e37 SOURCES/httplib2-0.20.4.tar.gz
+c55d177e9484d974c95078d4ae945f89ba2c7251 SOURCES/pycryptodome-3.20.0.tar.gz
+c8307f47e3b75a2d02af72982a2dfefa3f56e407 SOURCES/pyparsing-2.4.7-py2.py3-none-any.whl
+147149db11104c06d405fd077dcd2aa1c345f109 SOURCES/pyroute2-0.4.13.tar.gz
+84e2852d8da1655373f7ce5e7d5d3e256b62b4e4 SOURCES/urllib3-1.26.18.tar.gz
SOURCES/10-gcloud-support-info.patch (new file, 25 lines)
@@ -0,0 +1,25 @@
diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py
--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 2019-04-04 11:59:47.592768577 +0200
@@ -900,6 +900,9 @@
return """\
For detailed information on this command and its flags, run:
{command_path} --help
+
+WARNING: {command_path} is only supported for "{command_path} init" and for use
+with the agents in resource-agents.
""".format(command_path=' '.join(command.GetPath()))


diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py
--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 2019-04-04 12:00:23.991142694 +0200
@@ -84,7 +84,7 @@

pkg_root = os.path.dirname(os.path.dirname(surface.__file__))
loader = cli.CLILoader(
- name='gcloud',
+ name='gcloud-ra',
command_root_directory=os.path.join(pkg_root, 'surface'),
allow_non_existing_modules=True,
version_func=VersionFunc,
SOURCES/7-gcp-bundled.patch (new file, 45 lines)
@@ -0,0 +1,45 @@
diff --color -uNr a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
--- a/heartbeat/gcp-pd-move.in 2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-pd-move.in 2024-07-22 11:01:51.455543850 +0200
@@ -32,6 +32,7 @@
from ocf import logger

try:
+ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
import googleapiclient.discovery
except ImportError:
pass
diff --color -uNr a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in
--- a/heartbeat/gcp-vpc-move-ip.in 2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-vpc-move-ip.in 2024-07-22 11:01:18.010752081 +0200
@@ -36,7 +36,7 @@
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

# Defaults
-OCF_RESKEY_gcloud_default="/usr/bin/gcloud"
+OCF_RESKEY_gcloud_default="/usr/bin/gcloud-ra"
OCF_RESKEY_configuration_default="default"
OCF_RESKEY_vpc_network_default="default"
OCF_RESKEY_interface_default="eth0"
diff --color -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
--- a/heartbeat/gcp-vpc-move-route.in 2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-vpc-move-route.in 2024-07-22 11:01:18.011752105 +0200
@@ -45,6 +45,7 @@
from ocf import *

try:
+ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
import googleapiclient.discovery
import pyroute2
try:
diff --color -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
--- a/heartbeat/gcp-vpc-move-vip.in 2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-vpc-move-vip.in 2024-07-22 11:01:18.012752128 +0200
@@ -29,6 +29,7 @@
from ocf import *

try:
+ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
import googleapiclient.discovery
try:
from google.oauth2.service_account import Credentials as ServiceAccountCredentials
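The four hunks above all prepend the bundled GCP client directory to Python's import path before importing googleapiclient. A quick way to confirm the bundled copy resolves, sketched as a hypothetical one-liner (the path only exists once the built package is installed):

# Illustrative check only: import googleapiclient from the path the patch inserts.
PYTHONPATH=/usr/lib/resource-agents/bundled/gcp \
	python3 -c 'import googleapiclient.discovery; print(googleapiclient.__file__)'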
@@ -0,0 +1,129 @@
diff -uNr a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py
--- a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 2019-04-04 11:56:00.292677044 +0200
@@ -19,8 +19,14 @@
certificates.
"""

+from pyasn1.codec.der import decoder
from pyasn1_modules import pem
-import rsa
+from pyasn1_modules.rfc2459 import Certificate
+from pyasn1_modules.rfc5208 import PrivateKeyInfo
+from cryptography.hazmat.primitives import serialization, hashes
+from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography import x509
+from cryptography.hazmat.backends import default_backend
import six

from oauth2client import _helpers
@@ -40,7 +46,7 @@
'-----END RSA PRIVATE KEY-----')
_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
'-----END PRIVATE KEY-----')
-_PKCS8_SPEC = None
+_PKCS8_SPEC = PrivateKeyInfo()


def _bit_list_to_bytes(bit_list):
@@ -67,7 +73,8 @@
"""

def __init__(self, pubkey):
- self._pubkey = pubkey
+ self._pubkey = serialization.load_pem_public_key(pubkey,
+ backend=default_backend())

def verify(self, message, signature):
"""Verifies a message against a signature.
@@ -84,8 +91,9 @@
"""
message = _helpers._to_bytes(message, encoding='utf-8')
try:
- return rsa.pkcs1.verify(message, signature, self._pubkey)
- except (ValueError, rsa.pkcs1.VerificationError):
+ return self._pubkey.verify(signature, message, padding.PKCS1v15(),
+ hashes.SHA256())
+ except (ValueError, TypeError, InvalidSignature):
return False

@classmethod
@@ -109,19 +117,18 @@
"""
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
- from pyasn1.codec.der import decoder
- from pyasn1_modules import rfc2459
-
- der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
- asn1_cert, remaining = decoder.decode(der, asn1Spec=rfc2459.Certificate())
+ der = x509.load_pem_x509_certificate(pem_data, default_backend())
+ asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b'':
raise ValueError('Unused bytes', remaining)

cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
- pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
+ pubkey = serialization.load_der_public_key(decoded_key,
+ backend=default_backend())
else:
- pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
+ pubkey = serialization.load_pem_public_key(decoded_key,
+ backend=default_backend())
return cls(pubkey)


@@ -134,6 +141,8 @@

def __init__(self, pkey):
self._key = pkey
+ self._pubkey = serialization.load_pem_private_key(pkey,
+ backend=default_backend())

def sign(self, message):
"""Signs a message.
@@ -145,7 +154,7 @@
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
- return rsa.pkcs1.sign(message, self._key, 'SHA-256')
+ return self._key.sign(message, padding.PKCS1v15(), hashes.SHA256())

@classmethod
def from_string(cls, key, password='notasecret'):
@@ -163,27 +172,24 @@
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
"""
- global _PKCS8_SPEC
key = _helpers._from_bytes(key)  # pem expects str in Py3
marker_id, key_bytes = pem.readPemBlocksFromFile(
six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)

if marker_id == 0:
- pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
- format='DER')
- elif marker_id == 1:
- from pyasn1.codec.der import decoder
- from pyasn1_modules import rfc5208
+ pkey = serialization.load_der_private_key(
+ key_bytes, password=None,
+ backend=default_backend())

- if _PKCS8_SPEC is None:
- _PKCS8_SPEC = rfc5208.PrivateKeyInfo()
+ elif marker_id == 1:
key_info, remaining = decoder.decode(
key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b'':
raise ValueError('Unused bytes', remaining)
pkey_info = key_info.getComponentByName('privateKey')
- pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
- format='DER')
+ pkey = serialization.load_der_private_key(
+ pkey_info.asOctets(), password=None,
+ backend=default_backend())
else:
raise ValueError('No key could be detected.')
@@ -1,71 +0,0 @@
From 54fa7a59c36697cd8df5b619fff0b50af00df76e Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 20 Nov 2023 16:35:52 +0100
Subject: [PATCH 1/2] storage_mon: fix file handler out of scope leak and
 uninitialized values

---
tools/storage_mon.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/tools/storage_mon.c b/tools/storage_mon.c
index 1aae29e58..cc415e97f 100644
--- a/tools/storage_mon.c
+++ b/tools/storage_mon.c
@@ -382,9 +382,11 @@ static int write_pid_file(const char *pidfile)
syslog(LOG_ERR, "Failed to write '%s' to %s: %s", pid, pidfile, strerror(errno));
goto done;
}
- close(fd);
rc = 0;
done:
+ if (fd != -1) {
+ close(fd);
+ }
if (pid != NULL) {
free(pid);
}
@@ -663,6 +665,7 @@ storage_mon_client(void)
snprintf(request.message, SMON_MAX_MSGSIZE, "%s", SMON_GET_RESULT_COMMAND);
request.hdr.id = 0;
request.hdr.size = sizeof(struct storage_mon_check_value_req);
+ response.hdr.id = 0;
rc = qb_ipcc_send(conn, &request, request.hdr.size);
if (rc < 0) {
syslog(LOG_ERR, "qb_ipcc_send error : %d\n", rc);
@@ -683,7 +686,11 @@ storage_mon_client(void)
/* greater than 0 : monitoring error. */
/* -1 : communication system error. */
/* -2 : Not all checks completed for first device in daemon mode. */
- rc = atoi(response.message);
+ if (strnlen(response.message, 1)) {
+ rc = atoi(response.message);
+ } else {
+ rc = -1;
+ }

syslog(LOG_DEBUG, "daemon response[%d]: %s \n", response.hdr.id, response.message);


From b23ba4eaefb500199c4845751f4c5545c81f42f1 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 20 Nov 2023 16:37:37 +0100
Subject: [PATCH 2/2] findif: also check that netmaskbits != EOS

---
tools/findif.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/findif.c b/tools/findif.c
index a25395fec..ab108a3c4 100644
--- a/tools/findif.c
+++ b/tools/findif.c
@@ -669,7 +669,7 @@ main(int argc, char ** argv) {
}
}

- if (netmaskbits) {
+ if (netmaskbits != NULL && *netmaskbits != EOS) {
best_netmask = netmask;
}else if (best_netmask == 0L) {
/*
@@ -1,23 +0,0 @@
From cb968378959b8aa334e98daf62a1b08ef6525fb4 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 22 Nov 2023 10:32:31 +0100
Subject: [PATCH] storage_mon: use memset() to fix "uninitialized value"
 covscan error, as qb_ipcc_recv() will always set a message (according to
 honzaf)

---
tools/storage_mon.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/tools/storage_mon.c b/tools/storage_mon.c
index cc415e97f..a9227ef90 100644
--- a/tools/storage_mon.c
+++ b/tools/storage_mon.c
@@ -655,6 +655,7 @@ storage_mon_client(void)
int32_t rc;


+ memset(&response, 0, sizeof(response));
snprintf(ipcs_name, SMON_MAX_IPCSNAME, "storage_mon_%s", attrname);
conn = qb_ipcc_connect(ipcs_name, 0);
if (conn == NULL) {
SOURCES/RHEL-17083-findif-EOS-fix.patch (new file, 22 lines)
@@ -0,0 +1,22 @@
From b23ba4eaefb500199c4845751f4c5545c81f42f1 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 20 Nov 2023 16:37:37 +0100
Subject: [PATCH 2/2] findif: also check that netmaskbits != EOS

---
tools/findif.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/findif.c b/tools/findif.c
index a25395fec..ab108a3c4 100644
--- a/tools/findif.c
+++ b/tools/findif.c
@@ -669,7 +669,7 @@ main(int argc, char ** argv) {
}
}

- if (netmaskbits) {
+ if (netmaskbits != NULL && *netmaskbits != EOS) {
best_netmask = netmask;
}else if (best_netmask == 0L) {
/*
@@ -1,46 +0,0 @@
From 65a066cf9066390db65c4875e21c4c391793b9ae Mon Sep 17 00:00:00 2001
From: Arslan Ahmad <arslan.ahmad97@googlemail.com>
Date: Tue, 16 Jan 2024 09:11:17 +0530
Subject: [PATCH] Avoid false positive for VG activation

When lvm.conf file has `volume_list` parameter configured and the
cluster is managing the shared storage using `system_id_source`,
then the activation of the LV fails to happen. However it is
reported as a success.

The fixes will avoid starting of `LVM-activate` resource when
the cluster is configured with both `system_id_source` and
`volume_list`.

Signed-off-by: Arslan Ahmad <arslan.ahmad97@googlemail.com>
---
heartbeat/LVM-activate | 9 +++++++++
1 file changed, 9 insertions(+)

diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
index f6f24a3b5..3858ed8dc 100755
--- a/heartbeat/LVM-activate
+++ b/heartbeat/LVM-activate
@@ -448,6 +448,10 @@ systemid_check()
{
# system_id_source is set in lvm.conf
source=$(lvmconfig 'global/system_id_source' 2>/dev/null | cut -d"=" -f2)
+
+ # Is volume_list set in lvm.conf
+ vol_list=$(lvmconfig 'activation/volume_list' 2>/dev/null | cut -d"=" -f2)
+
if [ "$source" = "" ] || [ "$source" = "none" ]; then
ocf_exit_reason "system_id_source in lvm.conf is not set correctly!"
exit $OCF_ERR_ARGS
@@ -458,6 +462,11 @@ systemid_check()
exit $OCF_ERR_ARGS
fi

+ if [ -n "$source" ] && [ -n "$vol_list" ]; then
+ ocf_exit_reason "Both system_id_source & volume_list cannot be defined!"
+ exit $OCF_ERR_ARGS
+ fi
+
return $OCF_SUCCESS
}

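The new guard can be reproduced outside the agent. A minimal sketch of the same two lvmconfig probes the patch adds (illustrative only; lvmconfig output is assumed to match the agent's cut-based parsing):

# Detect the conflicting lvm.conf combination the patch rejects.
source=$(lvmconfig 'global/system_id_source' 2>/dev/null | cut -d"=" -f2)
vol_list=$(lvmconfig 'activation/volume_list' 2>/dev/null | cut -d"=" -f2)
if [ -n "$source" ] && [ -n "$vol_list" ]; then
	echo "conflict: both system_id_source and volume_list are set"
fi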
@@ -1,40 +0,0 @@
From 264e38e02cb4c04877e412bac254e42c7f6b2e1c Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 20 Feb 2024 12:34:42 +0100
Subject: [PATCH] Filesystem: fail when leading or trailing whitespace is
 present in device or directory parameters

---
heartbeat/Filesystem | 12 ++++++++++++
1 file changed, 12 insertions(+)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index e1378f781..f88e3b552 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -995,6 +995,12 @@ if [ -n "${OCF_RESKEY_force_unmount}" ]; then
fi

DEVICE="$OCF_RESKEY_device"
+case "$DEVICE" in
+ [[:space:]]*|*[[:space:]])
+ ocf_exit_reason "device parameter does not accept leading or trailing whitespace characters"
+ exit $OCF_ERR_CONFIGURED
+ ;;
+esac
FSTYPE=$OCF_RESKEY_fstype
if [ ! -z "$OCF_RESKEY_options" ]; then
options="-o $OCF_RESKEY_options"
@@ -1032,6 +1038,12 @@ if [ -z "$OCF_RESKEY_directory" ]; then
else
MOUNTPOINT="$(echo "$OCF_RESKEY_directory" | sed 's/\/*$//')"
: ${MOUNTPOINT:=/}
+ case "$MOUNTPOINT" in
+ [[:space:]]*|*[[:space:]])
+ ocf_exit_reason "directory parameter does not accept leading or trailing whitespace characters"
+ exit $OCF_ERR_CONFIGURED
+ ;;
+ esac
if [ -e "$MOUNTPOINT" ] ; then
CANONICALIZED_MOUNTPOINT="$(readlink -f "$MOUNTPOINT")"
if [ $? -ne 0 ]; then
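The case pattern used by both hunks can be exercised on its own. A minimal sketch, with a hypothetical function name and sample values:

# Reject values with leading or trailing whitespace, as the patch does.
check_no_surrounding_whitespace() {
	case "$1" in
	[[:space:]]*|*[[:space:]])
		echo "rejected: '$1'"; return 1 ;;
	esac
	echo "accepted: '$1'"
}
check_no_surrounding_whitespace "/dev/sdb1"    # accepted
check_no_surrounding_whitespace " /dev/sdb1"   # rejected (leading space)
check_no_surrounding_whitespace "/dev/sdb1 "   # rejected (trailing space)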
@@ -1,30 +0,0 @@
From 1317efc72af6b72d9fb37aea18dc16129c146148 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 25 Jun 2024 13:33:19 +0200
Subject: [PATCH] Filesystem: return success during stop-action when leading or
 trailing whitespace is present in device or directory parameters

---
heartbeat/Filesystem | 2 ++
1 file changed, 2 insertions(+)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index 8e0127531..3eb520e0c 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -1037,6 +1037,7 @@ fi
DEVICE="$OCF_RESKEY_device"
case "$DEVICE" in
[[:space:]]*|*[[:space:]])
+ [ "$__OCF_ACTION" = "stop" ] && exit $OCF_SUCCESS
ocf_exit_reason "device parameter does not accept leading or trailing whitespace characters"
exit $OCF_ERR_CONFIGURED
;;
@@ -1080,6 +1081,7 @@ else
: ${MOUNTPOINT:=/}
case "$MOUNTPOINT" in
[[:space:]]*|*[[:space:]])
+ [ "$__OCF_ACTION" = "stop" ] && exit $OCF_SUCCESS
ocf_exit_reason "directory parameter does not accept leading or trailing whitespace characters"
exit $OCF_ERR_CONFIGURED
;;
@@ -1,75 +0,0 @@
From 4357f0dbb8668ac4090cd7070c2ea195e5683326 Mon Sep 17 00:00:00 2001
From: Damien Ciabrini <dciabrin@redhat.com>
Date: Wed, 24 Jan 2024 13:27:26 +0100
Subject: [PATCH] galera: allow joiner to report non-Primary during initial IST

It seems that with recent galera versions, when a galera node
joins a cluster, there is a small time window where the node is
connected to the primary component of the galera cluster, but it
might still be preparing its IST. During this time, it can report
itself as being 'not ready' and in 'non-primary' state.

Update the galera resource agent to allow the node to be in
non-primary state, but only if running a "promote" operation. Any
network partition during the promotion will be caught by the
promote timeout.

In reworking the promotion code, we move the check for primary
partition into the "galera_monitor" function. The check works
as before for regular "monitor" or "probe" operations.

Related-Bug: rhbz#2255414
---
heartbeat/galera.in | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/heartbeat/galera.in b/heartbeat/galera.in
index 6aed3e4b6d..b518595cb0 100755
--- a/heartbeat/galera.in
+++ b/heartbeat/galera.in
@@ -822,6 +822,11 @@ galera_promote()
return $rc
fi

+ # At this point, the mysql pidfile is created on disk and the
+ # mysql server is reacheable via its UNIX socket. If we are a
+ # joiner, SST transfers (rsync) have finished, but an IST may
+ # still be requested or ongoing
+
galera_monitor
rc=$?
if [ $rc != $OCF_SUCCESS -a $rc != $OCF_RUNNING_MASTER ]; then
@@ -835,12 +840,6 @@ galera_promote()
return $OCF_ERR_GENERIC
fi

- is_primary
- if [ $? -ne 0 ]; then
- ocf_exit_reason "Failure. Master instance started, but is not in Primary mode."
- return $OCF_ERR_GENERIC
- fi
-
if ocf_is_true $bootstrap; then
promote_everyone
clear_bootstrap_node
@@ -991,8 +990,18 @@ galera_monitor()
fi
rc=$OCF_RUNNING_MASTER
else
- ocf_exit_reason "local node <${NODENAME}> is started, but not in primary mode. Unknown state."
- rc=$OCF_ERR_GENERIC
+ # It seems that with recent galera (26.4+), a joiner that is
+ # connected to a Primary component and is preparing its IST
+ # request might still temporarily report its state as
+ # Non-Primary. Do not fail in this case as the promote
+ # operation will loop until the IST finishes or the promote
+ # times out.
+ if [ "$__OCF_ACTION" = "promote" ] && ! ocf_is_true $(is_bootstrap); then
+ ocf_log info "local node <${NODENAME}> is receiving a State Transfer."
+ else
+ ocf_exit_reason "local node <${NODENAME}> is started, but not in primary mode. Unknown state."
+ rc=$OCF_ERR_GENERIC
+ fi
fi

return $rc
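The transient state this patch tolerates can be observed directly on a joining node. A hedged spot check (socket, credentials, and option handling are assumed to match the agent's own mysql invocation):

# Prints the current cluster status; normally "Primary", but briefly
# "non-Primary" on a joiner that is still preparing its IST.
mysql -nNE -e "SHOW STATUS LIKE 'wsrep_cluster_status';" | tail -1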
@@ -1,25 +0,0 @@
From f717b4a3aa83c9124e62716f421b99e314d00233 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Fri, 12 Apr 2024 12:23:21 +0200
Subject: [PATCH] findif.sh: fix corner cases

---
heartbeat/findif.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh
index 7c766e6e0..13484f827 100644
--- a/heartbeat/findif.sh
+++ b/heartbeat/findif.sh
@@ -215,9 +215,9 @@ findif()
fi
if [ -n "$nic" ] ; then
# NIC supports more than two.
- set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
+ set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | sed -e 's,^\([0-9.]\+\) ,\1/32 ,;s,^\([0-9a-f:]\+\) ,\1/128 ,' | sort -t/ -k2,2nr)
else
- set -- $(ip -o -f $family route list match $match $scope | awk 'BEGIN{best=0} /\// { mask=$1; sub(".*/", "", mask); if( int(mask)>=best ) { best=int(mask); best_ln=$0; } } END{print best_ln}')
+ set -- $(ip -o -f $family route list match $match $scope | sed -e 's,^\([0-9.]\+\) ,\1/32 ,;s,^\([0-9a-f:]\+\) ,\1/128 ,' | sort -t/ -k2,2nr)
fi
if [ $# = 0 ] ; then
case $OCF_RESKEY_ip in
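The replacement pipeline selects the most specific route by normalizing host routes to an explicit prefix length and sorting on it. An illustrative run with hypothetical route lines:

# /32 is appended to bare IPv4 host routes, then routes are sorted by
# prefix length, descending, so the most specific route comes first.
printf '%s\n' \
	"192.168.1.0/24 dev eth0 scope link" \
	"192.168.1.5 dev eth0 scope link" \
	| sed -e 's,^\([0-9.]\+\) ,\1/32 ,' \
	| sort -t/ -k2,2nr
# First output line: 192.168.1.5/32 dev eth0 scope link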
@@ -1,365 +0,0 @@
From 12d73d53026d219be67c0d5353010ba08ab49e98 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 28 May 2024 09:45:55 +0200
Subject: [PATCH 1/3] findif.sh: add metric for IPv6 support and fail when
 matching more than 1 route

---
heartbeat/findif.sh | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh
index 13484f827..ca5d1a5c1 100644
--- a/heartbeat/findif.sh
+++ b/heartbeat/findif.sh
@@ -196,10 +196,13 @@ findif()
{
local match="$OCF_RESKEY_ip"
local family
+ local proto
local scope
local nic="$OCF_RESKEY_nic"
local netmask="$OCF_RESKEY_cidr_netmask"
local brdcast="$OCF_RESKEY_broadcast"
+ local metric
+ local routematch

echo $match | grep -qs ":"
if [ $? = 0 ] ; then
@@ -215,10 +218,19 @@
fi
if [ -n "$nic" ] ; then
# NIC supports more than two.
- set -- $(ip -o -f $family route list match $match $scope | grep "dev $nic " | sed -e 's,^\([0-9.]\+\) ,\1/32 ,;s,^\([0-9a-f:]\+\) ,\1/128 ,' | sort -t/ -k2,2nr)
+ routematch=$(ip -o -f $family route list match $match $proto $scope | grep "dev $nic " | sed -e 's,^\([0-9.]\+\) ,\1/32 ,;s,^\([0-9a-f:]\+\) ,\1/128 ,' | sort -t/ -k2,2nr)
else
- set -- $(ip -o -f $family route list match $match $scope | sed -e 's,^\([0-9.]\+\) ,\1/32 ,;s,^\([0-9a-f:]\+\) ,\1/128 ,' | sort -t/ -k2,2nr)
+ routematch=$(ip -o -f $family route list match $match $proto $scope | sed -e 's,^\([0-9.]\+\) ,\1/32 ,;s,^\([0-9a-f:]\+\) ,\1/128 ,' | sort -t/ -k2,2nr)
fi
+ if [ "$family" = "inet6" ]; then
+ routematch=$(echo "$routematch" | grep -v "^default")
+ fi
+
+ if [ $(echo "$routematch" | wc -l) -gt 1 ]; then
+ ocf_exit_reason "More than 1 routes match $match. Unable to decide which route to use."
+ return $OCF_ERR_GENERIC
+ fi
+ set -- $routematch
if [ $# = 0 ] ; then
case $OCF_RESKEY_ip in
127.*)
@@ -255,6 +267,7 @@ findif()
return $OCF_ERR_GENERIC
fi
fi
- echo "$nic netmask $netmask broadcast $brdcast"
+ metric=$(echo "$@" | sed "s/.*metric[[:blank:]]\([^ ]\+\).*/\1/")
+ echo "$nic netmask $netmask broadcast $brdcast metric $metric"
return $OCF_SUCCESS
}

From 488c096d63fe0f7e15938e65483ba20628080198 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 28 May 2024 09:47:11 +0200
Subject: [PATCH 2/3] IPaddr2: use metric for IPv6

---
heartbeat/IPaddr2 | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index 5f30b8f98..091bea418 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -561,10 +561,11 @@ ip_init() {
if
[ $rc -eq 0 ]
then
- NICINFO=`echo "$NICINFO" | sed -e 's/netmask\ //;s/broadcast\ //'`
+ NICINFO=`echo "$NICINFO" | sed -e 's/netmask\ //;s/broadcast\ //;s/metric\ //'`
NIC=`echo "$NICINFO" | cut -d" " -f1`
NETMASK=`echo "$NICINFO" | cut -d" " -f2`
BRDCAST=`echo "$NICINFO" | cut -d" " -f3`
+ METRIC=`echo "$NICINFO" | cut -d" " -f4`
else
# findif couldn't find the interface
if ocf_is_probe; then
@@ -659,13 +660,14 @@ delete_interface () {
# Add an interface
#
add_interface () {
- local cmd msg extra_opts ipaddr netmask broadcast iface label
+ local cmd msg extra_opts ipaddr netmask broadcast iface label metric

ipaddr="$1"
netmask="$2"
broadcast="$3"
iface="$4"
label="$5"
+ metric="$6"

if [ "$FAMILY" = "inet" ] && ocf_is_true $OCF_RESKEY_run_arping &&
check_binary arping; then
@@ -688,6 +690,9 @@ add_interface () {
fi

extra_opts=""
+ if [ "$FAMILY" = "inet6" ]; then
+ extra_opts="$extra_opts metric $metric"
+ fi
if [ "$FAMILY" = "inet6" ] && ocf_is_true "${OCF_RESKEY_nodad}"; then
extra_opts="$extra_opts nodad"
fi
@@ -1083,7 +1088,7 @@ ip_start() {
done
fi

- add_interface $OCF_RESKEY_ip $NETMASK ${BRDCAST:-none} $NIC $IFLABEL
+ add_interface "$OCF_RESKEY_ip" "$NETMASK" "${BRDCAST:-none}" "$NIC" "$IFLABEL" "$METRIC"
rc=$?

if [ $rc -ne $OCF_SUCCESS ]; then

From d1c4d1969381d3e35cfaaaaae522e5687a9ed88a Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 28 May 2024 09:47:56 +0200
Subject: [PATCH 3/3] IPsrcaddr: add IPv6 support

---
heartbeat/IPsrcaddr | 116 ++++++++++++++++++++++++++++++++------------
1 file changed, 85 insertions(+), 31 deletions(-)

diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
index c732ce8df..1c87d5b7f 100755
--- a/heartbeat/IPsrcaddr
+++ b/heartbeat/IPsrcaddr
@@ -60,6 +60,7 @@ OCF_RESKEY_cidr_netmask_default=""
OCF_RESKEY_destination_default="0.0.0.0/0"
OCF_RESKEY_proto_default=""
OCF_RESKEY_metric_default=""
+OCF_RESKEY_pref_default=""
OCF_RESKEY_table_default=""

: ${OCF_RESKEY_ipaddress=${OCF_RESKEY_ipaddress_default}}
@@ -67,6 +68,7 @@ OCF_RESKEY_table_default=""
: ${OCF_RESKEY_destination=${OCF_RESKEY_destination_default}}
: ${OCF_RESKEY_proto=${OCF_RESKEY_proto_default}}
: ${OCF_RESKEY_metric=${OCF_RESKEY_metric_default}}
+: ${OCF_RESKEY_pref=${OCF_RESKEY_pref_default}}
: ${OCF_RESKEY_table=${OCF_RESKEY_table_default}}
#######################################################################

@@ -75,10 +77,13 @@ OCF_RESKEY_table_default=""

USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}";

- CMDSHOW="$IP2UTIL route show $TABLE to exact $OCF_RESKEY_destination"
-CMDCHANGE="$IP2UTIL route change to "
+echo "$OCF_RESKEY_ipaddress" | grep -q ":" && FAMILY="inet6" || FAMILY="inet"
+[ "$FAMILY" = "inet6" ] && [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] && OCF_RESKEY_destination="::/0"

-if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ]; then
+ CMDSHOW="$IP2UTIL -f $FAMILY route show $TABLE to exact $OCF_RESKEY_destination"
+CMDCHANGE="$IP2UTIL -f $FAMILY route change to "
+
+if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] && [ "$OCF_RESKEY_destination" != "::/0" ]; then
CMDSHOW="$CMDSHOW src $OCF_RESKEY_ipaddress"
fi

@@ -153,6 +158,14 @@ Metric. Only needed if incorrect metric value is used.
<content type="string" default="${OCF_RESKEY_metric_default}" />
</parameter>

+<parameter name="pref">
+<longdesc lang="en">
+IPv6 route preference (low, medium or high). Only needed if incorrect pref value is used.
+</longdesc>
+<shortdesc lang="en">IPv6 route preference.</shortdesc>
+<content type="string" default="${OCF_RESKEY_pref_default}" />
+</parameter>
+
<parameter name="table">
<longdesc lang="en">
Table to modify and use for interface lookup. E.g. "local".
@@ -196,12 +209,21 @@ errorexit() {
# where the src clause "src Y.Y.Y.Y" may or may not be present

WS="[[:blank:]]"
-OCTET="[0-9]\{1,3\}"
-IPADDR="\($OCTET\.\)\{3\}$OCTET"
+case "$FAMILY" in
+ inet)
+ GROUP="[0-9]\{1,3\}"
+ IPADDR="\($GROUP\.\)\{3\}$GROUP"
+ ;;
+ inet6)
+ GROUP="[0-9a-f]\{0,4\}"
+ IPADDR="\($GROUP\:\)\{0,\}$GROUP"
+ ;;
+esac
SRCCLAUSE="src$WS$WS*\($IPADDR\)"
-MATCHROUTE="\(.*${WS}\)\($SRCCLAUSE\)\($WS.*\|$\)"
-METRICCLAUSE=".*\(metric$WS[^ ]\+\)"
+MATCHROUTE="\(.*${WS}\)proto [^ ]\+\(.*${WS}\)\($SRCCLAUSE\)\($WS.*\|$\)"
+METRICCLAUSE=".*\(metric$WS[^ ]\+\).*"
PROTOCLAUSE=".*\(proto$WS[^ ]\+\).*"
+PREFCLAUSE=".*\(pref$WS[^ ]\+\).*"
FINDIF=findif

# findif needs that to be set
@@ -216,17 +238,17 @@ srca_read() {
errorexit "more than 1 matching route exists"

# But there might still be no matching route
- [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] && [ -z "$ROUTE" ] && \
+ ([ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] || [ "$OCF_RESKEY_destination" = "::/0" ]) && [ -z "$ROUTE" ] && \
! ocf_is_probe && [ "$__OCF_ACTION" != stop ] && errorexit "no matching route exists"

# Sed out the source ip address if it exists
- SRCIP=`echo $ROUTE | sed -n "s/$MATCHROUTE/\3/p"`
+ SRCIP=`echo $ROUTE | sed -n "s/$MATCHROUTE/\4/p"`

# and what remains after stripping out the source ip address clause
- ROUTE_WO_SRC=`echo $ROUTE | sed "s/$MATCHROUTE/\1\5/"`
+ ROUTE_WO_SRC=`echo $ROUTE | sed "s/$MATCHROUTE/\1\2\6/"`

# using "src <ip>" only returns output if there's a match
- if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ]; then
+ if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] && [ "$OCF_RESKEY_destination" != "::/0" ]; then
[ -z "$ROUTE" ] && return 1 || return 0
fi

@@ -249,12 +271,15 @@ srca_start() {
rc=$OCF_SUCCESS
ocf_log info "The ip route has been already set.($NETWORK, $INTERFACE, $ROUTE_WO_SRC)"
else
- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC || \
- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC' failed"
+ # NetworkManager manages routes with proto static/kernel
+ [ -z "$OCF_RESKEY_proto" ] && echo "$PROTO" | grep -q "proto \(kernel\|static\)" && PROTO="proto keepalived"

- if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
- $CMDCHANGE $ROUTE_WO_SRC src $1 || \
- errorexit "command '$CMDCHANGE $ROUTE_WO_SRC src $1' failed"
+ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC $PREF || \
+ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC $PREF' failed"
+
+ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] || [ "$OCF_RESKEY_destination" = "::/0" ]; then
+ $CMDCHANGE $ROUTE_WO_SRC $PROTO src $1 || \
+ errorexit "command '$CMDCHANGE $ROUTE_WO_SRC $PROTO src $1' failed"
fi
rc=$?
fi
@@ -290,14 +315,15 @@ srca_stop() {
fi

PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')"
- OPTS="proto kernel scope $SCOPE src $PRIMARY_IP"
+ OPTS="proto kernel scope $SCOPE"
+ [ "$FAMILY" = "inet" ] && OPTS="$OPTS src $PRIMARY_IP"

- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC || \
- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC' failed"
+ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC $PREF || \
+ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC $PREF' failed"

- if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
- $CMDCHANGE $ROUTE_WO_SRC src $PRIMARY_IP || \
- errorexit "command '$CMDCHANGE $ROUTE_WO_SRC src $PRIMARY_IP' failed"
+ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] || [ "$OCF_RESKEY_destination" = "::/0" ]; then
+ $CMDCHANGE $ROUTE_WO_SRC proto static || \
+ errorexit "command '$CMDCHANGE $ROUTE_WO_SRC proto static' failed"
fi

return $?
@@ -330,7 +356,7 @@ CheckIP() {
case $ip in
*[!0-9.]*) #got invalid char
false;;
- .*|*.) #begin or end by ".", which is invalid
+ .*|*.) #begin or end with ".", which is invalid
false;;
*..*) #consecutive ".", which is invalid
false;;
@@ -356,6 +382,18 @@ CheckIP() {
return $? # This return is unnecessary, this comment too :)
}

+CheckIP6() {
+ ip="$1"
+ case $ip in
+ *[!0-9a-f:]*) #got invalid char
+ false;;
+ *:::*) # more than 2 consecutive ":", which is invalid
+ false;;
+ *::*::*) # more than 1 "::", which is invalid
+ false;;
+ esac
+}
+
#
# Find out which interface or alias serves the given IP address
# The argument is an IP address, and its output
@@ -396,8 +434,7 @@ find_interface_solaris() {
# is an (aliased) interface name (e.g., "eth0" and "eth0:0").
#
find_interface_generic() {
-
- local iface=`$IP2UTIL -o -f inet addr show | grep "\ $BASEIP" \
+ local iface=`$IP2UTIL -o -f $FAMILY addr show | grep "\ $BASEIP" \
| cut -d ' ' -f2 | grep -v '^ipsec[0-9][0-9]*$'`
if [ -z "$iface" ]; then
return $OCF_ERR_GENERIC
@@ -502,7 +539,9 @@ srca_validate_all() {

# The IP address should be in good shape
if CheckIP "$ipaddress"; then
- :
+ :
+ elif CheckIP6 "$ipaddress"; then
+ :
else
ocf_exit_reason "Invalid IP address [$ipaddress]"
return $OCF_ERR_CONFIGURED
@@ -570,21 +609,36 @@ rc=$?
}

INTERFACE=`echo $findif_out | awk '{print $1}'`
-LISTROUTE=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress`
+case "$FAMILY" in
+ inet)
+ LISTCMD="$IP2UTIL -f $FAMILY route list dev $INTERFACE scope link $PROTO match $ipaddress"
+ ;;
+ inet6)
+ LISTCMD="$IP2UTIL -f $FAMILY route list dev $INTERFACE $PROTO match $ipaddress"
+ ;;
+esac
+LISTROUTE=`$LISTCMD`
+
[ -z "$PROTO" ] && PROTO=`echo $LISTROUTE | sed -n "s/$PROTOCLAUSE/\1/p"`
if [ -n "$OCF_RESKEY_metric" ]; then
METRIC="metric $OCF_RESKEY_metric"
-elif [ -z "$TABLE" ] || [ "${TABLE#table }" = "main" ]; then
+elif [ -z "$TABLE" ] || [ "${TABLE#table }" = "main" ] || [ "$FAMILY" = "inet6" ]; then
METRIC=`echo $LISTROUTE | sed -n "s/$METRICCLAUSE/\1/p"`
else
METRIC=""
fi
-if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
+if [ "$FAMILY" = "inet6" ]; then
+ if [ -z "$OCF_RESKEY_pref" ]; then
+ PREF=`echo $LISTROUTE | sed -n "s/$PREFCLAUSE/\1/p"`
+ else
+ PREF="pref $OCF_RESKEY_pref"
+ fi
+fi
+if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] || [ "$OCF_RESKEY_destination" = "::/0" ] ;then
NETWORK=`echo $LISTROUTE | grep -m 1 -o '^[^ ]*'`

if [ -z "$NETWORK" ]; then
- err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
- err_str="$err_str match $ipaddress' failed to find a matching route"
+ err_str="command '$LISTCMD' failed to find a matching route"

if [ "$__OCF_ACTION" = "start" ]; then
ocf_exit_reason "$err_str"
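PATCH 1/3 extracts the metric from the matched route and passes it through to the caller. A minimal sketch of the added sed expression against a hypothetical route line:

route="10.0.0.0/24 dev eth0 proto kernel scope link src 10.0.0.5 metric 100"
metric=$(echo "$route" | sed "s/.*metric[[:blank:]]\([^ ]\+\).*/\1/")
echo "$metric"   # prints: 100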
@@ -1,22 +0,0 @@
From 4075aff88776e2811ebc83b735b2a70bcf46247f Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 24 Jun 2024 09:45:29 +0200
Subject: [PATCH] IPaddr2: only set metric value for IPv6 when detected

---
heartbeat/IPaddr2 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index 091bea418..3bc5abec1 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -690,7 +690,7 @@ add_interface () {
fi

extra_opts=""
- if [ "$FAMILY" = "inet6" ]; then
+ if [ "$FAMILY" = "inet6" ] && [ -n "$metric" ]; then
extra_opts="$extra_opts metric $metric"
fi
if [ "$FAMILY" = "inet6" ] && ocf_is_true "${OCF_RESKEY_nodad}"; then
@@ -1,25 +0,0 @@
From f561e272e9b7fe94ba598b70c6d2f44d034446ed Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 14 Aug 2024 12:05:54 +0200
Subject: [PATCH] findif.sh: ignore unreachable, blackhole, and prohibit routes

---
heartbeat/findif.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh
index ca5d1a5c1..7b817f75c 100644
--- a/heartbeat/findif.sh
+++ b/heartbeat/findif.sh
@@ -218,9 +218,9 @@ findif()
fi
if [ -n "$nic" ] ; then
# NIC supports more than two.
- routematch=$(ip -o -f $family route list match $match $proto $scope | grep "dev $nic " | sed -e 's,^\([0-9.]\+\) ,\1/32 ,;s,^\([0-9a-f:]\+\) ,\1/128 ,' | sort -t/ -k2,2nr)
+ routematch=$(ip -o -f $family route list match $match $proto $scope | grep -v "^\(unreachable\|prohibit\|blackhole\)" | grep "dev $nic " | sed -e 's,^\([0-9.]\+\) ,\1/32 ,;s,^\([0-9a-f:]\+\) ,\1/128 ,' | sort -t/ -k2,2nr)
else
- routematch=$(ip -o -f $family route list match $match $proto $scope | sed -e 's,^\([0-9.]\+\) ,\1/32 ,;s,^\([0-9a-f:]\+\) ,\1/128 ,' | sort -t/ -k2,2nr)
+ routematch=$(ip -o -f $family route list match $match $proto $scope | grep -v "^\(unreachable\|prohibit\|blackhole\)" | sed -e 's,^\([0-9.]\+\) ,\1/32 ,;s,^\([0-9a-f:]\+\) ,\1/128 ,' | sort -t/ -k2,2nr)
fi
if [ "$family" = "inet6" ]; then
routematch=$(echo "$routematch" | grep -v "^default")
@@ -1,36 +0,0 @@
From f23ae9c1e9ff9a44a053c7c2378975ac5b807478 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 29 Aug 2024 16:24:02 +0200
Subject: [PATCH] IPsrcaddr: specify dev for default route, as e.g. fe80::
 routes can be present on multiple interfaces

---
heartbeat/IPsrcaddr | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
index 1c87d5b7f..58d89a280 100755
--- a/heartbeat/IPsrcaddr
+++ b/heartbeat/IPsrcaddr
@@ -278,8 +278,8 @@ srca_start() {
errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC $PREF' failed"

if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] || [ "$OCF_RESKEY_destination" = "::/0" ]; then
- $CMDCHANGE $ROUTE_WO_SRC $PROTO src $1 || \
- errorexit "command '$CMDCHANGE $ROUTE_WO_SRC $PROTO src $1' failed"
+ $CMDCHANGE $ROUTE_WO_SRC dev $INTERFACE $PROTO src $1 || \
+ errorexit "command '$CMDCHANGE $ROUTE_WO_SRC dev $INTERFACE $PROTO src $1' failed"
fi
rc=$?
fi
@@ -322,8 +322,8 @@ srca_stop() {
errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC $PREF' failed"

if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] || [ "$OCF_RESKEY_destination" = "::/0" ]; then
- $CMDCHANGE $ROUTE_WO_SRC proto static || \
- errorexit "command '$CMDCHANGE $ROUTE_WO_SRC proto static' failed"
+ $CMDCHANGE $ROUTE_WO_SRC dev $INTERFACE proto static || \
+ errorexit "command '$CMDCHANGE $ROUTE_WO_SRC dev $INTERFACE proto static' failed"
fi

return $?
@@ -1,110 +0,0 @@
From 66a5308d2e8f61093716a076f4386416dc18045c Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 22 Apr 2024 11:26:09 +0200
Subject: [PATCH] Filesystem: fail when incorrect device mounted on mountpoint,
 and dont unmount the mountpoint in this case, or if mountpoint set to "/"

---
heartbeat/Filesystem | 71 ++++++++++++++++++++++++++++++++++++--------
1 file changed, 58 insertions(+), 13 deletions(-)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index e1378f781..cec71f1a6 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -582,10 +582,16 @@ Filesystem_start()
fi

# See if the device is already mounted.
- if Filesystem_status >/dev/null 2>&1 ; then
- ocf_log info "Filesystem $MOUNTPOINT is already mounted."
- return $OCF_SUCCESS
- fi
+ Filesystem_status
+ case "$?" in
+ $OCF_SUCCESS)
+ ocf_log info "Filesystem $MOUNTPOINT is already mounted."
+ return $OCF_SUCCESS
+ ;;
+ $OCF_ERR_CONFIGURED)
+ return $OCF_ERR_CONFIGURED
+ ;;
+ esac

fstype_supported || exit $OCF_ERR_INSTALLED

@@ -801,10 +807,42 @@ Filesystem_stop()
#
Filesystem_status()
{
- match_string="${TAB}${CANONICALIZED_MOUNTPOINT}${TAB}"
- if list_mounts | grep "$match_string" >/dev/null 2>&1; then
- rc=$OCF_SUCCESS
- msg="$MOUNTPOINT is mounted (running)"
+ local match_string="${TAB}${CANONICALIZED_MOUNTPOINT}${TAB}"
+ local mounted_device=$(list_mounts | grep "$match_string" | awk '{print $1}')
+
+ if [ -n "$mounted_device" ]; then
+ if [ "X$blockdevice" = "Xyes" ]; then
+ if [ -e "$DEVICE" ] ; then
+ local canonicalized_device="$(readlink -f "$DEVICE")"
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Could not canonicalize $DEVICE because readlink failed"
+ exit $OCF_ERR_GENERIC
+ fi
+ else
+ local canonicalized_device="$DEVICE"
+ fi
+ if [ -e "$mounted_device" ] ; then
+ local canonicalized_mounted_device="$(readlink -f "$mounted_device")"
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Could not canonicalize $mounted_device because readlink failed"
+ exit $OCF_ERR_GENERIC
+ fi
+ else
+ local canonicalized_mounted_device="$mounted_device"
+ fi
+ if [ "$canonicalized_device" != "$canonicalized_mounted_device" ]; then
+ if ocf_is_probe || [ "$__OCF_ACTION" = "stop" ]; then
+ ocf_log debug "Another device ($mounted_device) is already mounted on $MOUNTPOINT"
+ rc=$OCF_NOT_RUNNING
+ else
+ ocf_exit_reason "Another device ($mounted_device) is already mounted on $MOUNTPOINT"
+ rc=$OCF_ERR_CONFIGURED
+ fi
+ fi
+ else
+ rc=$OCF_SUCCESS
+ msg="$MOUNTPOINT is mounted (running)"
+ fi
else
rc=$OCF_NOT_RUNNING
msg="$MOUNTPOINT is unmounted (stopped)"
@@ -1041,9 +1079,18 @@ else
else
CANONICALIZED_MOUNTPOINT="$MOUNTPOINT"
fi
- # At this stage, $MOUNTPOINT does not contain trailing "/" unless it is "/"
- # TODO: / mounted via Filesystem sounds dangerous. On stop, we'll
- # kill the whole system. Is that a good idea?
+
+ if echo "$CANONICALIZED_MOUNTPOINT" | grep -q "^\s*/\s*$"; then
+ if ocf_is_probe; then
+ ocf_log debug "/ cannot be managed in a cluster"
+ exit $OCF_NOT_RUNNING
+ elif [ "$__OCF_ACTION" = "start" ] || [ "$__OCF_ACTION" = "monitor" ] || [ "$__OCF_ACTION" = "status" ]; then
+ ocf_exit_reason "/ cannot be managed in a cluster"
+ exit $OCF_ERR_CONFIGURED
+ elif [ "$__OCF_ACTION" = "stop" ]; then
+ exit $OCF_SUCCESS
+ fi
+ fi
fi

# Check to make sure the utilites are found
@@ -1124,5 +1171,3 @@ case $OP in
;;
esac
exit $?
-
-
@ -1,333 +0,0 @@
|
||||
From 7739c2a802c1dddb6757ff75cf7f6582a89bd518 Mon Sep 17 00:00:00 2001
|
||||
From: id <happytobi@tscoding.de>
|
||||
Date: Fri, 31 May 2024 09:00:18 +0200
|
||||
Subject: [PATCH] azure-events-az: update to API versions, add retry
|
||||
functionality for metadata requests, update tests
|
||||
|
||||
---
|
||||
heartbeat/azure-events-az.in | 117 ++++++++++++++++++++++++-----------
|
||||
heartbeat/ocf.py | 50 +++++++++++++--
|
||||
2 files changed, 126 insertions(+), 41 deletions(-)
|
||||
|
||||
diff --git a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in
|
||||
index 46d4d1f3d9..6d31e5abae 100644
|
||||
--- a/heartbeat/azure-events-az.in
|
||||
+++ b/heartbeat/azure-events-az.in
|
||||
@@ -27,7 +27,7 @@ import ocf
|
||||
##############################################################################
|
||||
|
||||
|
||||
-VERSION = "0.10"
|
||||
+VERSION = "0.20"
|
||||
USER_AGENT = "Pacemaker-ResourceAgent/%s %s" % (VERSION, ocf.distro())
|
||||
|
||||
attr_globalPullState = "azure-events-az_globalPullState"
|
||||
@@ -39,9 +39,6 @@ attr_healthstate = "#health-azure"
|
||||
default_loglevel = ocf.logging.INFO
|
||||
default_relevantEventTypes = set(["Reboot", "Redeploy"])
|
||||
|
||||
-global_pullMaxAttempts = 3
|
||||
-global_pullDelaySecs = 1
|
||||
-
|
||||
##############################################################################
|
||||
|
||||
class attrDict(defaultdict):
|
||||
@@ -71,16 +68,22 @@ class azHelper:
|
||||
metadata_host = "http://169.254.169.254/metadata"
|
||||
instance_api = "instance"
|
||||
events_api = "scheduledevents"
|
||||
- api_version = "2019-08-01"
|
||||
+ events_api_version = "2020-07-01"
|
||||
+ instance_api_version = "2021-12-13"
|
||||
|
||||
@staticmethod
|
||||
- def _sendMetadataRequest(endpoint, postData=None):
|
||||
+ def _sendMetadataRequest(endpoint, postData=None, api_version="2019-08-01"):
|
||||
"""
|
||||
Send a request to Azure's Azure Metadata Service API
|
||||
"""
|
||||
- url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version)
|
||||
+
|
||||
+ retryCount = int(ocf.get_parameter("retry_count",3))
|
||||
+ retryWaitTime = int(ocf.get_parameter("retry_wait",20))
|
||||
+ requestTimeout = int(ocf.get_parameter("request_timeout",15))
|
||||
+
|
||||
+ url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, api_version)
|
||||
data = ""
|
||||
- ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData))
|
||||
+ ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s, retry_count = %s, retry_wait time = %s, request_timeout = %s" % (endpoint, postData, retryCount, retryWaitTime, requestTimeout))
|
||||
ocf.logger.debug("_sendMetadataRequest: url = %s" % url)
|
||||
|
||||
if postData and type(postData) != bytes:
|
||||
@@ -89,18 +92,37 @@ class azHelper:
|
||||
req = urllib2.Request(url, postData)
|
||||
req.add_header("Metadata", "true")
|
||||
req.add_header("User-Agent", USER_AGENT)
|
||||
- try:
|
||||
- resp = urllib2.urlopen(req)
|
||||
- except URLError as e:
|
||||
- if hasattr(e, 'reason'):
|
||||
- ocf.logger.warning("Failed to reach the server: %s" % e.reason)
|
||||
- clusterHelper.setAttr(attr_globalPullState, "IDLE")
|
||||
- elif hasattr(e, 'code'):
|
||||
- ocf.logger.warning("The server couldn\'t fulfill the request. Error code: %s" % e.code)
|
||||
- clusterHelper.setAttr(attr_globalPullState, "IDLE")
|
||||
- else:
|
||||
- data = resp.read()
|
||||
- ocf.logger.debug("_sendMetadataRequest: response = %s" % data)
|
||||
+
|
||||
+ if retryCount > 0:
|
||||
+ ocf.logger.debug("_sendMetadataRequest: retry enabled")
|
||||
+
|
||||
+ successful = None
|
||||
+ for retry in range(retryCount+1):
|
||||
+ try:
|
||||
+ resp = urllib2.urlopen(req, timeout=requestTimeout)
|
||||
+ except Exception as e:
|
||||
+ excType = e.__class__.__name__
|
||||
+ if excType == TimeoutError.__name__:
|
||||
+ ocf.logger.warning("Request timed out after %s seconds Error: %s" % (requestTimeout, e))
|
||||
+ if excType == URLError.__name__:
|
||||
+ if hasattr(e, 'reason'):
|
||||
+ ocf.logger.warning("Failed to reach the server: %s" % e.reason)
|
||||
+ elif hasattr(e, 'code'):
|
||||
+ ocf.logger.warning("The server couldn\'t fulfill the request. Error code: %s" % e.code)
|
||||
+
|
||||
+ if retryCount > 1 and retry != retryCount:
|
||||
+ ocf.logger.warning("Request failed, retry (%s/%s) wait %s seconds before retry (wait time)" % (retry + 1,retryCount,retryWaitTime))
|
||||
+ time.sleep(retryWaitTime)
|
||||
+
|
||||
+ else:
|
||||
+ data = resp.read()
|
||||
+ ocf.logger.debug("_sendMetadataRequest: response = %s" % data)
|
||||
+ successful = 1
|
||||
+ break
|
||||
+
|
||||
+ # When no request was successful also with retry enabled, set the cluster to idle
|
||||
+ if successful is None:
|
||||
+ clusterHelper.setAttr(attr_globalPullState, "IDLE")
|
||||
|
||||
if data:
|
||||
data = json.loads(data)
@@ -115,14 +137,15 @@ class azHelper:
"""
ocf.logger.debug("getInstanceInfo: begin")

- jsondata = azHelper._sendMetadataRequest(azHelper.instance_api)
+ jsondata = azHelper._sendMetadataRequest(azHelper.instance_api, None, azHelper.instance_api_version)
ocf.logger.debug("getInstanceInfo: json = %s" % jsondata)

if jsondata:
ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"]))
return attrDict(jsondata["compute"])
else:
- ocf.ocf_exit_reason("getInstanceInfo: Unable to get instance info")
+ apiCall = "%s/%s?api-version=%s" % (azHelper.metadata_host, azHelper.instance_api, azHelper.instance_api_version)
+ ocf.ocf_exit_reason("getInstanceInfo: Unable to get instance info - call: %s" % apiCall)
sys.exit(ocf.OCF_ERR_GENERIC)

@staticmethod
@@ -132,11 +155,17 @@ class azHelper:
"""
ocf.logger.debug("pullScheduledEvents: begin")

- jsondata = azHelper._sendMetadataRequest(azHelper.events_api)
+ jsondata = azHelper._sendMetadataRequest(azHelper.events_api, None, azHelper.events_api_version)
ocf.logger.debug("pullScheduledEvents: json = %s" % jsondata)

- ocf.logger.debug("pullScheduledEvents: finished")
- return attrDict(jsondata)
+ if jsondata:
+ ocf.logger.debug("pullScheduledEvents: finished")
+ return attrDict(jsondata)
+ else:
+ apiCall = "%s/%s?api-version=%s" % (azHelper.metadata_host, azHelper.events_api, azHelper.events_api_version)
+ ocf.ocf_exit_reason("pullScheduledEvents: Unable to get scheduledevents info - call: %s" % apiCall)
+ sys.exit(ocf.OCF_ERR_GENERIC)
+

@staticmethod
def forceEvents(eventIDs):
@@ -534,7 +563,7 @@ class Node:
except ValueError:
# Handle the exception
ocf.logger.warn("Health attribute %s on node %s cannot be converted to an integer value" % (healthAttributeStr, node))
-
+
ocf.logger.debug("isNodeInStandby: finished - result %s" % isInStandy)
return isInStandy

@@ -584,7 +613,7 @@ class raAzEvents:

def monitor(self):
ocf.logger.debug("monitor: begin")
-
+
events = azHelper.pullScheduledEvents()

# get current document version
@@ -600,21 +629,21 @@ class raAzEvents:
ocf.logger.info("monitor: already handled curDocVersion, skip")
return ocf.OCF_SUCCESS

- localAzEventIDs = set()
+ localAzEventIds = dict()
for e in localEvents:
- localAzEventIDs.add(e.EventId)
+ localAzEventIds[e.EventId] = json.dumps(e)

curState = self.node.getState()
clusterEventIDs = self.node.getEventIDs()

ocf.logger.debug("monitor: curDocVersion has not been handled yet")
-
+
if clusterEventIDs:
# there are pending events set, so our state must be STOPPING or IN_EVENT
i = 0; touchedEventIDs = False
while i < len(clusterEventIDs):
# clean up pending events that are already finished according to AZ
- if clusterEventIDs[i] not in localAzEventIDs:
+ if clusterEventIDs[i] not in localAzEventIds.keys():
ocf.logger.info("monitor: remove finished local clusterEvent %s" % (clusterEventIDs[i]))
clusterEventIDs.pop(i)
touchedEventIDs = True
@@ -644,12 +673,12 @@ class raAzEvents:
ocf.logger.info("monitor: all local events finished, but some resources have not completed startup yet -> wait")
else:
if curState == AVAILABLE:
- if len(localAzEventIDs) > 0:
+ if len(localAzEventIds) > 0:
if clusterHelper.otherNodesAvailable(self.node):
- ocf.logger.info("monitor: can handle local events %s -> set state STOPPING" % (str(localAzEventIDs)))
- curState = self.node.updateNodeStateAndEvents(STOPPING, localAzEventIDs)
+ ocf.logger.info("monitor: can handle local events %s -> set state STOPPING - %s" % (str(list(localAzEventIds.keys())), str(list(localAzEventIds.values()))))
+ curState = self.node.updateNodeStateAndEvents(STOPPING, localAzEventIds.keys())
else:
- ocf.logger.info("monitor: cannot handle azEvents %s (only node available) -> set state ON_HOLD" % str(localAzEventIDs))
+ ocf.logger.info("monitor: cannot handle azEvents %s (only node available) -> set state ON_HOLD - %s" % (str(list(localAzEventIds.keys())), str(list(localAzEventIds.values()))))
self.node.setState(ON_HOLD)
else:
ocf.logger.debug("monitor: no local azEvents to handle")
@@ -761,6 +790,24 @@ def main():
longdesc="Set to true to enable verbose logging",
content_type="boolean",
default="false")
+ agent.add_parameter(
+ "retry_count",
+ shortdesc="Azure IMDS webservice retry count",
+ longdesc="Set to any number bigger than zero to enable retry count",
+ content_type="integer",
+ default="3")
+ agent.add_parameter(
+ "retry_wait",
+ shortdesc="Configure a retry wait time",
+ longdesc="Set retry wait time in seconds",
+ content_type="integer",
+ default="20")
+ agent.add_parameter(
+ "request_timeout",
+ shortdesc="Configure a request timeout",
+ longdesc="Set request timeout in seconds",
+ content_type="integer",
+ default="15")
agent.add_action("start", timeout=10, handler=lambda: ocf.OCF_SUCCESS)
agent.add_action("stop", timeout=10, handler=lambda: ocf.OCF_SUCCESS)
agent.add_action("validate-all", timeout=20, handler=validate_action)
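
The three new parameters feed the IMDS request path above: retry_count bounds the number of re-attempts, retry_wait is the pause between them, and request_timeout caps each single call. A minimal Python sketch of such a retry loop; send_metadata_request, METADATA_HOST and the api-version value are illustrative stand-ins, not the agent's exact helper:

import json
import time
import urllib.request

METADATA_HOST = "http://169.254.169.254/metadata"   # assumed Azure IMDS base URL
INSTANCE_API = "instance"
INSTANCE_API_VERSION = "2021-12-13"                 # assumed api-version string

def send_metadata_request(api, api_version, retry_count=3, retry_wait=20, request_timeout=15):
    # Hypothetical stand-in for azHelper._sendMetadataRequest with the new knobs;
    # defaults mirror the parameter defaults registered above.
    url = "%s/%s?api-version=%s" % (METADATA_HOST, api, api_version)
    req = urllib.request.Request(url, headers={"Metadata": "true"})
    for attempt in range(int(retry_count) + 1):
        try:
            with urllib.request.urlopen(req, timeout=int(request_timeout)) as resp:
                return json.loads(resp.read().decode("utf-8"))
        except Exception:
            if attempt == int(retry_count):
                return None          # callers above treat a falsy result as an error
            time.sleep(int(retry_wait))
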
diff --git a/heartbeat/ocf.py b/heartbeat/ocf.py
index dda2fed4bb..571cd19664 100644
--- a/heartbeat/ocf.py
+++ b/heartbeat/ocf.py
@@ -16,7 +16,7 @@
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
+#

import sys, os, logging, syslog

@@ -42,19 +42,19 @@
# OCF does not include the concept of master/slave resources so we
# need to extend it so we can discover a resource's complete state.
#
-# OCF_RUNNING_MASTER:
+# OCF_RUNNING_MASTER:
# The resource is in "master" mode and fully operational
# OCF_FAILED_MASTER:
# The resource is in "master" mode but in a failed state
-#
+#
# The extra two values should only be used during a probe.
#
# Probes are used to discover resources that were started outside of
# the CRM and/or left behind if the LRM fails.
-#
+#
# They can be identified in RA scripts by checking for:
# [ "${__OCF_ACTION}" = "monitor" -a "${OCF_RESKEY_CRM_meta_interval}" = "0" ]
-#
+#
# Failed "slaves" should continue to use: OCF_ERR_GENERIC
# Fully operational "slaves" should continue to use: OCF_SUCCESS
#
@@ -451,15 +451,17 @@ def value_for_parameter(param):
sys.exit(OCF_ERR_UNIMPLEMENTED)


+
if __name__ == "__main__":
import unittest
+ import logging

class TestMetadata(unittest.TestCase):
def test_noparams_noactions(self):
m = Agent("foo", shortdesc="shortdesc", longdesc="longdesc")
self.assertEqual("""<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
-<resource-agent name="foo">
+<resource-agent name="foo" version="1.0">
<version>1.0</version>
<longdesc lang="en">
longdesc
@@ -483,4 +485,40 @@ def test_params_actions(self):
m.add_action("start")
self.assertEqual(str(m.actions[0]), '<action name="start" />\n')

+ def test_retry_params_actions(self):
+ log= logging.getLogger( "test_retry_params_actions" )
+
+ m = Agent("foo", shortdesc="shortdesc", longdesc="longdesc")
+ m.add_parameter(
+ "retry_count",
+ shortdesc="Azure ims webservice retry count",
+ longdesc="Set to any number bigger than zero to enable retry count",
+ content_type="integer",
+ default="0")
+ m.add_parameter(
+ "retry_wait",
+ shortdesc="Configure a retry wait time",
+ longdesc="Set retry wait time in seconds",
+ content_type="integer",
+ default="20")
+ m.add_parameter(
+ "request_timeout",
+ shortdesc="Configure a request timeout",
+ longdesc="Set request timeout in seconds",
+ content_type="integer",
+ default="15")
+
+ m.add_action("start")
+
+ log.debug( "actions= %s", str(m.actions[0] ))
+ self.assertEqual(str(m.actions[0]), '<action name="start" />\n')
+
+ log.debug( "parameters= %s", str(m.parameters[0] ))
+ log.debug( "parameters= %s", str(m.parameters[1] ))
+ log.debug( "parameters= %s", str(m.parameters[2] ))
+ self.assertEqual(str(m.parameters[0]), '<parameter name="retry_count">\n<longdesc lang="en">Set to any number bigger than zero to enable retry count</longdesc>\n<shortdesc lang="en">Azure ims webservice retry count</shortdesc>\n<content type="integer" default="0" />\n</parameter>\n')
+ self.assertEqual(str(m.parameters[1]), '<parameter name="retry_wait">\n<longdesc lang="en">Set retry wait time in seconds</longdesc>\n<shortdesc lang="en">Configure a retry wait time</shortdesc>\n<content type="integer" default="20" />\n</parameter>\n')
+ self.assertEqual(str(m.parameters[2]), '<parameter name="request_timeout">\n<longdesc lang="en">Set request timeout in seconds</longdesc>\n<shortdesc lang="en">Configure a request timeout</shortdesc>\n<content type="integer" default="15" />\n</parameter>\n')
+
+ logging.basicConfig( stream=sys.stderr )
unittest.main()
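
For reference, the <parameter> element shape these assertions pin down can be reproduced with the stdlib ElementTree (a sketch only; heartbeat/ocf.py renders the strings itself, including the newlines the assertions expect):

import xml.etree.ElementTree as ET

param = ET.Element("parameter", name="retry_count")
ET.SubElement(param, "longdesc", lang="en").text = "Set to any number bigger than zero to enable retry count"
ET.SubElement(param, "shortdesc", lang="en").text = "Azure ims webservice retry count"
ET.SubElement(param, "content", type="integer", default="0")
print(ET.tostring(param, encoding="unicode"))   # same element structure, minus the formatting newlines
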
File diff suppressed because it is too large
@ -1,61 +0,0 @@
From 481672f73d05666ab20a883cf8fc746cb1f3050f Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 20 Jun 2024 09:29:21 +0200
Subject: [PATCH] galera/mariadb/mysql/redis: remove Unpromoted monitor-action,
as it's covered by the regular monitor-action

---
heartbeat/galera.in | 1 -
heartbeat/mariadb.in | 1 -
heartbeat/mysql | 1 -
heartbeat/redis.in | 1 -
4 files changed, 4 deletions(-)

diff --git a/heartbeat/galera.in b/heartbeat/galera.in
index b518595cb0..b29d68bf73 100755
--- a/heartbeat/galera.in
+++ b/heartbeat/galera.in
@@ -299,7 +299,6 @@ Use it with caution! (and fencing)
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="20s" />
<action name="monitor" role="Promoted" depth="0" timeout="30s" interval="10s" />
-<action name="monitor" role="Unpromoted" depth="0" timeout="30s" interval="30s" />
<action name="promote" timeout="300s" />
<action name="demote" timeout="120s" />
<action name="validate-all" timeout="5s" />
diff --git a/heartbeat/mariadb.in b/heartbeat/mariadb.in
index e0f1f3c9f1..1dca98ba68 100644
--- a/heartbeat/mariadb.in
+++ b/heartbeat/mariadb.in
@@ -255,7 +255,6 @@ The port on which the Promoted MariaDB instance is listening.
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="20s" />
<action name="monitor" role="Promoted" depth="0" timeout="30s" interval="10s" />
-<action name="monitor" role="Unpromoted" depth="0" timeout="30s" interval="30s" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
diff --git a/heartbeat/mysql b/heartbeat/mysql
index 1df2fc0f28..6b00889ff4 100755
--- a/heartbeat/mysql
+++ b/heartbeat/mysql
@@ -322,7 +322,6 @@ whether a node is usable for clients to read from.</shortdesc>
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="20s" />
<action name="monitor" role="Promoted" depth="0" timeout="30s" interval="10s" />
-<action name="monitor" role="Unpromoted" depth="0" timeout="30s" interval="30s" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
diff --git a/heartbeat/redis.in b/heartbeat/redis.in
index 6429477e11..1e541f13d5 100755
--- a/heartbeat/redis.in
+++ b/heartbeat/redis.in
@@ -221,7 +221,6 @@ is in use.
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="60s" interval="45s" />
<action name="monitor" role="Promoted" depth="0" timeout="60s" interval="20s" />
-<action name="monitor" role="Unpromoted" depth="0" timeout="60s" interval="60s" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
@ -0,0 +1,48 @@
From accff72ecc2f6cf5a76d9570198a93ac7c90270e Mon Sep 17 00:00:00 2001
From: Quentin Pradet <quentin.pradet@gmail.com>
Date: Mon, 17 Jun 2024 11:09:06 +0400
Subject: [PATCH] Merge pull request from GHSA-34jh-p97f-mpxf

* Strip Proxy-Authorization header on redirects

* Fix test_retry_default_remove_headers_on_redirect

* Set release date
---
CHANGES.rst | 5 +++++
src/urllib3/util/retry.py | 4 +++-
test/test_retry.py | 6 ++++-
test/with_dummyserver/test_poolmanager.py | 27 ++++++++++++++++++++---
4 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py
index 7a76a4a6ad..0456cceba4 100644
--- a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py
+++ b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py
@@ -189,7 +189,9 @@ class Retry:
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])

#: Default headers to be used for ``remove_headers_on_redirect``
- DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Cookie", "Authorization"])
+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(
+ ["Cookie", "Authorization", "Proxy-Authorization"]
+ )

#: Default maximum backoff time.
DEFAULT_BACKOFF_MAX = 120

diff --git a/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py b/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py
index 7a76a4a6ad..0456cceba4 100644
--- a/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py
+++ b/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py
@@ -189,7 +189,9 @@ class Retry:
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])

#: Default headers to be used for ``remove_headers_on_redirect``
- DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Cookie", "Authorization"])
+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(
+ ["Cookie", "Authorization", "Proxy-Authorization"]
+ )

#: Default maximum backoff time.
DEFAULT_BACKOFF_MAX = 120
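
The observable effect of the change, assuming a patched urllib3 is importable (urllib3 lower-cases the header names internally):

from urllib3.util.retry import Retry

# After the fix the default set covers all three credential-bearing headers,
# so none of them survives a cross-host redirect.
print(Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT)
retry = Retry()
print(retry.remove_headers_on_redirect)   # frozenset of lower-cased names
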
201
SOURCES/RHEL-50360-setuptools-fix-CVE-2024-6345.patch
Normal file
@ -0,0 +1,201 @@
--- a/setuptools/package_index.py 1980-01-01 09:00:00.000000000 +0100
+++ b/setuptools/package_index.py 2024-07-25 10:11:40.537307665 +0200
@@ -1,5 +1,6 @@
"""PyPI and direct package downloading"""
import sys
+import subprocess
import os
import re
import shutil
@@ -563,7 +564,7 @@
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
- found = self._download_url(scheme.group(1), spec, tmpdir)
+ found = self._download_url(spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found, fragment, tmpdir)
@@ -775,7 +776,7 @@
raise DistutilsError("Download error for %s: %s"
% (url, v))

- def _download_url(self, scheme, url, tmpdir):
+ def _download_url(self, url, tmpdir):
# Determine download filename
#
name, fragment = egg_info_for_url(url)
@@ -790,19 +791,59 @@

filename = os.path.join(tmpdir, name)

- # Download the file
- #
- if scheme == 'svn' or scheme.startswith('svn+'):
- return self._download_svn(url, filename)
- elif scheme == 'git' or scheme.startswith('git+'):
- return self._download_git(url, filename)
- elif scheme.startswith('hg+'):
- return self._download_hg(url, filename)
- elif scheme == 'file':
- return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
- else:
- self.url_ok(url, True) # raises error if not allowed
- return self._attempt_download(url, filename)
+ return self._download_vcs(url, filename) or self._download_other(url, filename)
+
+ @staticmethod
+ def _resolve_vcs(url):
+ """
+ >>> rvcs = PackageIndex._resolve_vcs
+ >>> rvcs('git+http://foo/bar')
+ 'git'
+ >>> rvcs('hg+https://foo/bar')
+ 'hg'
+ >>> rvcs('git:myhost')
+ 'git'
+ >>> rvcs('hg:myhost')
+ >>> rvcs('http://foo/bar')
+ """
+ scheme = urllib.parse.urlsplit(url).scheme
+ pre, sep, post = scheme.partition('+')
+ # svn and git have their own protocol; hg does not
+ allowed = set(['svn', 'git'] + ['hg'] * bool(sep))
+ return next(iter({pre} & allowed), None)
+
+ def _download_vcs(self, url, spec_filename):
+ vcs = self._resolve_vcs(url)
+ if not vcs:
+ return
+ if vcs == 'svn':
+ raise DistutilsError(
+ f"Invalid config, SVN download is not supported: {url}"
+ )
+
+ filename, _, _ = spec_filename.partition('#')
+ url, rev = self._vcs_split_rev_from_url(url)
+
+ self.info(f"Doing {vcs} clone from {url} to {filename}")
+ subprocess.check_call([vcs, 'clone', '--quiet', url, filename])
+
+ co_commands = dict(
+ git=[vcs, '-C', filename, 'checkout', '--quiet', rev],
+ hg=[vcs, '--cwd', filename, 'up', '-C', '-r', rev, '-q'],
+ )
+ if rev is not None:
+ self.info(f"Checking out {rev}")
+ subprocess.check_call(co_commands[vcs])
+
+ return filename
+
+ def _download_other(self, url, filename):
+ scheme = urllib.parse.urlsplit(url).scheme
+ if scheme == 'file': # pragma: no cover
+ return urllib.request.url2pathname(urllib.parse.urlparse(url).path)
+ # raise error if not allowed
+ self.url_ok(url, True)
+ return self._attempt_download(url, filename)

def scan_url(self, url):
self.process_url(url, True)
@@ -829,76 +870,37 @@
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at " + url)

- def _download_svn(self, url, filename):
- url = url.split('#', 1)[0] # remove any fragment for svn's sake
- creds = ''
- if url.lower().startswith('svn:') and '@' in url:
- scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
- if not netloc and path.startswith('//') and '/' in path[2:]:
- netloc, path = path[2:].split('/', 1)
- auth, host = splituser(netloc)
- if auth:
- if ':' in auth:
- user, pw = auth.split(':', 1)
- creds = " --username=%s --password=%s" % (user, pw)
- else:
- creds = " --username=" + auth
- netloc = host
- parts = scheme, netloc, url, p, q, f
- url = urllib.parse.urlunparse(parts)
- self.info("Doing subversion checkout from %s to %s", url, filename)
- os.system("svn checkout%s -q %s %s" % (creds, url, filename))
- return filename
-
@staticmethod
- def _vcs_split_rev_from_url(url, pop_prefix=False):
- scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
-
- scheme = scheme.split('+', 1)[-1]
-
- # Some fragment identification fails
- path = path.split('#', 1)[0]
-
- rev = None
- if '@' in path:
- path, rev = path.rsplit('@', 1)
-
- # Also, discard fragment
- url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
-
- return url, rev
-
- def _download_git(self, url, filename):
- filename = filename.split('#', 1)[0]
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
-
- self.info("Doing git clone from %s to %s", url, filename)
- os.system("git clone --quiet %s %s" % (url, filename))
+ def _vcs_split_rev_from_url(url):
+ """
+ Given a possible VCS URL, return a clean URL and resolved revision if any.

- if rev is not None:
- self.info("Checking out %s", rev)
- os.system("(cd %s && git checkout --quiet %s)" % (
- filename,
- rev,
- ))
+ >>> vsrfu = PackageIndex._vcs_split_rev_from_url
+ >>> vsrfu('git+https://github.com/pypa/setuptools@v69.0.0#egg-info=setuptools')
+ ('https://github.com/pypa/setuptools', 'v69.0.0')
+ >>> vsrfu('git+https://github.com/pypa/setuptools#egg-info=setuptools')
+ ('https://github.com/pypa/setuptools', None)
+ >>> vsrfu('http://foo/bar')
+ ('http://foo/bar', None)
+ """
+ parts = urllib.parse.urlsplit(url)

- return filename
+ clean_scheme = parts.scheme.split('+', 1)[-1]

- def _download_hg(self, url, filename):
- filename = filename.split('#', 1)[0]
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
+ # Some fragment identification fails
+ no_fragment_path, _, _ = parts.path.partition('#')

- self.info("Doing hg clone from %s to %s", url, filename)
- os.system("hg clone --quiet %s %s" % (url, filename))
+ pre, sep, post = no_fragment_path.rpartition('@')
+ clean_path, rev = (pre, post) if sep else (post, None)

- if rev is not None:
- self.info("Updating to %s", rev)
- os.system("(cd %s && hg up -C -r %s >&-)" % (
- filename,
- rev,
- ))
+ resolved = parts._replace(
+ scheme=clean_scheme,
+ path=clean_path,
+ # discard the fragment
+ fragment='',
+ ).geturl()

- return filename
+ return resolved, rev

def debug(self, msg, *args):
log.debug(msg, *args)
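
The security-relevant part of this rewrite is the move from os.system() with interpolated strings to subprocess argument lists; a small sketch of the difference (hostile URL invented for illustration):

import subprocess

url = "https://example.com/repo.git; echo injected"   # attacker-controlled input
# Old pattern: os.system("git clone --quiet %s dest" % url) lets the shell parse the ';'.
# New pattern: the URL stays a single argv element, so git sees it verbatim and rejects it.
try:
    subprocess.check_call(["git", "clone", "--quiet", url, "dest"])
except subprocess.CalledProcessError as exc:
    print("clone failed cleanly, no shell involved:", exc.returncode)
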
@ -1,43 +0,0 @@
From 2ab2c832180dacb2e66d38541beae0957416eb96 Mon Sep 17 00:00:00 2001
From: Antonio Romito <aromito@redhat.com>
Date: Mon, 9 Sep 2024 17:30:38 +0200
Subject: [PATCH] Improve handling of "stopping" container removal in
remove_container()

- Added handling for containers in a stopping state by checking the state and force-removing if necessary.
- Improved log messages to provide clearer information when force removal is needed.

Related: https://issues.redhat.com/browse/RHEL-58008
---
heartbeat/podman | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/heartbeat/podman b/heartbeat/podman
index 53867bff20..643ec4d894 100755
--- a/heartbeat/podman
+++ b/heartbeat/podman
@@ -254,6 +254,13 @@ remove_container()
ocf_run podman rm -v $CONTAINER
rc=$?
if [ $rc -ne 0 ]; then
+ if [ $rc -eq 2 ]; then
+ if podman inspect --format '{{.State.Status}}' $CONTAINER | grep -wq "stopping"; then
+ ocf_log err "Inactive container ${CONTAINER} is stuck in 'stopping' state. Force-remove it."
+ ocf_run podman rm -f $CONTAINER
+ rc=$?
+ fi
+ fi
# due to a podman bug (rhbz#1841485), sometimes a stopped
# container can still be associated with Exec sessions, in
# which case the "podman rm" has to be forced
@@ -517,8 +524,8 @@ podman_stop()
# but the associated container exit code is -1. If that's the case,
# assume there's no failure and continue with the rm as usual.
if [ $rc -eq 125 ] && \
- podman inspect --format '{{.State.Status}}:{{.State.ExitCode}}' $CONTAINER | grep -wq "stopped:-1"; then
- ocf_log warn "Container ${CONTAINER} had an unexpected stop outcome. Trying to remove it anyway."
+ podman inspect --format '{{.State.Status}}:{{.State.ExitCode}}' $CONTAINER | grep -Eq '^(exited|stopped):-1$'; then
+ ocf_log err "Container ${CONTAINER} had an unexpected stop outcome. Trying to remove it anyway."
else
ocf_exit_reason "Failed to stop container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}."
return $OCF_ERR_GENERIC
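
In Python terms, the new remove_container() flow looks roughly like this (a sketch: the agent itself is shell, podman must be installed for this to run, and the meaning of rc 2 is taken from the patch above):

import subprocess

def remove_container(name):
    rc = subprocess.call(["podman", "rm", "-v", name])
    if rc == 2:
        # rc 2: container is in a non-removable state; force-remove only if it
        # is stuck in "stopping", as the patch does.
        state = subprocess.run(["podman", "inspect", "--format", "{{.State.Status}}", name],
                               capture_output=True, text=True).stdout.strip()
        if state == "stopping":
            rc = subprocess.call(["podman", "rm", "-f", name])
    return rc
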
@ -1,106 +0,0 @@
From d66a52cfb25f5436255ecc65a407c0166a720146 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 3 Sep 2024 12:55:28 +0200
Subject: [PATCH 1/2] Filesystem: dont sleep during stop-action when there are
no processes to kill

Thanks @SatomiOSAWA for the initial code.
---
heartbeat/Filesystem | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index 3eb520e0c..f54969f20 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -685,12 +685,13 @@ signal_processes() {
pids=$(get_pids "$dir")
if [ -z "$pids" ]; then
ocf_log info "No processes on $dir were signalled. force_unmount is set to '$FORCE_UNMOUNT'"
- return
+ return 1
fi
for pid in $pids; do
ocf_log info "sending signal $sig to: $(ps -f $pid | tail -1)"
kill -s $sig $pid
done
+ return 0
}
try_umount() {
local SUB="$1"
@@ -717,12 +718,13 @@ timeout_child() {
return $ret
}
fs_stop_loop() {
- local SUB="$1" signals="$2" sig
+ local SUB="$1" signals="$2" sig send_signal
while true; do
+ send_signal=false
for sig in $signals; do
- signal_processes "$SUB" $sig
+ signal_processes "$SUB" $sig && send_signal=true
done
- sleep $OCF_RESKEY_signal_delay
+ $send_signal && sleep $OCF_RESKEY_signal_delay
try_umount "$SUB" && return $OCF_SUCCESS
done
}

From cb6aaffc260eea0f0fee6fab44393c6cf12b8a83 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 9 Sep 2024 10:58:12 +0200
Subject: [PATCH 2/2] Filesystem: only use $umount_force after sending
kill_signals

---
heartbeat/Filesystem | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index f54969f20..4dd962fd9 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -694,8 +694,8 @@ signal_processes() {
return 0
}
try_umount() {
- local SUB="$1"
- $UMOUNT $umount_force "$SUB"
+ local force_arg="$1" SUB="$2"
+ $UMOUNT $force_arg "$SUB"
list_mounts | grep "${TAB}${SUB}${TAB}" >/dev/null 2>&1 || {
ocf_log info "unmounted $SUB successfully"
return $OCF_SUCCESS
@@ -718,14 +718,14 @@ timeout_child() {
return $ret
}
fs_stop_loop() {
- local SUB="$1" signals="$2" sig send_signal
+ local force_arg="$1" SUB="$2" signals="$3" sig send_signal
while true; do
send_signal=false
for sig in $signals; do
signal_processes "$SUB" $sig && send_signal=true
done
$send_signal && sleep $OCF_RESKEY_signal_delay
- try_umount "$SUB" && return $OCF_SUCCESS
+ try_umount "$force_arg" "$SUB" && return $OCF_SUCCESS
done
}
fs_stop() {
@@ -733,13 +733,13 @@ fs_stop() {
grace_time=$((timeout/2))

# try gracefully terminating processes for up to half of the configured timeout
- fs_stop_loop "$SUB" "$OCF_RESKEY_term_signals" &
+ fs_stop_loop "" "$SUB" "$OCF_RESKEY_term_signals" &
timeout_child $! $grace_time
ret=$?
[ $ret -eq $OCF_SUCCESS ] && return $ret

# try killing them for the rest of the timeout
- fs_stop_loop "$SUB" "$OCF_RESKEY_kill_signals" &
+ fs_stop_loop "$umount_force" "$SUB" "$OCF_RESKEY_kill_signals" &
timeout_child $! $grace_time
ret=$?
[ $ret -eq $OCF_SUCCESS ] && return $ret
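
The division of labor in fs_stop(), half the timeout for term_signals, the rest for kill_signals, and sleeping only when something was actually signalled, can be sketched in Python as:

import os, signal, time

def stop_pids(pids, timeout, term=(signal.SIGTERM,), kill=(signal.SIGKILL,), delay=1):
    # pids: a set of ints; returns True once nothing is left to kill.
    for signals in (term, kill):                    # two phases, half the timeout each
        deadline = time.monotonic() + timeout / 2.0
        while pids and time.monotonic() < deadline:
            sent = False
            for pid in list(pids):
                for sig in signals:
                    try:
                        os.kill(pid, sig)
                        sent = True
                    except ProcessLookupError:
                        pids.discard(pid)           # already exited
            if sent:
                time.sleep(delay)                   # matches: only sleep after signalling
    return not pids
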
@ -1,37 +0,0 @@
From c72dc2f2e502486d93aeec26abc12e720b14a0a7 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 10 Oct 2024 16:41:03 +0200
Subject: [PATCH] azure-events*: use node name from cluster instead of hostname
to avoid failing if they're not the same

---
heartbeat/azure-events-az.in | 2 +-
heartbeat/azure-events.in | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in
index 6d31e5aba..0ed001037 100644
--- a/heartbeat/azure-events-az.in
+++ b/heartbeat/azure-events-az.in
@@ -441,7 +441,7 @@ class Node:
self.raOwner = ra
self.azInfo = azHelper.getInstanceInfo()
self.azName = self.azInfo.name
- self.hostName = socket.gethostname()
+ self.hostName = clusterHelper._exec("crm_node", "-n")
self.setAttr("azName", self.azName)
clusterHelper.setAttr("hostName_%s" % self.azName, self.hostName)

diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in
index 90acaba62..32f71ee26 100644
--- a/heartbeat/azure-events.in
+++ b/heartbeat/azure-events.in
@@ -411,7 +411,7 @@ class Node:
self.raOwner = ra
self.azInfo = azHelper.getInstanceInfo()
self.azName = self.azInfo.name
- self.hostName = socket.gethostname()
+ self.hostName = clusterHelper._exec("crm_node", "-n")
self.setAttr("azName", self.azName)
clusterHelper.setAttr("hostName_%s" % self.azName, self.hostName)
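
The point of the change: Pacemaker's node name is authoritative for cluster attribute lookups, and it need not equal the OS hostname. A sketch of the lookup (the gethostname fallback is for illustration only; the patched agents always ask crm_node):

import socket
import subprocess

def cluster_node_name():
    try:
        return subprocess.check_output(["crm_node", "-n"], text=True).strip()
    except (OSError, subprocess.CalledProcessError):
        return socket.gethostname()   # illustrative fallback, not what the agents do
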
@ -1,100 +0,0 @@
From f02afd0fadb581ca0fc9798beaf28044cf211200 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Wed, 18 Sep 2024 11:53:52 +0200
Subject: [PATCH 1/2] Filesystem: on stop, try umount directly, before scanning
for users

48ed6e6d (Filesystem: improve stop-action and allow setting term/kill signals and signal_delay for large filesystems, 2023-07-04)
changed the logic from
"try umount; if that fails, find and kill users; repeat" to
"try to find and kill users; then try umount; repeat"

But even just walking /proc may take "a long time" on busy systems,
and may still turn up with "no users found".

It will take even longer for "force_umount=safe"
(observed 8 to 10 seconds just for "get_pids() with "safe" to return nothing)
than for "force_umount=yes" (still ~ 2 to 3 seconds),
but it will take "a long time" in any case.
(BTW, that may be longer than the hardcoded default of 6 seconds for "fast_stop",
which is also the default on many systems now)

If the dependencies are properly configured,
there should be no users left,
and the umount should just work.

Revert back to "try umount first", and only then try to find "rogue" users.
---
heartbeat/Filesystem | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index 4dd962fd9..99bddaf62 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -732,6 +732,11 @@ fs_stop() {
local SUB="$1" timeout=$2 grace_time ret
grace_time=$((timeout/2))

+ # Just walking /proc may take "a long time", even if we don't find any users of this FS.
+ # If dependencies are properly configured, umount should just work.
+ # Only if that fails, try to find and kill processes that still use it.
+ try_umount "" "$SUB" && return $OCF_SUCCESS
+
# try gracefully terminating processes for up to half of the configured timeout
fs_stop_loop "" "$SUB" "$OCF_RESKEY_term_signals" &
timeout_child $! $grace_time

From b42d698f12aaeb871f4cc6a3c0327a27862b4376 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Wed, 18 Sep 2024 13:42:38 +0200
Subject: [PATCH 2/2] Filesystem: stop/get_pids to be signaled

The "safe" way to get process ids that may be using a particular filesystem
currently uses shell globs ("find /proc/[0-9]*").
With a million processes (and/or a less capable shell),
that may result in "Argument list too long".

Replace with find /proc -path "/proc/[0-9]*" instead.
While at it, also fix the non-posix -or to be -o,
and add explicit grouping parentheses \( \) and explicit -print.

Add a comment to not include "interesting" characters in mount point names.
---
heartbeat/Filesystem | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index 99bddaf62..3405e2c26 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -669,9 +669,26 @@ get_pids()
$FUSER -Mm $dir 2>/dev/null
fi
elif [ "$FORCE_UNMOUNT" = "safe" ]; then
- procs=$(find /proc/[0-9]*/ -type l -lname "${dir}/*" -or -lname "${dir}" 2>/dev/null | awk -F/ '{print $3}')
- mmap_procs=$(grep " ${dir}/" /proc/[0-9]*/maps | awk -F/ '{print $3}')
- printf "${procs}\n${mmap_procs}" | sort | uniq
+ # Yes, in theory, ${dir} could contain "interesting" characters
+ # and would need to be quoted for glob (find) and regex (grep).
+ # Don't do that, then.
+
+ # Avoid /proc/[0-9]*, it may cause "Argument list too long".
+ # There are several ways to filter for /proc/<pid>
+ # -mindepth 1 -not -path "/proc/[0-9]*" -prune -o ...
+ # -path "/proc/[!0-9]*" -prune -o ...
+ # -path "/proc/[0-9]*" -a ...
+ # the latter seemed to be significantly faster for this one in my naive test.
+ procs=$(exec 2>/dev/null;
+ find /proc -path "/proc/[0-9]*" -type l \( -lname "${dir}/*" -o -lname "${dir}" \) -print |
+ awk -F/ '{print $3}' | uniq)
+
+ # This finds both /proc/<pid>/maps and /proc/<pid>/task/<tid>/maps;
+ # if you don't want the latter, add -maxdepth.
+ mmap_procs=$(exec 2>/dev/null;
+ find /proc -path "/proc/[0-9]*/maps" -print |
+ xargs -r grep -l " ${dir}/" | awk -F/ '{print $3}' | uniq)
+ printf "${procs}\n${mmap_procs}" | sort -u
fi
}
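
What get_pids() computes can be expressed directly against /proc, which also shows why no shell glob (and hence no "Argument list too long") is needed; a Python sketch, simplified to fd symlinks and maps:

import os

def pids_using(mountpoint):
    pids = set()
    for pid in filter(str.isdigit, os.listdir("/proc")):
        base = "/proc/" + pid
        try:
            for fd in os.listdir(base + "/fd"):
                target = os.readlink("%s/fd/%s" % (base, fd))
                if target == mountpoint or target.startswith(mountpoint + "/"):
                    pids.add(int(pid))
                    break
            with open(base + "/maps") as maps:       # mmap'ed files under the mount
                if any(" " + mountpoint + "/" in line for line in maps):
                    pids.add(int(pid))
        except OSError:
            continue                                  # process vanished or no permission
    return pids
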
@ -1,48 +0,0 @@
From 82958dc115c47232ae0468b1ddf64e728ec325e4 Mon Sep 17 00:00:00 2001
From: Georg Pfuetzenreuter <mail@georg-pfuetzenreuter.net>
Date: Wed, 9 Oct 2024 00:16:44 +0200
Subject: [PATCH] ocf-shellfuncs: systemd_drop_in only if needed

Avoid dbus overload upon many simultaneous "daemon-reload" invocations
(when a resource agent using systemd_drop_in() is called multiple times
as part of parallel resource operations in Pacemaker) by skipping the
file creation and reload if the expected data already exists.

Whilst at it, align the indentation of the heredoc with the other parts
of the function.

Signed-off-by: Georg Pfuetzenreuter <mail@georg-pfuetzenreuter.net>
---
heartbeat/ocf-shellfuncs.in | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index 9335cbf00..5c4bb3264 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -662,14 +662,17 @@ systemd_drop_in()
systemdrundir="/run/systemd/system/resource-agents-deps.target.d"
mkdir -p "$systemdrundir"
conf_file="$systemdrundir/$1.conf"
- cat >"$conf_file" <<EOF
-[Unit]
-$2=$3
-EOF
- # The information is accessible through systemd API and systemd would
- # complain about improper permissions.
- chmod o+r "$conf_file"
- systemctl daemon-reload
+ conf_line="$2=$3"
+ if ! { [ -f "$conf_file" ] && grep -q "^$conf_line$" "$conf_file" ; } ; then
+ cat > "$conf_file" <<-EOF
+ [Unit]
+ $conf_line
+ EOF
+ # The information is accessible through systemd API and systemd would
+ # complain about improper permissions.
+ chmod o+r "$conf_file"
+ systemctl daemon-reload
+ fi
}

# usage: curl_retry RETRIES SLEEP ARGS URL
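
The guard is a plain content check before writing and reloading; the same idea in Python, slightly stricter than the grep in the patch because it compares the whole file (paths as in the function above, the 0o644 mode is an assumption standing in for chmod o+r):

import os
import subprocess

def systemd_drop_in(name, key, value):
    rundir = "/run/systemd/system/resource-agents-deps.target.d"
    conf = os.path.join(rundir, name + ".conf")
    wanted = "[Unit]\n%s=%s\n" % (key, value)
    try:
        with open(conf) as f:
            if f.read() == wanted:
                return                      # already in place: skip the daemon-reload
    except OSError:
        pass
    os.makedirs(rundir, exist_ok=True)
    with open(conf, "w") as f:
        f.write(wanted)
    os.chmod(conf, 0o644)                   # systemd wants the file world-readable
    subprocess.check_call(["systemctl", "daemon-reload"])
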
@ -1,132 +0,0 @@
From 6fab544e702a7601714cd017aecc00193f23ae72 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Fri, 11 Oct 2024 13:13:10 +0200
Subject: [PATCH] IPaddr2: improve fail logic and check ip_status after adding
IP

* check that the label got applied
* return OCF_ERR_GENERIC to avoid false-positive when IP was manually added before starting the resource
* check ip_status after adding IP to fail without having to wait for the first monitor-action

Co-authored-by: Evan J. Felix <evan.felix@pnnl.gov>
---
heartbeat/IPaddr2 | 35 ++++++++++++++++++++++++++---------
1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index e325aa574..27cae2d11 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -586,7 +586,7 @@ ip_init() {
exit $rc
fi
fi
-
+
SENDARPPIDFILE="$SENDARPPIDDIR/send_arp-$OCF_RESKEY_ip"

if [ -n "$IFLABEL" ]; then
@@ -985,6 +985,7 @@ run_send_ua() {
# ok = served (for CIP: + hash bucket)
# partial = served and no hash bucket (CIP only)
# partial2 = served and no CIP iptables rule
+# partial3 = served with no label
# no = nothing
#
ip_served() {
@@ -1002,6 +1003,11 @@ ip_served() {

if [ -z "$IP_CIP" ]; then
for i in $cur_nic; do
+ # check address label
+ if [ -n "$IFLABEL" ] && [ -z "`$IP2UTIL -o -f $FAMILY addr show $nic label $IFLABEL`" ]; then
+ echo partial3
+ return 0
+ fi
# only mark as served when on the same interfaces as $NIC
[ "$i" = "$NIC" ] || continue
echo "ok"
@@ -1065,7 +1071,12 @@ ip_start() {
if [ "$ip_status" = "ok" ]; then
exit $OCF_SUCCESS
fi
-
+
+ if [ "$ip_status" = "partial3" ]; then
+ ocf_exit_reason "IP $OCF_RESKEY_ip available, but label missing"
+ exit $OCF_ERR_GENERIC
+ fi
+
if [ -n "$IP_CIP" ] && ([ $ip_status = "no" ] || [ $ip_status = "partial2" ]); then
$MODPROBE ip_conntrack
$IPADDR2_CIP_IPTABLES -I INPUT -d $OCF_RESKEY_ip -i $NIC -j CLUSTERIP \
@@ -1083,7 +1094,7 @@ ip_start() {
if [ -n "$IP_CIP" ] && [ $ip_status = "partial" ]; then
echo "+$IP_INC_NO" >$IP_CIP_FILE
fi
-
+
if [ "$ip_status" = "no" ]; then
if ocf_is_true ${OCF_RESKEY_lvs_support}; then
for i in `find_interface $OCF_RESKEY_ip 32`; do
@@ -1094,7 +1105,7 @@ ip_start() {
esac
done
fi
-
+
add_interface "$OCF_RESKEY_ip" "$NETMASK" "${BRDCAST:-none}" "$NIC" "$IFLABEL" "$METRIC"
rc=$?

@@ -1102,6 +1113,12 @@ ip_start() {
ocf_exit_reason "Failed to add $OCF_RESKEY_ip"
exit $rc
fi
+
+ ip_status=`ip_served`
+ if [ "$ip_status" != "ok" ]; then
+ ocf_exit_reason "Failed to add $OCF_RESKEY_ip with error $ip_status"
+ exit $OCF_ERR_GENERIC
+ fi
fi

case $NIC in
@@ -1134,7 +1151,7 @@ ip_stop() {
ocf_take_lock $CIP_lockfile
ocf_release_lock_on_exit $CIP_lockfile
fi
-
+
if [ -f "$SENDARPPIDFILE" ] ; then
kill `cat "$SENDARPPIDFILE"`
if [ $? -ne 0 ]; then
@@ -1171,17 +1188,17 @@ ip_stop() {
i=`expr $i + 1`
done
else
- ip_del_if="no"
+ ip_del_if="no"
fi
fi
-
+
if [ "$ip_del_if" = "yes" ]; then
delete_interface $OCF_RESKEY_ip $NIC $NETMASK
if [ $? -ne 0 ]; then
ocf_exit_reason "Unable to remove IP [${OCF_RESKEY_ip} from interface [ $NIC ]"
exit $OCF_ERR_GENERIC
fi
-
+
if ocf_is_true ${OCF_RESKEY_lvs_support}; then
restore_loopback "$OCF_RESKEY_ip"
fi
@@ -1200,7 +1217,7 @@ ip_monitor() {
run_arp_sender refresh
return $OCF_SUCCESS
;;
- partial|no|partial2)
+ no)
exit $OCF_NOT_RUNNING
;;
*)
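
The net effect on the start path: "partial3" now fails immediately, and a successful add_interface is re-verified instead of waiting for the first monitor. As a decision table, sketched in Python:

def start_outcome(ip_status):
    # States as defined above: ok / partial / partial2 / partial3 / no
    if ip_status == "ok":
        return "already served -> OCF_SUCCESS"
    if ip_status == "partial3":
        return "address present but label missing -> OCF_ERR_GENERIC"
    return "add the address, then require ip_served() == 'ok'"

for state in ("ok", "partial3", "no"):
    print(state, "->", start_outcome(state))
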
@ -1,455 +0,0 @@
From 61cec34a754017537c61e79cd1212f2688c32429 Mon Sep 17 00:00:00 2001
From: harshkiprofile <83770157+harshkiprofile@users.noreply.github.com>
Date: Mon, 4 Nov 2024 12:19:10 +0530
Subject: [PATCH 1/7] Introduce a new shell function to reuse IMDS token

---
heartbeat/ocf-shellfuncs.in | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)

diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index 5c4bb3264..0c4632cf9 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -1111,3 +1111,34 @@ ocf_is_true "$OCF_TRACE_RA" && ocf_start_trace
if ocf_is_true "$HA_use_logd"; then
: ${HA_LOGD:=yes}
fi
+
+# File to store the token and timestamp
+TOKEN_FILE="/tmp/.imds_token"
+TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
+TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining
+
+# Function to fetch a new token
+fetch_new_token() {
+ TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME")
+ echo "$TOKEN $(date +%s)" > "$TOKEN_FILE"
+ echo "$TOKEN"
+}
+
+# Function to retrieve or renew the token
+get_token() {
+ if [[ -f "$TOKEN_FILE" ]]; then
+ read -r STORED_TOKEN STORED_TIMESTAMP < "$TOKEN_FILE"
+ CURRENT_TIME=$(date +%s)
+ ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP))
+
+ if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then
+ # Token is still valid
+ echo "$STORED_TOKEN"
+ return
+ fi
+ fi
+ # Fetch a new token if not valid
+ fetch_new_token
+}
+
+
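
The caching rule above (reuse the token for up to five hours, renew inside the final hour of its six-hour lifetime) in Python form; fetch stands in for whatever performs the PUT against 169.254.169.254 (a sketch, not the agents' code):

import time

TOKEN_LIFETIME = 21600            # 6 hours, as requested from IMDS
TOKEN_EXPIRY_THRESHOLD = 3600     # renew once less than 1 hour remains

_cache = {"token": None, "stamp": 0.0}

def get_token(fetch):
    age = time.time() - _cache["stamp"]
    if _cache["token"] and age < TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD:
        return _cache["token"]
    _cache["token"], _cache["stamp"] = fetch(), time.time()
    return _cache["token"]
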
From 00629fa44cb7a8dd1045fc8cad755e1d0c808476 Mon Sep 17 00:00:00 2001
From: harshkiprofile <83770157+harshkiprofile@users.noreply.github.com>
Date: Mon, 4 Nov 2024 12:21:18 +0530
Subject: [PATCH 2/7] Utilize the get_token function to reuse the token

---
heartbeat/aws-vpc-move-ip | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index 6115e5ba8..fbeb2ee64 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -270,7 +270,7 @@ ec2ip_validate() {
fi
fi

- TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+ TOKEN=$(get_token)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
EC2_INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC

From 36126cdcb90ad617ecfce03d986550907732aa4f Mon Sep 17 00:00:00 2001
From: harshkiprofile <83770157+harshkiprofile@users.noreply.github.com>
Date: Mon, 4 Nov 2024 12:22:16 +0530
Subject: [PATCH 3/7] Utilize the get_token function to reuse the token

---
heartbeat/awsvip | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index f2b238a0f..ca19ac086 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -266,7 +266,7 @@ if [ -n "${OCF_RESKEY_region}" ]; then
AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}"
fi
SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
-TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+TOKEN=$(get_token)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC

From dcd0050df5ba94905bc71d38b05cbb93f5687b61 Mon Sep 17 00:00:00 2001
From: harshkiprofile <beer18317@gmail.com>
Date: Mon, 4 Nov 2024 20:05:33 +0530
Subject: [PATCH 4/7] Move token renewal function to aws.sh for reuse in AWS
agent scripts

---
heartbeat/Makefile.am | 1 +
heartbeat/aws-vpc-move-ip | 1 +
heartbeat/aws-vpc-route53.in | 3 ++-
heartbeat/aws.sh | 46 ++++++++++++++++++++++++++++++++++++
heartbeat/awseip | 3 ++-
heartbeat/awsvip | 1 +
heartbeat/ocf-shellfuncs.in | 33 +--------------------------
7 files changed, 54 insertions(+), 34 deletions(-)
create mode 100644 heartbeat/aws.sh

diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 409847970..655740f14 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -218,6 +218,7 @@ ocfcommon_DATA = ocf-shellfuncs \
ocf-rarun \
ocf-distro \
apache-conf.sh \
+ aws.sh \
http-mon.sh \
sapdb-nosha.sh \
sapdb.sh \
diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index fbeb2ee64..f4b0492f2 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -33,6 +33,7 @@

: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/aws.sh

# Defaults
OCF_RESKEY_awscli_default="/usr/bin/aws"
diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
index eba2ed95c..f7e756782 100644
--- a/heartbeat/aws-vpc-route53.in
+++ b/heartbeat/aws-vpc-route53.in
@@ -43,6 +43,7 @@

: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/aws.sh

# Defaults
OCF_RESKEY_awscli_default="/usr/bin/aws"
@@ -377,7 +378,7 @@ r53_monitor() {
_get_ip() {
case $OCF_RESKEY_ip in
local|public)
- TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+ TOKEN=$(get_token)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
IPADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4")
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
new file mode 100644
index 000000000..fc557109c
--- /dev/null
+++ b/heartbeat/aws.sh
@@ -0,0 +1,46 @@
+#!/bin/sh
+#
+#
+# AWS Helper Scripts
+#
+#
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+# Defaults
+OCF_RESKEY_curl_retries_default="3"
+OCF_RESKEY_curl_sleep_default="1"
+
+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
+
+# Function to enable reusable IMDS token retrieval for efficient repeated access
+# File to store the token and timestamp
+TOKEN_FILE="/tmp/.imds_token"
+TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
+TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining
+
+# Function to fetch a new token
+fetch_new_token() {
+ TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME'" "http://169.254.169.254/latest/api/token")
+ echo "$TOKEN $(date +%s)" > "$TOKEN_FILE"
+ echo "$TOKEN"
+}
+
+# Function to retrieve or renew the token
+get_token() {
+ if [ -f "$TOKEN_FILE" ]; then
+ read -r STORED_TOKEN STORED_TIMESTAMP < "$TOKEN_FILE"
+ CURRENT_TIME=$(date +%s)
+ ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP))
+
+ if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then
+ # Token is still valid
+ echo "$STORED_TOKEN"
+ return
+ fi
+ fi
+ # Fetch a new token if not valid
+ fetch_new_token
+}
\ No newline at end of file
diff --git a/heartbeat/awseip b/heartbeat/awseip
index ffb6223a1..049c2e566 100755
--- a/heartbeat/awseip
+++ b/heartbeat/awseip
@@ -38,6 +38,7 @@

: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/aws.sh

#######################################################################

@@ -306,7 +307,7 @@ fi
ELASTIC_IP="${OCF_RESKEY_elastic_ip}"
ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}"
-TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+TOKEN=$(get_token)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index ca19ac086..de67981d8 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -37,6 +37,7 @@

: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/aws.sh

#######################################################################

diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index 0c4632cf9..922c6ea45 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -1110,35 +1110,4 @@ ocf_is_true "$OCF_TRACE_RA" && ocf_start_trace
# pacemaker sets HA_use_logd, some others use HA_LOGD :/
if ocf_is_true "$HA_use_logd"; then
: ${HA_LOGD:=yes}
-fi
-
-# File to store the token and timestamp
-TOKEN_FILE="/tmp/.imds_token"
-TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
-TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining
-
-# Function to fetch a new token
-fetch_new_token() {
- TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME")
- echo "$TOKEN $(date +%s)" > "$TOKEN_FILE"
- echo "$TOKEN"
-}
-
-# Function to retrieve or renew the token
-get_token() {
- if [[ -f "$TOKEN_FILE" ]]; then
- read -r STORED_TOKEN STORED_TIMESTAMP < "$TOKEN_FILE"
- CURRENT_TIME=$(date +%s)
- ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP))
-
- if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then
- # Token is still valid
- echo "$STORED_TOKEN"
- return
- fi
- fi
- # Fetch a new token if not valid
- fetch_new_token
-}
-
-
+fi
\ No newline at end of file

From 9f7be201923c8eab1b121f2067ed74a69841cf8a Mon Sep 17 00:00:00 2001
From: harshkiprofile <beer18317@gmail.com>
Date: Tue, 5 Nov 2024 19:12:34 +0530
Subject: [PATCH 5/7] Refactor to use common temp path and update shell syntax

---
heartbeat/Makefile.am | 2 +-
heartbeat/aws.sh | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 655740f14..8352f3a3d 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -218,7 +218,7 @@ ocfcommon_DATA = ocf-shellfuncs \
ocf-rarun \
ocf-distro \
apache-conf.sh \
- aws.sh \
+ aws.sh \
http-mon.sh \
sapdb-nosha.sh \
sapdb.sh \
diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
index fc557109c..c77f93b91 100644
--- a/heartbeat/aws.sh
+++ b/heartbeat/aws.sh
@@ -17,7 +17,7 @@ OCF_RESKEY_curl_sleep_default="1"

# Function to enable reusable IMDS token retrieval for efficient repeated access
# File to store the token and timestamp
-TOKEN_FILE="/tmp/.imds_token"
+TOKEN_FILE="${HA_RSCTMP}/.aws_imds_token"
TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining

@@ -35,7 +35,7 @@ get_token() {
CURRENT_TIME=$(date +%s)
ELAPSED_TIME=$((CURRENT_TIME - STORED_TIMESTAMP))

- if (( ELAPSED_TIME < (TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD) )); then
+ if [ "$ELAPSED_TIME" -lt "$((TOKEN_LIFETIME - TOKEN_EXPIRY_THRESHOLD))" ]; then
# Token is still valid
echo "$STORED_TOKEN"
return

From 4f61048064d1df3bebdb5c1441cf0020f213c01b Mon Sep 17 00:00:00 2001
From: harshkiprofile <beer18317@gmail.com>
Date: Tue, 5 Nov 2024 19:30:15 +0530
Subject: [PATCH 6/7] Consolidate curl_retry and curl_sleep variable to a
single location in aws.sh

---
heartbeat/aws-vpc-move-ip | 4 ----
heartbeat/aws-vpc-route53.in | 4 ----
heartbeat/awseip | 4 ----
heartbeat/awsvip | 4 ----
4 files changed, 16 deletions(-)

diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index f4b0492f2..3aa9ceb02 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -48,8 +48,6 @@ OCF_RESKEY_interface_default="eth0"
OCF_RESKEY_iflabel_default=""
OCF_RESKEY_monapi_default="false"
OCF_RESKEY_lookup_type_default="InstanceId"
-OCF_RESKEY_curl_retries_default="3"
-OCF_RESKEY_curl_sleep_default="1"

: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
@@ -63,8 +61,6 @@ OCF_RESKEY_curl_sleep_default="1"
: ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}}
: ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}}
: ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}}
-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
#######################################################################


diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
index f7e756782..85c8de3c1 100644
--- a/heartbeat/aws-vpc-route53.in
+++ b/heartbeat/aws-vpc-route53.in
@@ -54,8 +54,6 @@ OCF_RESKEY_hostedzoneid_default=""
OCF_RESKEY_fullname_default=""
OCF_RESKEY_ip_default="local"
OCF_RESKEY_ttl_default=10
-OCF_RESKEY_curl_retries_default="3"
-OCF_RESKEY_curl_sleep_default="1"

: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
@@ -65,8 +63,6 @@ OCF_RESKEY_curl_sleep_default="1"
: ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}}
: ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}}
: ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}}
-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}

usage() {
cat <<-EOT
diff --git a/heartbeat/awseip b/heartbeat/awseip
index 049c2e566..4b1c3bc6a 100755
--- a/heartbeat/awseip
+++ b/heartbeat/awseip
@@ -50,16 +50,12 @@ OCF_RESKEY_auth_type_default="key"
OCF_RESKEY_profile_default="default"
OCF_RESKEY_region_default=""
OCF_RESKEY_api_delay_default="3"
-OCF_RESKEY_curl_retries_default="3"
-OCF_RESKEY_curl_sleep_default="1"

: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}

meta_data() {
cat <<END
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index de67981d8..8c71e7fac 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -49,16 +49,12 @@ OCF_RESKEY_auth_type_default="key"
OCF_RESKEY_profile_default="default"
OCF_RESKEY_region_default=""
OCF_RESKEY_api_delay_default="3"
-OCF_RESKEY_curl_retries_default="3"
-OCF_RESKEY_curl_sleep_default="1"

: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
-: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
-: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}

meta_data() {
cat <<END

From d451c5c595b08685f84ec85da96ae9cb4fc076fe Mon Sep 17 00:00:00 2001
From: harshkiprofile <beer18317@gmail.com>
Date: Tue, 5 Nov 2024 20:50:24 +0530
Subject: [PATCH 7/7] aws.sh needs to be added to symlinkstargets in
 doc/man/Makefile.am

---
 doc/man/Makefile.am | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index ef7639bff..447f5cba3 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -42,7 +42,7 @@ radir = $(abs_top_builddir)/heartbeat
# required for out-of-tree build
symlinkstargets = \
ocf-distro ocf.py ocf-rarun ocf-returncodes \
- findif.sh apache-conf.sh http-mon.sh mysql-common.sh \
+ findif.sh apache-conf.sh aws.sh http-mon.sh mysql-common.sh \
nfsserver-redhat.sh ora-common.sh

preptree:
@ -1,161 +0,0 @@
From cc5ffa5e599c974c426e93faa821b342e96b916d Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 11 Nov 2024 12:46:27 +0100
Subject: [PATCH 1/2] aws.sh: chmod 600 $TOKEN_FILE, add get_instance_id() with
 DMI support, and use get_instance_id() in AWS agents

---
 heartbeat/aws-vpc-move-ip | 2 +-
 heartbeat/aws.sh | 30 +++++++++++++++++++++++++++---
 heartbeat/awseip | 2 +-
 heartbeat/awsvip | 2 +-
 4 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index 3aa9ceb02..09ae68b57 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -269,7 +269,7 @@ ec2ip_validate() {

TOKEN=$(get_token)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
- EC2_INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+ EC2_INSTANCE_ID=$(get_instance_id)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC

if [ -z "${EC2_INSTANCE_ID}" ]; then
diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
index c77f93b91..9cd343c16 100644
--- a/heartbeat/aws.sh
+++ b/heartbeat/aws.sh
@@ -9,8 +9,8 @@
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

# Defaults
-OCF_RESKEY_curl_retries_default="3"
-OCF_RESKEY_curl_sleep_default="1"
+OCF_RESKEY_curl_retries_default="4"
+OCF_RESKEY_curl_sleep_default="3"

: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
@@ -20,11 +20,13 @@ OCF_RESKEY_curl_sleep_default="1"
TOKEN_FILE="${HA_RSCTMP}/.aws_imds_token"
TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining
+DMI_FILE="/sys/devices/virtual/dmi/id/board_asset_tag" # Only supported on nitro-based instances.

# Function to fetch a new token
fetch_new_token() {
TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: $TOKEN_LIFETIME'" "http://169.254.169.254/latest/api/token")
echo "$TOKEN $(date +%s)" > "$TOKEN_FILE"
+ chmod 600 "$TOKEN_FILE"
echo "$TOKEN"
}

@@ -43,4 +45,26 @@ get_token() {
fi
# Fetch a new token if not valid
fetch_new_token
-}
\ No newline at end of file
+}
+
+get_instance_id() {
+ local INSTANCE_ID
+
+ # Try to get the EC2 instance ID from DMI first before falling back to IMDS.
+ ocf_log debug "EC2: Attempt to get EC2 Instance ID from local file."
+ if [ -r "$DMI_FILE" ] && [ -s "$DMI_FILE" ]; then
+ INSTANCE_ID="$(cat "$DMI_FILE")"
+ case "$INSTANCE_ID" in
+ i-0*) echo "$INSTANCE_ID"; return "$OCF_SUCCESS" ;;
+ esac
+ fi
+
+ INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+ if [ $? -ne 0 ]; then
+ ocf_exit_reason "Failed to get EC2 Instance ID"
+ exit $OCF_ERR_GENERIC
+ fi
+
+ echo "$INSTANCE_ID"
+ return "$OCF_SUCCESS"
+}
diff --git a/heartbeat/awseip b/heartbeat/awseip
index 4b1c3bc6a..7f38376dc 100755
--- a/heartbeat/awseip
+++ b/heartbeat/awseip
@@ -305,7 +305,7 @@ ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}"
TOKEN=$(get_token)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
-INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+INSTANCE_ID=$(get_instance_id)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC

case $__OCF_ACTION in
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index 8c71e7fac..0856ac5e4 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -265,7 +265,7 @@ fi
SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
TOKEN=$(get_token)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
-INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+INSTANCE_ID=$(get_instance_id)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
MAC_ADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/mac")
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC

From b8d3ecc6a8ce4baf4b28d02978dd573728ccf5fa Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 18 Nov 2024 11:10:42 +0100
Subject: [PATCH 2/2] aws.sh/ocf-shellfuncs: add ability to refresh token if it's
 invalid

---
 heartbeat/aws.sh | 1 +
 heartbeat/ocf-shellfuncs.in | 11 ++++++++++-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
index 9cd343c16..64f2e13a7 100644
--- a/heartbeat/aws.sh
+++ b/heartbeat/aws.sh
@@ -18,6 +18,7 @@ OCF_RESKEY_curl_sleep_default="3"
# Function to enable reusable IMDS token retrieval for efficient repeated access
# File to store the token and timestamp
TOKEN_FILE="${HA_RSCTMP}/.aws_imds_token"
+TOKEN_FUNC="fetch_new_token" # Used by curl_retry() if saved token is invalid
TOKEN_LIFETIME=21600 # Token lifetime in seconds (6 hours)
TOKEN_EXPIRY_THRESHOLD=3600 # Renew token if less than 60 minutes (1 hour) remaining
DMI_FILE="/sys/devices/virtual/dmi/id/board_asset_tag" # Only supported on nitro-based instances.
diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index 922c6ea45..8e51fa3c8 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -697,6 +697,15 @@ curl_retry()

ocf_log debug "result: $result"
[ $rc -eq 0 ] && break
+ if [ -n "$TOKEN" ] && [ -n "$TOKEN_FILE" ] && \
+ [ -f "$TOKEN_FILE" ] && [ -n "$TOKEN_FUNC" ] && \
+ echo "$result" | grep -q "The requested URL returned error: 401$"; then
+ local OLD_TOKEN="$TOKEN"
+ ocf_log err "Token invalid. Getting new token."
+ TOKEN=$($TOKEN_FUNC)
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
+ args=$(echo "$args" | sed "s/$OLD_TOKEN/$TOKEN/")
+ fi
sleep $sleep
done

@@ -1110,4 +1119,4 @@ ocf_is_true "$OCF_TRACE_RA" && ocf_start_trace
# pacemaker sets HA_use_logd, some others use HA_LOGD :/
if ocf_is_true "$HA_use_logd"; then
: ${HA_LOGD:=yes}
-fi
\ No newline at end of file
+fi
@ -1,184 +0,0 @@
From 392d40048a25d7cb73ec5b5e9f7a5862f7a3fd48 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 11 Nov 2024 12:22:27 +0100
Subject: [PATCH 1/2] aws.sh: add get_interface_mac()

---
 heartbeat/aws.sh | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
index 64f2e13a7..ebb4eb1f4 100644
--- a/heartbeat/aws.sh
+++ b/heartbeat/aws.sh
@@ -69,3 +69,24 @@ get_instance_id() {
echo "$INSTANCE_ID"
return "$OCF_SUCCESS"
}
+
+get_interface_mac() {
+ local MAC_FILE MAC_ADDR rc
+ MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address"
+ if [ -f "$MAC_FILE" ]; then
+ cmd="cat ${MAC_FILE}"
+ else
+ cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3"
+ fi
+ ocf_log debug "executing command: $cmd"
+ MAC_ADDR="$(eval $cmd)"
+ rc=$?
+ if [ $rc != 0 ]; then
+ ocf_log warn "command failed, rc: $rc"
+ return $OCF_ERR_GENERIC
+ fi
+ ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"
+
+ echo $MAC_ADDR
+ return $OCF_SUCCESS
+}

From 87337ac4da931d5a53c83d53d4bab17ee123ba9f Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 11 Nov 2024 12:26:38 +0100
Subject: [PATCH 2/2] awsvip: let user specify which interface to use, and make
 the parameter optional in aws-vpc-move-ip

---
 heartbeat/aws-vpc-move-ip | 20 ++++----------------
 heartbeat/aws.sh | 4 +++-
 heartbeat/awsvip | 24 +++++++++++++++++-------
 3 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index 09ae68b57..2afc0ba53 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -157,7 +157,7 @@ Role to use to query/update the route table
<content type="string" default="${OCF_RESKEY_routing_table_role_default}" />
</parameter>

-<parameter name="interface" required="1">
+<parameter name="interface" required="0">
<longdesc lang="en">
Name of the network interface, i.e. eth0
</longdesc>
@@ -321,7 +321,7 @@ ec2ip_monitor() {
ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call"
fi

- cmd="ip addr show to $OCF_RESKEY_ip up"
+ cmd="ip addr show dev $OCF_RESKEY_interface to $OCF_RESKEY_ip up"
ocf_log debug "executing command: $cmd"
RESULT=$($cmd | grep "$OCF_RESKEY_ip")
if [ -z "$RESULT" ]; then
@@ -331,7 +331,7 @@ ec2ip_monitor() {
level="info"
fi

- ocf_log "$level" "IP $OCF_RESKEY_ip not assigned to running interface"
+ ocf_log "$level" "IP $OCF_RESKEY_ip not assigned to interface $OCF_RESKEY_interface"
return $OCF_NOT_RUNNING
fi

@@ -369,19 +369,7 @@ ec2ip_drop() {
}

ec2ip_get_instance_eni() {
- MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address"
- if [ -f $MAC_FILE ]; then
- cmd="cat ${MAC_FILE}"
- else
- cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3"
- fi
- ocf_log debug "executing command: $cmd"
- MAC_ADDR="$(eval $cmd)"
- rc=$?
- if [ $rc != 0 ]; then
- ocf_log warn "command failed, rc: $rc"
- return $OCF_ERR_GENERIC
- fi
+ MAC_ADDR=$(get_interface_mac)
ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"

cmd="curl_retry \"$OCF_RESKEY_curl_retries\" \"$OCF_RESKEY_curl_sleep\" \"--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'\" \"http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id\""
diff --git a/heartbeat/aws.sh b/heartbeat/aws.sh
index ebb4eb1f4..216033afe 100644
--- a/heartbeat/aws.sh
+++ b/heartbeat/aws.sh
@@ -73,7 +73,9 @@ get_instance_id() {
get_interface_mac() {
local MAC_FILE MAC_ADDR rc
MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address"
- if [ -f "$MAC_FILE" ]; then
+ if [ -z "$OCF_RESKEY_interface" ]; then
+ cmd="curl_retry \"$OCF_RESKEY_curl_retries\" \"$OCF_RESKEY_curl_sleep\" \"--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'\" \"http://169.254.169.254/latest/meta-data/mac\""
+ elif [ -f "$MAC_FILE" ]; then
cmd="cat ${MAC_FILE}"
else
cmd="ip -br link show dev ${OCF_RESKEY_interface} | tr -s ' ' | cut -d' ' -f3"
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index 0856ac5e4..015180d5a 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -49,12 +49,14 @@ OCF_RESKEY_auth_type_default="key"
OCF_RESKEY_profile_default="default"
OCF_RESKEY_region_default=""
OCF_RESKEY_api_delay_default="3"
+OCF_RESKEY_interface_default=""

: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
+: ${OCF_RESKEY_interface=${OCF_RESKEY_interface_default}}

meta_data() {
cat <<END
@@ -125,6 +127,14 @@ a short delay between API calls, to avoid sending API too quick
<content type="integer" default="${OCF_RESKEY_api_delay_default}" />
</parameter>

+<parameter name="interface" required="0">
+<longdesc lang="en">
+Name of the network interface, i.e. eth0
+</longdesc>
+<shortdesc lang="en">network interface name</shortdesc>
+<content type="string" default="${OCF_RESKEY_interface_default}" />
+</parameter>
+
<parameter name="curl_retries" unique="0">
<longdesc lang="en">
curl retries before failing
@@ -207,16 +217,16 @@ awsvip_stop() {
}

awsvip_monitor() {
- $AWSCLI_CMD ec2 describe-instances \
- --instance-id "${INSTANCE_ID}" \
- --query 'Reservations[].Instances[].NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \
+ $AWSCLI_CMD ec2 describe-network-interfaces \
+ --network-interface-ids "${NETWORK_ID}" \
+ --query 'NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress[]' \
--output text | \
grep -qE "(^|\s)${SECONDARY_PRIVATE_IP}(\s|$)"
- RET=$?
-
- if [ $RET -ne 0 ]; then
+ if [ $? -ne 0 ]; then
+ [ "$__OCF_ACTION" = "monitor" ] && ! ocf_is_probe && ocf_log error "IP $SECONDARY_PRIVATE_IP not assigned to interface ${NETWORK_ID}"
return $OCF_NOT_RUNNING
fi
+
return $OCF_SUCCESS
}

@@ -267,7 +277,7 @@ TOKEN=$(get_token)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
INSTANCE_ID=$(get_instance_id)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
-MAC_ADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/mac")
+MAC_ADDRESS=$(get_interface_mac)
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
NETWORK_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id")
[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
448
SOURCES/RHEL-79823-portblock-fix-version-detection.patch
Normal file
@ -0,0 +1,448 @@
--- a/heartbeat/portblock 2025-02-20 14:54:18.047134471 +0100
+++ b/heartbeat/portblock 2025-02-20 14:09:44.546869740 +0100
@@ -25,6 +25,7 @@
# Defaults
OCF_RESKEY_protocol_default=""
OCF_RESKEY_portno_default=""
+OCF_RESKEY_direction_default="in"
OCF_RESKEY_action_default=""
OCF_RESKEY_ip_default="0.0.0.0/0"
OCF_RESKEY_reset_local_on_unblock_stop_default="false"
@@ -33,6 +34,7 @@

: ${OCF_RESKEY_protocol=${OCF_RESKEY_protocol_default}}
: ${OCF_RESKEY_portno=${OCF_RESKEY_portno_default}}
+: ${OCF_RESKEY_direction=${OCF_RESKEY_direction_default}}
: ${OCF_RESKEY_action=${OCF_RESKEY_action_default}}
: ${OCF_RESKEY_ip=${OCF_RESKEY_ip_default}}
: ${OCF_RESKEY_reset_local_on_unblock_stop=${OCF_RESKEY_reset_local_on_unblock_stop_default}}
@@ -217,6 +219,18 @@
<shortdesc lang="en">Connection state file synchronization script</shortdesc>
<content type="string" default="${OCF_RESKEY_sync_script_default}" />
</parameter>
+
+<parameter name="direction" unique="0" required="0">
+<longdesc lang="en">
+Whether to block incoming or outgoing traffic. Can be either "in",
+"out", or "both".
+If "in" is used, the incoming ports are blocked on the INPUT chain.
+If "out" is used, the outgoing ports are blocked on the OUTPUT chain.
+If "both" is used, both the incoming and outgoing ports are blocked.
+</longdesc>
+<shortdesc lang="en">Whether to block incoming or outgoing traffic, or both</shortdesc>
+<content type="string" default="${OCF_RESKEY_direction_default}" />
+</parameter>
</parameters>

<actions>
@@ -240,36 +254,73 @@
# and disable us -- but we're still in some sense active...
#

-#active_grep_pat {udp|tcp} portno,portno
+#active_grep_pat {udp|tcp} portno,portno ip {d|s}
+# d = look for destination ports
+# s = look for source ports
active_grep_pat()
{
w="[ ][ ]*"
any="0\\.0\\.0\\.0/0"
- echo "^DROP${w}${1}${w}--${w}${any}${w}${3}${w}multiport${w}dports${w}${2}\>"
+ src=$any dst=$3
+ if [ "$4" = "s" ]; then
+ local src=$3
+ local dst=$any
+ fi
+ # iptables 1.8.9 briefly broke the output format, returning the
+ # numeric protocol value instead of a string. Support both variants.
+ if [ "$1" = "tcp" ]; then
+ local prot="(tcp|6)"
+ else
+ local prot="(udp|17)"
+ fi
+ echo "^DROP${w}${prot}${w}--${w}${src}${w}${dst}${w}multiport${w}${4}ports${w}${2}$"
}

-#chain_isactive {udp|tcp} portno,portno ip
+#chain_isactive {udp|tcp} portno,portno ip chain
chain_isactive()
{
- PAT=`active_grep_pat "$1" "$2" "$3"`
- $IPTABLES $wait -n -L INPUT | grep "$PAT" >/dev/null
+ [ "$4" = "OUTPUT" ] && ds="s" || ds="d"
+ PAT=$(active_grep_pat "$1" "$2" "$3" "$ds")
+ $IPTABLES $wait -n -L "$4" | grep -qE "$PAT"
+}
+
+# netstat -tn and ss -Htn, split on whitespace and colon,
+# look very similar:
+# tcp 0 0 10.43.55.1 675 10.43.9.8 2049 ESTABLISHED
+# ESTAB 0 0 10.43.55.1 675 10.43.9.8 2049
+# so we can write one awk script for both
+get_established_tcp_connections()
+{
+ local columns
+ if [ -z "$1" ] ; then
+ columns='$4,$5, $6,$7'
+ else
+ # swap local and remote for "tickle_local"
+ columns='$6,$7, $4,$5'
+ fi
+ $ss_or_netstat | awk -F '[:[:space:]]+' '
+ ( $8 == "ESTABLISHED" || $1 == "ESTAB" ) && $4 == "'$OCF_RESKEY_ip'" \
+ {printf "%s:%s\t%s:%s\n", '"$columns"'}'
}

save_tcp_connections()
{
[ -z "$OCF_RESKEY_tickle_dir" ] && return
statefile=$OCF_RESKEY_tickle_dir/$OCF_RESKEY_ip
+ # If we have _no_ sync script, we probably have a shared
+ # (or replicated) directory, and need to fsync, or we might
+ # end up with the just truncated file after failover, exactly
+ # when we need it.
+ #
+ # If we _do_ have a sync script, it is not that important whether
+ # the local state file is fsync'ed or not, the sync script is
+ # responsible to "atomically" communicate the state to the peer(s).
if [ -z "$OCF_RESKEY_sync_script" ]; then
- netstat -tn |awk -F '[:[:space:]]+' '
- $8 == "ESTABLISHED" && $4 == "'$OCF_RESKEY_ip'" \
- {printf "%s:%s\t%s:%s\n", $4,$5, $6,$7}' |
- dd of="$statefile".new conv=fsync status=none &&
- mv "$statefile".new "$statefile"
+ get_established_tcp_connections |
+ dd of="$statefile".new conv=fsync status=none &&
+ mv "$statefile".new "$statefile"
else
- netstat -tn |awk -F '[:[:space:]]+' '
- $8 == "ESTABLISHED" && $4 == "'$OCF_RESKEY_ip'" \
- {printf "%s:%s\t%s:%s\n", $4,$5, $6,$7}' \
- > $statefile
+ get_established_tcp_connections > $statefile
$OCF_RESKEY_sync_script $statefile > /dev/null 2>&1 &
fi
}
@@ -277,7 +328,6 @@
tickle_remote()
{
[ -z "$OCF_RESKEY_tickle_dir" ] && return
- echo 1 > /proc/sys/net/ipv4/tcp_tw_recycle
f=$OCF_RESKEY_tickle_dir/$OCF_RESKEY_ip
[ -r $f ] || return
$TICKLETCP -n 3 < $f
@@ -289,11 +339,6 @@
f=$OCF_RESKEY_tickle_dir/$OCF_RESKEY_ip
[ -r $f ] || return

- checkcmd="netstat -tn"
- if ! have_binary "netstat"; then
- checkcmd="ss -Htn"
- fi
-
# swap "local" and "remote" address,
# so we tickle ourselves.
# We set up a REJECT with tcp-reset before we do so, so we get rid of
@@ -302,122 +347,152 @@
# the way if we switch-over and then switch-back in quick succession.
local i
awk '{ print $2, $1; }' $f | $TICKLETCP
- $checkcmd | grep -Fw $OCF_RESKEY_ip || return
+ $ss_or_netstat | grep -Fw $OCF_RESKEY_ip || return
for i in 0.1 0.5 1 2 4 ; do
sleep $i
- awk '{ print $2, $1; }' $f | $TICKLETCP
- $checkcmd | grep -Fw $OCF_RESKEY_ip || break
+ # now kill what is currently in the list,
+ # not what was recorded during last monitor
+ get_established_tcp_connections swap | $TICKLETCP
+ $ss_or_netstat | grep -Fw $OCF_RESKEY_ip || break
done
}

SayActive()
{
- echo "$CMD DROP rule for INPUT chain [$*] is running (OK)"
+ ocf_log debug "$CMD DROP rule [$*] is running (OK)"
}

SayConsideredActive()
{
- echo "$CMD DROP rule for INPUT chain [$*] considered to be running (OK)"
+ ocf_log debug "$CMD DROP rule [$*] considered to be running (OK)"
}

SayInactive()
{
- echo "$CMD DROP rule for INPUT chain [$*] is inactive"
+ ocf_log debug "$CMD DROP rule [$*] is inactive"
}

-#IptablesStatus {udp|tcp} portno,portno ip {block|unblock}
+#IptablesStatus {udp|tcp} portno,portno ip {in|out|both} {block|unblock}
IptablesStatus() {
- local rc
- rc=$OCF_ERR_GENERIC
- activewords="$CMD $1 $2 is running (OK)"
- if chain_isactive "$1" "$2" "$3"; then
- case $4 in
- block)
- SayActive $*
- rc=$OCF_SUCCESS
- ;;
- *)
- SayInactive $*
- rc=$OCF_NOT_RUNNING
- ;;
- esac
- else
- case $4 in
- block)
- if ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" status; then
- SayConsideredActive $*
- rc=$OCF_SUCCESS
- else
- SayInactive $*
- rc=$OCF_NOT_RUNNING
- fi
- ;;
-
- *)
- if ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" status; then
- SayActive $*
- #This is only run on real monitor events.
- save_tcp_connections
- rc=$OCF_SUCCESS
- else
- SayInactive $*
- rc=$OCF_NOT_RUNNING
- fi
- ;;
- esac
- fi
-
- return $rc
+ local rc
+ rc=$OCF_ERR_GENERIC
+ is_active=0
+ if [ "$4" = "in" ] || [ "$4" = "both" ]; then
+ chain_isactive "$1" "$2" "$3" INPUT
+ is_active=$?
+ fi
+ if [ "$4" = "out" ] || [ "$4" = "both" ]; then
+ chain_isactive "$1" "$2" "$3" OUTPUT
+ r=$?
+ [ $r -gt $is_active ] && is_active=$r
+ fi
+ if [ $is_active -eq 0 ]; then
+ case $5 in
+ block)
+ SayActive $*
+ rc=$OCF_SUCCESS
+ ;;
+ *)
+ SayInactive $*
+ rc=$OCF_NOT_RUNNING
+ ;;
+ esac
+ else
+ case $5 in
+ block)
+ if ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" status; then
+ SayConsideredActive $*
+ rc=$OCF_SUCCESS
+ else
+ SayInactive $*
+ rc=$OCF_NOT_RUNNING
+ fi
+ ;;
+ *)
+ if ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" status; then
+ SayActive $*
+ #This is only run on real monitor events.
+ save_tcp_connections
+ rc=$OCF_SUCCESS
+ else
+ SayInactive $*
+ rc=$OCF_NOT_RUNNING
+ fi
+ ;;
+ esac
+ fi
+ return $rc
}

-#IptablesBLOCK {udp|tcp} portno,portno ip
-IptablesBLOCK()
+#DoIptables {-I|-D} {udp|tcp} portno,portno ip chain
+DoIptables()
{
- local rc=0
- local try_reset=false
- if [ "$1/$4/$__OCF_ACTION" = tcp/unblock/stop ] &&
- ocf_is_true $reset_local_on_unblock_stop
- then
- try_reset=true
- fi
- if
- chain_isactive "$1" "$2" "$3"
- then
- : OK -- chain already active
+ op=$1 proto=$2 ports=$3 ip=$4 chain=$5
+ active=0; chain_isactive "$proto" "$ports" "$ip" "$chain" && active=1
+ want_active=0; [ "$op" = "-I" ] && want_active=1
+ ocf_log debug "active: $active want_active: $want_active"
+ if [ $active -eq $want_active ] ; then
+ : Chain already in desired state
else
- if $try_reset ; then
- $IPTABLES $wait -I OUTPUT -p "$1" -s "$3" -m multiport --sports "$2" -j REJECT --reject-with tcp-reset
- tickle_local
+ [ "$chain" = "OUTPUT" ] && ds="s" || ds="d"
+ $IPTABLES $wait "$op" "$chain" -p "$proto" -${ds} "$ip" -m multiport --${ds}ports "$ports" -j DROP
+ fi
+}
+
+#IptablesBLOCK {udp|tcp} portno,portno ip {in|out|both} {block|unblock}
+IptablesBLOCK()
+{
+ local rc_in=0
+ local rc_out=0
+ if [ "$4" = "in" ] || [ "$4" = "both" ]; then
+ local try_reset=false
+ if [ "$1/$5/$__OCF_ACTION" = tcp/unblock/stop ] &&
+ ocf_is_true $reset_local_on_unblock_stop
+ then
+ try_reset=true
fi
- $IPTABLES $wait -I INPUT -p "$1" -d "$3" -m multiport --dports "$2" -j DROP
- rc=$?
- if $try_reset ; then
- $IPTABLES $wait -D OUTPUT -p "$1" -s "$3" -m multiport --sports "$2" -j REJECT --reject-with tcp-reset
+ if
+ chain_isactive "$1" "$2" "$3" INPUT
+ then
+ : OK -- chain already active
+ else
+ if $try_reset ; then
+ $IPTABLES $wait -I OUTPUT -p "$1" -s "$3" -m multiport --sports "$2" -j REJECT --reject-with tcp-reset
+ tickle_local
+ fi
+ $IPTABLES $wait -I INPUT -p "$1" -d "$3" -m multiport --dports "$2" -j DROP
+ rc_in=$?
+ if $try_reset ; then
+ $IPTABLES $wait -D OUTPUT -p "$1" -s "$3" -m multiport --sports "$2" -j REJECT --reject-with tcp-reset
+ fi
fi
fi
+ if [ "$4" = "out" ] || [ "$4" = "both" ]; then
+ DoIptables -I "$1" "$2" "$3" OUTPUT
+ rc_out=$?
+ fi

- return $rc
+ [ $rc_in -gt $rc_out ] && return $rc_in || return $rc_out
}

-#IptablesUNBLOCK {udp|tcp} portno,portno ip
+#IptablesUNBLOCK {udp|tcp} portno,portno ip {in|out|both}
IptablesUNBLOCK()
{
- if
- chain_isactive "$1" "$2" "$3"
- then
- $IPTABLES $wait -D INPUT -p "$1" -d "$3" -m multiport --dports "$2" -j DROP
- else
- : Chain Not active
+ if [ "$4" = "in" ] || [ "$4" = "both" ]; then
+ DoIptables -D "$1" "$2" "$3" INPUT
+ fi
+ if [ "$4" = "out" ] || [ "$4" = "both" ]; then
+ DoIptables -D "$1" "$2" "$3" OUTPUT
fi

return $?
}

-#IptablesStart {udp|tcp} portno,portno ip {block|unblock}
+#IptablesStart {udp|tcp} portno,portno ip {in|out|both} {block|unblock}
IptablesStart()
{
ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" start
- case $4 in
+ case $5 in
block) IptablesBLOCK "$@";;
unblock)
IptablesUNBLOCK "$@"
@@ -432,11 +507,11 @@
return $?
}

-#IptablesStop {udp|tcp} portno,portno ip {block|unblock}
+#IptablesStop {udp|tcp} portno,portno ip {in|out|both} {block|unblock}
IptablesStop()
{
ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" stop
- case $4 in
+ case $5 in
block) IptablesUNBLOCK "$@";;
unblock)
save_tcp_connections
@@ -454,7 +529,7 @@
CheckPort() {
# Examples of valid port: "1080", "1", "0080"
# Examples of invalid port: "1080bad", "0", "0000", ""
- echo $1 |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*'
+ echo $1 | $EGREP -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*'
}

IptablesValidateAll()
@@ -543,7 +618,7 @@
fi

# iptables v1.4.20+ is required to use -w (wait)
-version=$(iptables -V | awk -F ' v' '{print $NF}')
+version=$(iptables -V | grep -oE '[0-9]+[\.0-9]+')
ocf_version_cmp "$version" "1.4.19.1"
if [ "$?" -eq "2" ]; then
wait="-w"
@@ -553,21 +628,36 @@

protocol=$OCF_RESKEY_protocol
portno=$OCF_RESKEY_portno
+direction=$OCF_RESKEY_direction
action=$OCF_RESKEY_action
ip=$OCF_RESKEY_ip
reset_local_on_unblock_stop=$OCF_RESKEY_reset_local_on_unblock_stop

+
+# If "tickle" is enabled, we need to record the list of currently established
+# connections during monitor. Use ss where available, and netstat otherwise.
+if [ -n "$OCF_RESKEY_tickle_dir" ] ; then
+ if have_binary ss ; then
+ ss_or_netstat="ss -Htn"
+ elif have_binary netstat ; then
+ ss_or_netstat="netstat -tn"
+ else
+ ocf_log err "Neither ss nor netstat found, but needed to record established connections."
+ exit $OCF_ERR_INSTALLED
+ fi
+fi
+
case $1 in
start)
- IptablesStart $protocol $portno $ip $action
+ IptablesStart $protocol $portno $ip $direction $action
;;

stop)
- IptablesStop $protocol $portno $ip $action
+ IptablesStop $protocol $portno $ip $direction $action
;;

status|monitor)
- IptablesStatus $protocol $portno $ip $action
+ IptablesStatus $protocol $portno $ip $direction $action
;;

validate-all)
15
SOURCES/aliyun-vpc-move-ip-4-bundled.patch
Normal file
@ -0,0 +1,15 @@
--- a/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:45:38.432860930 +0200
+++ b/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:51:06.341211557 +0200
@@ -35,10 +35,10 @@
USAGE="usage: $0 {start|stop|status|meta-data}";

if [ "${OCF_RESKEY_aliyuncli}" = "detect" ]; then
- OCF_RESKEY_aliyuncli="$(which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)"
+ OCF_RESKEY_aliyuncli="$(which aliyuncli-ra 2> /dev/null || which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)"
fi

-if [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli' ]; then
+if [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli-ra' ] || [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli' ]; then
OUTPUT="text"
EXECUTING='{ print $3 }'
IFS_=" "
398
SOURCES/aliyuncli-python3-fixes.patch
Normal file
@ -0,0 +1,398 @@
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 14:40:39.656330971 +0100
@@ -13,7 +13,7 @@

def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
return filename, "A file name is needed! please use \'--filename\' and add the file name."
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 14:41:48.927128430 +0100
@@ -13,7 +13,7 @@

def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
print("A profile is needed! please use \'--filename\' and add the profile name.")
@@ -21,7 +21,7 @@

def getInstanceCount(self,keyValues):
count = 1
- if keyValues.has_key('--instancecount') and len(keyValues['--instancecount']) > 0:
+ if '--instancecount' in keyValues and len(keyValues['--instancecount']) > 0:
if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0:
count = keyValues['--instancecount'][0]
else:
@@ -113,7 +113,7 @@

def isAllocatePublicIpAddress(self,keyValues):
_publicIp = False
- if keyValues.has_key('--allocatepublicip') and len(keyValues['--allocatepublicip']) > 0:
+ if '--allocatepublicip' in keyValues and len(keyValues['--allocatepublicip']) > 0:
if keyValues['--allocatepublicip'][0] == "yes":
_publicIp = True
return _publicIp
@@ -125,7 +125,7 @@
'''
data = json.loads(jsonbody)
'''
- if data.has_key('InstanceId') and len(data['InstanceId']) > 0:
+ if 'InstanceId' in data and len(data['InstanceId']) > 0:
instanceId = data['InstanceId']
except Exception as e:
pass
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 14:42:11.772731833 +0100
@@ -38,7 +38,7 @@

def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
return filename, "A file name is needed! please use \'--filename\' and add the file name."
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 14:39:09.247900469 +0100
@@ -13,7 +13,7 @@

def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
return filename, "A filename is needed! please use \'--filename\' and add the file name."
@@ -21,7 +21,7 @@
def getInstanceCount(self,keyValues):
count = 1
import_count = "--count"
- if keyValues.has_key(import_count) and len(keyValues[import_count]) > 0:
+ if import_count in keyValues and len(keyValues[import_count]) > 0:
if keyValues[import_count][0].isdigit() and int(keyValues[import_count][0]) >= 0:
count = keyValues[import_count][0]
else:
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2019-02-19 11:01:46.116653274 +0100
@@ -17,37 +17,37 @@

def getConfigHandlerOptions(self):
return [ConfigCmd.name]
-
+
def showConfig(self):
_credentialsPath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.credentials)
_configurePath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.configure)
config = dict()
configContent = dict()
- credentialsContent = dict ()
- if os.path.exists(_configurePath):
+ credentialsContent = dict ()
+ if os.path.exists(_configurePath):
for line in open(_configurePath):
line = line.strip('\n')
if line.find('=') > 0:
list = line.split("=",1)
- configContent[list[0]] = list[1]
- else:
- pass
- config['configure'] = configContent
- if os.path.exists(_credentialsPath):
- for line in open(_credentialsPath):
+ configContent[list[0]] = list[1]
+ else:
+ pass
+ config['configure'] = configContent
+ if os.path.exists(_credentialsPath):
+ for line in open(_credentialsPath):
line = line.strip('\n')
if line.find('=') > 0:
list = line.split("=",1)
- credentialsContent[list[0]] = list[1]
- else:
- pass
- config ['credentials'] = credentialsContent
- response.display_response("showConfigure",config,'table')
+ credentialsContent[list[0]] = list[1]
+ else:
+ pass
+ config ['credentials'] = credentialsContent
+ response.display_response("showConfigure",config,'table')
def importConfig():
pass
def exportConfig():
pass
-
+

if __name__ == "__main__":
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 14:40:12.267806439 +0100
@@ -20,7 +20,7 @@
def handleProfileCmd(self, cmd, keyValues):
if cmd.lower() == ProfileCmd.useProfile.lower(): # confirm command is right
#check --name is valid
- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0:
+ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0:
_value = keyValues[ProfileCmd.name][0] # use the first value
self.extensionCliHandler.setUserProfile(_value)
else:
@@ -34,7 +34,7 @@
newProfileName = ''
if cmd.lower() == ProfileCmd.addProfile.lower(): # confirm command is right
#check --name is valid
- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0:
+ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0:
_value = keyValues[ProfileCmd.name][0] # check the first value
# only input key and secret
newProfileName = _value
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 14:35:32.009660989 +0100
@@ -137,9 +137,9 @@
values.append(self.args[index])
index = index + 1
keyValues[currentValue] = values
- if keyValues.has_key(keystr) and keyValues[keystr].__len__() > 0:
+ if keystr in keyValues and keyValues[keystr].__len__() > 0:
_key = keyValues[keystr][0]
- if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0:
+ if secretstr in keyValues and keyValues[secretstr].__len__() > 0:
_secret = keyValues[secretstr][0]
#print("accesskeyid: ", _key , "accesskeysecret: ",_secret)
return _key, _secret
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2019-02-19 13:35:35.738680413 +0100
@@ -19,8 +19,9 @@
'''

import sys
-reload(sys)
-sys.setdefaultencoding('utf-8')
+if sys.version_info[0] < 3:
+ reload(sys)
+ sys.setdefaultencoding('utf-8')
__author__ = 'xixi.xxx'
import aliyunCliMain

diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 11:15:19.920089641 +0100
@@ -18,7 +18,7 @@
'''

import aliyunCliConfiugre
-import urllib2
+import urllib3
import re
import os
import platform
@@ -151,7 +151,7 @@
# this functino will get the latest version
def _getLatestTimeFromServer(self):
try:
- f = urllib2.urlopen(self.configure.server_url,data=None,timeout=5)
+ f = urllib3.urlopen(self.configure.server_url,data=None,timeout=5)
s = f.read()
return s
except Exception as e:
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 14:37:28.221649497 +0100
@@ -26,7 +26,7 @@
import aliyunSdkConfigure
import json
import cliError
-import urllib2
+import urllib3
import handleEndPoint

from __init__ import __version__
@@ -259,7 +259,7 @@
def changeEndPoint(self, classname, keyValues):
endpoint = "Endpoint"
try:
- if keyValues.has_key(endpoint) and keyValues[endpoint].__len__() > 0:
+ if endpoint in keyValues and keyValues[endpoint].__len__() > 0:
classname._RestApi__domain = keyValues[endpoint][0]
except Exception as e:
pass
@@ -444,10 +444,10 @@

def getTempVersion(self,keyValues):
key='--version'
- if keyValues is not None and keyValues.has_key(key):
+ if keyValues is not None and key in keyValues:
return keyValues.get(key)
key = 'version'
- if keyValues is not None and keyValues.has_key(key):
+ if keyValues is not None and key in keyValues:
return keyValues.get(key)

def getVersionFromFile(self,cmd):
@@ -513,7 +513,7 @@
self.checkForServer(response,cmd,operation)
def getRequestId(self,response):
try:
- if response.has_key('RequestId') and len(response['RequestId']) > 0:
+ if 'RequestId' in response and len(response['RequestId']) > 0:
requestId = response['RequestId']
return requestId
except Exception:
@@ -532,7 +532,7 @@
ua = ""
url = configure.server_url + "?requesId=" + requestId + "&ak=" + ak +"&ua="+ua+"&cmd="+cmd+"&operation="+operation
try:
- f = urllib2.urlopen(url,data=None,timeout=5)
+ f = urllib3.urlopen(url,data=None,timeout=5)
s = f.read()
return s
except Exception :
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 12:08:17.333785359 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 14:38:04.032029661 +0100
@@ -39,7 +39,7 @@

def sdkConfigure(self,cmd,operation):
keyValues = self.parser._getKeyValues()
- if keyValues.has_key('--version') and len(keyValues['--version']) > 0:
+ if '--version' in keyValues and len(keyValues['--version']) > 0:
version=keyValues['--version'][0]
filename=self.fileName
self.writeCmdVersionToFile(cmd,version,filename)
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 12:08:17.333785359 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 11:12:58.670708353 +0100
@@ -23,6 +23,8 @@
import aliyunCliParser
import platform

+if sys.version_info[0] > 2:
+ raw_input = input

OSS_CREDS_FILENAME = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~')
OSS_CONFIG_SECTION = 'OSSCredentials'
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 12:08:17.333785359 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 11:14:58.926181598 +0100
@@ -19,7 +19,7 @@
#/usr/bin/env python
#!-*- coding:utf-8 -*-
import os
-import urllib2
+import urllib3
import cliError


@@ -64,9 +64,9 @@
print(e)
def _getParamFromUrl(prefix,value,mode):

- req = urllib2.Request(value)
+ req = urllib3.Request(value)
try:
- response=urllib2.urlopen(req)
+ response=urllib3.urlopen(req)
if response.getcode() == 200:
return response.read()
else:
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/six.py b/bundled/aliyun/aliyun-cli/aliyuncli/six.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2019-02-19 11:14:40.505262286 +0100
@@ -340,8 +340,8 @@


_urllib_error_moved_attributes = [
- MovedAttribute("URLError", "urllib2", "urllib.error"),
- MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("URLError", "urllib3", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib3", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
@@ -359,34 +359,34 @@


_urllib_request_moved_attributes = [
- MovedAttribute("urlopen", "urllib2", "urllib.request"),
- MovedAttribute("install_opener", "urllib2", "urllib.request"),
- MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("urlopen", "urllib3", "urllib.request"),
+ MovedAttribute("install_opener", "urllib3", "urllib.request"),
+ MovedAttribute("build_opener", "urllib3", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
- MovedAttribute("Request", "urllib2", "urllib.request"),
- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
- MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
- MovedAttribute("FileHandler", "urllib2", "urllib.request"),
- MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("Request", "urllib3", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib3", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib3", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib3", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib3", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib3", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib3", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib3", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib3", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
diff -uNr a/bundled/aliyun/aliyun-cli/setup.py b/bundled/aliyun/aliyun-cli/setup.py
--- a/bundled/aliyun/aliyun-cli/setup.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/setup.py 2019-02-19 13:33:29.069848394 +0100
@@ -24,7 +24,7 @@

install_requires = [
'colorama>=0.2.5,<=0.3.3',
- 'jmespath>=0.7.0,<=0.7.1',
+ 'jmespath>=0.7.0',
]
def main():
setup(
14
SOURCES/bz1691456-gcloud-dont-detect-python2.patch
Normal file
@ -0,0 +1,14 @@
--- a/bundled/gcp/google-cloud-sdk/bin/gcloud 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/bin/gcloud 2021-10-14 11:30:17.726138166 +0200
@@ -128,6 +128,11 @@
fi
}

+if [ -z "$CLOUDSDK_PYTHON" ]; then
+ CLOUDSDK_PYTHON="/usr/libexec/platform-python"
+ CLOUDSDK_PYTHON_SITEPACKAGES=1
+fi
+
setup_cloudsdk_python

# $PYTHONHOME can interfere with gcloud. Users should use
52
SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch
Normal file
@ -0,0 +1,52 @@
From f91804ff4772e3ab41f46e28d370f57898700333 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Thu, 10 Dec 2020 08:19:21 +0100
Subject: [PATCH] fixes #1625: infinite loop in SML lexer

Reason was a lookahead-only pattern which was included in the state
where the lookahead was transitioning to.
---
 pygments/lexers/ml.py | 12 ++++++------
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py
index 8ca8ce3eb..f2ac367c5 100644
--- a/pygments/lexers/ml.py
+++ b/pygments/lexers/ml.py
@@ -142,7 +142,7 @@ def id_callback(self, match):
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
- (r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
+ (r'\b(exception)\b(?!\')', Keyword.Reserved, 'ename'),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
@@ -315,15 +315,14 @@ def id_callback(self, match):
'ename': [
include('whitespace'),

- (r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
+ (r'(and\b)(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
- (r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
+ (r'(and\b)(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
+ (r'(%s)|(%s)' % (alphanumid_re, symbolicid_re), Name.Class),

- include('breakout'),
- include('core'),
- (r'\S+', Error),
+ default('#pop'),
],

'datcon': [
@@ -445,6 +444,7 @@ class OcamlLexer(RegexLexer):
],
}

+
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).
138
SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch
Normal file
@ -0,0 +1,138 @@
From 2e7e8c4a7b318f4032493773732754e418279a14 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Mon, 11 Jan 2021 09:46:34 +0100
Subject: [PATCH] Fix several exponential/cubic complexity regexes found by Ben
 Caller/Doyensec

---
 pygments/lexers/archetype.py | 2 +-
 pygments/lexers/factor.py | 4 ++--
 pygments/lexers/jvm.py | 1 -
 pygments/lexers/matlab.py | 6 +++---
 pygments/lexers/objective.py | 4 ++--
 pygments/lexers/templates.py | 2 +-
 pygments/lexers/varnish.py | 2 +-
 8 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py
index 65046613d..26f5ea8c9 100644
--- a/pygments/lexers/archetype.py
+++ b/pygments/lexers/archetype.py
@@ -58,7 +58,7 @@ class AtomsLexer(RegexLexer):
(r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|'
r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date),
(r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
- (r'[+-]?(\d+)*\.\d+%?', Number.Float),
+ (r'[+-]?\d*\.\d+%?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[+-]?\d+%?', Number.Integer),
],
diff --git a/pygments/lexers/factor.py b/pygments/lexers/factor.py
index be7b30dff..9200547f9 100644
--- a/pygments/lexers/factor.py
+++ b/pygments/lexers/factor.py
@@ -265,7 +265,7 @@ class FactorLexer(RegexLexer):
(r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),

# strings
- (r'"""\s+(?:.|\n)*?\s+"""', String),
+ (r'"""\s(?:.|\n)*?\s"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),
@@ -322,7 +322,7 @@ class FactorLexer(RegexLexer):
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
- (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
+ (r'(\{\s+)(\S+)(\s[^}]+\s\}\s)',
bygroups(Text, Name.Variable, Text)),
(r'\S+', Name.Variable),
],
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 62dfd45e5..9a9397c2d 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -981,7 +981,6 @@ class CeylonLexer(RegexLexer):
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
- (r'".*``.*``.*"', String.Interpol),
(r'(\.)([a-z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py
index 4823c6a7e..578848623 100644
--- a/pygments/lexers/matlab.py
+++ b/pygments/lexers/matlab.py
@@ -137,7 +137,7 @@ class MatlabLexer(RegexLexer):
(r'.', Comment.Multiline),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
@@ -638,7 +638,7 @@ class OctaveLexer(RegexLexer):
(r"[^']*'", String, '#pop'),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
@@ -710,7 +710,7 @@ class ScilabLexer(RegexLexer):
(r'.', String, '#pop'),
|
||||
],
|
||||
'deffunc': [
|
||||
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
|
||||
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
|
||||
bygroups(Whitespace, Text, Whitespace, Punctuation,
|
||||
Whitespace, Name.Function, Punctuation, Text,
|
||||
Punctuation, Whitespace), '#pop'),
|
||||
diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py
|
||||
index 34e4062f6..38ac9bb05 100644
|
||||
--- a/pygments/lexers/objective.py
|
||||
+++ b/pygments/lexers/objective.py
|
||||
@@ -261,11 +261,11 @@ class LogosLexer(ObjectiveCppLexer):
|
||||
'logos_classname'),
|
||||
(r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
|
||||
bygroups(Keyword, Text, Name.Class)),
|
||||
- (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)',
|
||||
+ (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)',
|
||||
bygroups(Keyword, Text, Name.Variable, Text, String, Text)),
|
||||
(r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation),
|
||||
'function'),
|
||||
- (r'(%new)(\s*)(\()(\s*.*?\s*)(\))',
|
||||
+ (r'(%new)(\s*)(\()(.*?)(\))',
|
||||
bygroups(Keyword, Text, Keyword, String, Keyword)),
|
||||
(r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)),
|
||||
inherit,
|
||||
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
|
||||
index 33c06c4c4..5c3346b4c 100644
|
||||
--- a/pygments/lexers/templates.py
|
||||
+++ b/pygments/lexers/templates.py
|
||||
@@ -1405,7 +1405,7 @@ class EvoqueLexer(RegexLexer):
|
||||
# see doc for handling first name arg: /directives/evoque/
|
||||
# + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
|
||||
# should be using(PythonLexer), not passed out as String
|
||||
- (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
|
||||
+ (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?'
|
||||
r'(.*?)((?(4)%)\})',
|
||||
bygroups(Punctuation, Name.Builtin, Punctuation, None,
|
||||
String, using(PythonLexer), Punctuation)),
|
||||
diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py
|
||||
index 23653f7a1..9d358bd7c 100644
|
||||
--- a/pygments/lexers/varnish.py
|
||||
+++ b/pygments/lexers/varnish.py
|
||||
@@ -61,7 +61,7 @@ def analyse_text(text):
|
||||
bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)),
|
||||
(r'(\.probe)(\s*=\s*)(\{)',
|
||||
bygroups(Name.Attribute, Operator, Punctuation), 'probe'),
|
||||
- (r'(\.\w+\b)(\s*=\s*)([^;]*)(\s*;)',
|
||||
+ (r'(\.\w+\b)(\s*=\s*)([^;\s]*)(\s*;)',
|
||||
bygroups(Name.Attribute, Operator, using(this), Punctuation)),
|
||||
(r'\{', Punctuation, '#push'),
|
||||
(r'\}', Punctuation, '#pop'),
|
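All of these rewrites follow the same recipe: eliminate nested or ambiguous quantifiers so that a failing match backtracks linearly instead of exponentially. The archetype.py change is the easiest one to measure in isolation (a standalone sketch; the exact timings are illustrative only):

    import re
    import time

    OLD = re.compile(r'[+-]?(\d+)*\.\d+%?')  # (\d+)* nests quantifiers
    NEW = re.compile(r'[+-]?\d*\.\d+%?')     # same matches, no nesting

    for n in (16, 20, 24):
        digits = '1' * n  # all digits, no '.' -> the match must fail
        t0 = time.perf_counter()
        OLD.match(digits)  # explores ~2**n ways to split the digit run
        t_old = time.perf_counter() - t0
        t0 = time.perf_counter()
        NEW.match(digits)  # fails after one linear scan
        t_new = time.perf_counter() - t0
        print('n=%d old=%.4fs new=%.6fs' % (n, t_old, t_new))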
24
SOURCES/bz1992661-mysql-use-ssl-mode.patch
Normal file
@@ -0,0 +1,24 @@
From ed5bc606a4db5108995df9297698cf9dc14cccb2 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 18 Jan 2022 11:32:05 +0100
Subject: [PATCH] mysql-common: fix local SSL connection by using
--ssl-mode=REQUIRED which is available on 5.7+ (--ssl is not available in
8.0)

---
heartbeat/mysql-common.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh
index 459948b10..de8763544 100755
--- a/heartbeat/mysql-common.sh
+++ b/heartbeat/mysql-common.sh
@@ -97,7 +97,7 @@ MYSQL_BINDIR=`dirname ${OCF_RESKEY_binary}`

MYSQL=$OCF_RESKEY_client_binary
if ocf_is_true "$OCF_RESKEY_replication_require_ssl"; then
- MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="--ssl"
+ MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="--ssl-mode=REQUIRED"
else
MYSQL_OPTIONS_LOCAL_SSL_OPTIONS=""
fi
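Background for the one-liner above: the mysql client's --ssl option was removed in MySQL 8.0, while --ssl-mode has existed since 5.7, so --ssl-mode=REQUIRED is the portable way to insist on an encrypted connection. A hypothetical helper mirroring the shell logic:

    # Sketch only: builds the client argument the patched shell code selects.
    def local_ssl_options(require_ssl):
        return ['--ssl-mode=REQUIRED'] if require_ssl else []

    cmd = ['mysql'] + local_ssl_options(True) + ['-e', 'SELECT 1;']
    print(' '.join(cmd))  # mysql --ssl-mode=REQUIRED -e SELECT 1;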
23
SOURCES/bz1995178-storage-mon-fix-typo.patch
Normal file
@@ -0,0 +1,23 @@
From 09cde6531a87fd6a04568eaae94d5c489f36a8b6 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 6 Sep 2021 15:07:41 +0200
Subject: [PATCH] storage-mon: update metadata to suggest usage in combination
with HealthSMART agent

---
heartbeat/storage-mon.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in
index 5b289fe55..875095670 100644
--- a/heartbeat/storage-mon.in
+++ b/heartbeat/storage-mon.in
@@ -75,7 +75,7 @@ meta_data() {
<longdesc lang="en">
System health agent that checks the storage I/O status of the given drives and
updates the #health-storage attribute. Usage is highly recommended in combination
-with storage-mon monitoring agent. The agent currently support a maximum of 25
+with the HealthSMART monitoring agent. The agent currently support a maximum of 25
devices per instance.
</longdesc>
<shortdesc lang="en">storage I/O health status</shortdesc>
2016
SOURCES/bz2003117-all-agents-set-correct-agent-ocf-version.patch
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,64 @@
From fcd2565602146c0b9317d159cecb8935e304c7ce Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 30 Sep 2021 10:23:17 +0200
Subject: [PATCH] gcp-pd-move/gcp-vpc-move-route: dont fail failed resources
instantly (caused by OCF_ERR_CONFIGURED)

---
heartbeat/gcp-pd-move.in | 4 ++--
heartbeat/gcp-vpc-move-route.in | 6 +++---
2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
index e99cc71f88..cbe703c3c5 100644
--- a/heartbeat/gcp-pd-move.in
+++ b/heartbeat/gcp-pd-move.in
@@ -157,7 +157,7 @@ def populate_vars():
CONN = googleapiclient.discovery.build('compute', 'v1')
except Exception as e:
logger.error('Couldn\'t connect with google api: ' + str(e))
- sys.exit(ocf.OCF_ERR_CONFIGURED)
+ sys.exit(ocf.OCF_ERR_GENERIC)

for param in PARAMETERS:
value = os.environ.get('OCF_RESKEY_%s' % param, PARAMETERS[param])
@@ -172,7 +172,7 @@ def populate_vars():
except Exception as e:
logger.error(
'Couldn\'t get instance name, is this running inside GCE?: ' + str(e))
- sys.exit(ocf.OCF_ERR_CONFIGURED)
+ sys.exit(ocf.OCF_ERR_GENERIC)

PROJECT = get_metadata('project/project-id')
if PARAMETERS['disk_scope'] in ['detect', 'regional']:
diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
index dac6e4ea8c..6b240c04d0 100644
--- a/heartbeat/gcp-vpc-move-route.in
+++ b/heartbeat/gcp-vpc-move-route.in
@@ -243,7 +243,7 @@ def validate(ctx):
ctx.conn = googleapiclient.discovery.build('compute', 'v1', credentials=credentials, cache_discovery=False)
except Exception as e:
logger.error('Couldn\'t connect with google api: ' + str(e))
- sys.exit(OCF_ERR_CONFIGURED)
+ sys.exit(OCF_ERR_GENERIC)

ctx.ip = os.environ.get('OCF_RESKEY_ip')
if not ctx.ip:
@@ -258,7 +258,7 @@ def validate(ctx):
except Exception as e:
logger.error(
'Instance information not found. Is this a GCE instance ?: %s', str(e))
- sys.exit(OCF_ERR_CONFIGURED)
+ sys.exit(OCF_ERR_GENERIC)

ctx.instance_url = '%s/projects/%s/zones/%s/instances/%s' % (
GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance)
@@ -273,7 +273,7 @@ def validate(ctx):
idxs = ctx.iproute.link_lookup(ifname=ctx.interface)
if not idxs:
logger.error('Network interface not found')
- sys.exit(OCF_ERR_CONFIGURED)
+ sys.exit(OCF_ERR_GENERIC)
ctx.iface_idx = idxs[0]
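The exit-code swap is the entire point of this patch: Pacemaker treats OCF_ERR_CONFIGURED as a fatal misconfiguration and gives up on the resource everywhere, while OCF_ERR_GENERIC is a soft error that still allows recovery on another node. A schematic of the pattern (the helper name is invented for illustration):

    import sys

    # Standard OCF return codes (subset).
    OCF_SUCCESS = 0
    OCF_ERR_GENERIC = 1     # soft error: the cluster may retry elsewhere
    OCF_ERR_CONFIGURED = 6  # fatal: the resource definition itself is broken

    def connect_or_exit(build_connection):
        # An unreachable cloud API is an environment problem, not proof of
        # a bad resource definition, so exit with the soft error code.
        try:
            return build_connection()
        except Exception as e:
            sys.stderr.write('could not reach API: %s\n' % e)
            sys.exit(OCF_ERR_GENERIC)  # not OCF_ERR_CONFIGURED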
366
SOURCES/bz2014415-nfsserver-add-nfs_server_scope-parameter.patch
Normal file
@@ -0,0 +1,366 @@
From 764dacb6195f8940f13b9c322b1bc8189c5619fc Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Mon, 6 Sep 2021 12:13:42 +0200
Subject: [PATCH 1/6] Fix NFSv4 lock failover: set NFS Server Scope

Problem: https://github.com/ClusterLabs/resource-agents/issues/1644
RFC8881, 8.4.2.1 State Reclaim:

| If the server scope is different, the client should not attempt to
| reclaim locks. In this situation, no lock reclaim is possible.
| Any attempt to re-obtain the locks with non-reclaim operations is
| problematic since there is no guarantee that the existing
| filehandles will be recognized by the new server, or that if
| recognized, they denote the same objects. It is best to treat the
| locks as having been revoked by the reconfiguration event.

That's why for lock reclaim to even be attempted, we have to define and set
the same server scope for NFSD on all cluster nodes in the NFS failover
cluster. And in linux, that is done by setting the uts nodename for the
command that starts the nfsd kernel threads.

For "init scripts", just set it directly using unshare --uts.
For systemd units, add NFS_SERVER_SCOPE to some environment files
and inject the "unshare --uts" into the ExecStart command lines
using override drop-in files.
---
heartbeat/nfsserver | 120 +++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 119 insertions(+), 1 deletion(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 96b19abe36..0888378645 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -5,6 +5,18 @@
# by hxinwei@gmail.com
# License: GNU General Public License v2 (GPLv2) and later

+
+# I don't know for certain whether all services actuall _need_ this,
+# I know that at least nfs-server needs it.
+# The rgmanager resource agent in rgmanager/src/resources/nfsserver.sh.in
+# did the unshare for gssd and idmapd as well, even though it seems unclear why.
+# Let's start with just the nfs-server, and add others if/when we have clear
+# indication they need it.
+#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpcbind.service"
+NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-server.service"
+SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE=/run/sysconfig/nfs-server-scope
+SYSTEMD_UNSHARE_UTS_DROPIN=51-resource-agents-unshare-uts.conf
+
if [ -n "$OCF_DEBUG_LIBRARY" ]; then
. $OCF_DEBUG_LIBRARY
else
@@ -99,6 +111,31 @@ Specifies the length of sm-notify retry time (minutes).
<content type="integer" default="" />
</parameter>

+<parameter name="nfs_server_scope" unique="0" required="0">
+<longdesc lang="en">
+RFC8881, 8.4.2.1 State Reclaim:
+
+If the server scope is different, the client should not attempt to
+reclaim locks. In this situation, no lock reclaim is possible.
+Any attempt to re-obtain the locks with non-reclaim operations is
+problematic since there is no guarantee that the existing
+filehandles will be recognized by the new server, or that if
+recognized, they denote the same objects. It is best to treat the
+locks as having been revoked by the reconfiguration event.
+
+For lock reclaim to even be attempted, we have to define and set the same
+server scope for NFSD on all cluster nodes in the NFS failover cluster.
+
+This agent won't "guess" a suitable server scope name for you, you need to
+explicitly specify this. But without it, NFSv4 lock reclaim after failover
+won't work properly. Suggested value: the failover "service IP".
+</longdesc>
+<shortdesc lang="en">
+RFC8881 NFS server scope for (lock) state reclaim after failover.
+</shortdesc>
+<content type="string"/>
+</parameter>
+
<parameter name="nfs_ip" unique="0" required="0">
<longdesc lang="en">
Comma separated list of floating IP addresses used to access the nfs service
@@ -269,7 +306,11 @@ nfs_exec()
set_exec_mode

case $EXEC_MODE in
- 1) ${OCF_RESKEY_nfs_init_script} $cmd;;
+ 1) if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
+ ${OCF_RESKEY_nfs_init_script} $cmd
+ else
+ unshare -u /bin/sh -c "hostname ${OCF_RESKEY_nfs_server_scope}; exec ${OCF_RESKEY_nfs_init_script} $cmd"
+ fi ;;
2) if ! echo $svc | grep -q "\."; then
svc="${svc}.service"
fi
@@ -623,6 +664,74 @@ notify_locks()
fi
}

+# Problem: https://github.com/ClusterLabs/resource-agents/issues/1644
+# RFC8881, 8.4.2.1 State Reclaim:
+#
+# | If the server scope is different, the client should not attempt to
+# | reclaim locks. In this situation, no lock reclaim is possible.
+# | Any attempt to re-obtain the locks with non-reclaim operations is
+# | problematic since there is no guarantee that the existing
+# | filehandles will be recognized by the new server, or that if
+# | recognized, they denote the same objects. It is best to treat the
+# | locks as having been revoked by the reconfiguration event.
+#
+# That's why for lock reclaim to even be attempted, we have to define and set
+# the same server scope for NFSD on all cluster nodes in the NFS failover
+# cluster. And in linux, that is done by setting the uts nodename for the
+# command that starts the nfsd kernel threads.
+#
+inject_unshare_uts_name_into_systemd_units ()
+{
+ local END_TAG="# END OF DROP-IN FOR NFS SERVER SCOPE"
+ local services
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)
+
+ local svc dir dropin edited_exec_start do_reload=false
+ for svc in $services ; do
+ dir=/run/systemd/system/$svc.d
+ dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
+ grep -sqF "$END_TAG" "$dropin" && continue
+
+ test -d "$dir" || mkdir -p "$dir"
+ test -e "$dropin" && rm -f "$dropin"
+
+ edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\(.*\\)#ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\1#p")
+ cat > "$dropin" <<___
+[Service]
+EnvironmentFile=$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE
+# reset list of exec start, then re-populate with unshared uts namespace
+ExecStart=
+$edited_exec_start
+$END_TAG
+___
+ do_reload=true
+ ocf_log debug "injected unshare --uts into $dropin"
+ done
+
+ mkdir -p "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE%/*}"
+ echo "NFS_SERVER_SCOPE=$OCF_RESKEY_nfs_server_scope" > "$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE"
+
+ $do_reload && systemctl daemon-reload
+}
+
+remove_unshare_uts_dropins ()
+{
+ local services
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE)
+
+ local svc dir dropin do_reload=false
+ for svc in $services ; do
+ dir=/run/systemd/system/$svc.d
+ dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
+ test -e "$dropin" || continue
+ rm -f "$dropin"
+ do_reload=true
+ ocf_log debug "removed unshare --uts from $svc"
+ done
+ rm -f "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE}"
+ $do_reload && systemctl daemon-reload
+}
+
nfsserver_start ()
{
local rc;
@@ -636,6 +745,13 @@ nfsserver_start ()
is_redhat_based && set_env_args
bind_tree
prepare_directory
+ case $EXEC_MODE in [23])
+ if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
+ remove_unshare_uts_dropins
+ else
+ inject_unshare_uts_name_into_systemd_units
+ fi ;;
+ esac

if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
@@ -854,6 +970,8 @@ nfsserver_stop ()
ocf_log info "NFS server stopped"
fi

+ case $EXEC_MODE in [23]) remove_unshare_uts_dropins;; esac
+
return $rc
}

From 515697b53c1614d05d39491c9af83e8d8b844b17 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 12:01:41 +0200
Subject: [PATCH 2/6] Fix NFSv4 lock failover: set NFS Server Scope, regardless
of EXEC_MODE

Debian (and other systems) may provide "init scripts",
which will only redirect back to systemd.

If we just unshare --uts the init script invocation,
the uts namespace is useless in that case.

If systemd is running, mangle the nfs-server.service unit,
independent of the "EXEC_MODE".
---
heartbeat/nfsserver | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 0888378645..054aabbaf6 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -745,13 +745,20 @@ nfsserver_start ()
is_redhat_based && set_env_args
bind_tree
prepare_directory
- case $EXEC_MODE in [23])
+
+ # Debian (and other systems) may provide "init scripts",
+ # which will only redirect back to systemd.
+ # If we just unshare --uts the init script invocation,
+ # the uts namespace is useless in that case.
+ # If systemd is running, mangle the nfs-server.service unit,
+ # independent of the "EXEC_MODE" we detected.
+ if $systemd_is_running ; then
if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
remove_unshare_uts_dropins
else
inject_unshare_uts_name_into_systemd_units
- fi ;;
- esac
+ fi
+ fi

if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
@@ -970,7 +977,9 @@ nfsserver_stop ()
ocf_log info "NFS server stopped"
fi

- case $EXEC_MODE in [23]) remove_unshare_uts_dropins;; esac
+ if $systemd_is_running; then
+ remove_unshare_uts_dropins
+ fi

return $rc
}
@@ -1008,6 +1017,7 @@ nfsserver_validate ()
}

nfsserver_validate
+systemd_is_running && systemd_is_running=true || systemd_is_running=false

case $__OCF_ACTION in
start) nfsserver_start

From e83c20d88f404f9f9d829c654883d60eb6cc9ff3 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:06:18 +0200
Subject: [PATCH 3/6] Fix NFSv4 lock failover: add missing "|cut -f1" in
remove_unshare_uts_dropins

---
heartbeat/nfsserver | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 054aabbaf6..d3db89a537 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -717,7 +717,7 @@ ___
remove_unshare_uts_dropins ()
{
local services
- services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE)
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)

local svc dir dropin do_reload=false
for svc in $services ; do

From b5b0e4a0b60d285af576b2d8ecfbe95e5a177a87 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:07:13 +0200
Subject: [PATCH 4/6] Fix NFSv4 lock failover: get rid of "world-inaccessible"
warning

by temporarily changing the umask before generating the dropins
---
heartbeat/nfsserver | 3 +++
1 file changed, 3 insertions(+)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index d3db89a537..447e0302b2 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -687,6 +687,8 @@ inject_unshare_uts_name_into_systemd_units ()
services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)

local svc dir dropin edited_exec_start do_reload=false
+ local old_umask=$(umask)
+ umask 0022
for svc in $services ; do
dir=/run/systemd/system/$svc.d
dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
@@ -710,6 +712,7 @@ ___

mkdir -p "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE%/*}"
echo "NFS_SERVER_SCOPE=$OCF_RESKEY_nfs_server_scope" > "$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE"
+ umask $old_umask

$do_reload && systemctl daemon-reload
}

From 3c6c91ce5a00eeef9cd766389d73a0b42580a1e6 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:08:09 +0200
Subject: [PATCH 5/6] Fix NFSv4 lock failover: deal with "special executable
prefix" chars in ExecStart

---
heartbeat/nfsserver | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 447e0302b2..5326bd2c6e 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -697,7 +697,7 @@ inject_unshare_uts_name_into_systemd_units ()
test -d "$dir" || mkdir -p "$dir"
test -e "$dropin" && rm -f "$dropin"

- edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\(.*\\)#ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\1#p")
+ edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\([-+:!@]*\\)\\(.*\\)#ExecStart=\\1/usr/bin/unshare --uts /bin/sh -c 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\2#p")
cat > "$dropin" <<___
[Service]
EnvironmentFile=$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE

From 512fbaf61e6d24a1236ef50e323ea17a62485c36 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:08:59 +0200
Subject: [PATCH 6/6] Fix NFSv4 lock failover: add rpc-statd-notify to the
comment list of potentially interesting services

---
heartbeat/nfsserver | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 5326bd2c6e..240dd1a76c 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -12,7 +12,7 @@
# did the unshare for gssd and idmapd as well, even though it seems unclear why.
# Let's start with just the nfs-server, and add others if/when we have clear
# indication they need it.
-#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpcbind.service"
+#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpc-statd-notify.service rpcbind.service"
NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-server.service"
SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE=/run/sysconfig/nfs-server-scope
SYSTEMD_UNSHARE_UTS_DROPIN=51-resource-agents-unshare-uts.conf
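The mechanism behind this whole series is easiest to see outside the agent: unshare --uts gives a child process its own hostname, which nfsd then reports as its server scope, without touching the real node name. A root-only sketch (the scope name below is a placeholder):

    import subprocess

    # Needs CAP_SYS_ADMIN; run as root on a Linux host.
    inside = subprocess.run(
        ['unshare', '--uts', '/bin/sh', '-c',
         'hostname nfs-scope-demo; hostname'],
        capture_output=True, text=True, check=True).stdout.strip()
    outside = subprocess.run(['hostname'], capture_output=True,
                             text=True, check=True).stdout.strip()
    print(inside)   # nfs-scope-demo (visible only inside the namespace)
    print(outside)  # the unchanged real hostname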
11
SOURCES/bz2015789-gcp-ilb-3-use-bundled-gcloud.patch
Normal file
@@ -0,0 +1,11 @@
--- a/heartbeat/gcp-ilb 2021-11-09 14:13:20.311243373 +0100
+++ b/heartbeat/gcp-ilb 2021-11-09 14:13:50.269329165 +0100
@@ -28,7 +28,7 @@
OCF_RESKEY_cat_default="socat"
OCF_RESKEY_port_default="60000"
OCF_RESKEY_log_enable_default="false"
-OCF_RESKEY_log_cmd_default="gcloud"
+OCF_RESKEY_log_cmd_default="gcloud-ra"
OCF_RESKEY_log_params_default="logging write GCPILB"
OCF_RESKEY_log_end_params_default=""

22
SOURCES/bz2027591-nfsnotify-fix-notify_args-default.patch
Normal file
@@ -0,0 +1,22 @@
From 1c037b3ac0288509fb2b74fb4a661a504155da15 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 26 Aug 2021 12:27:50 +0200
Subject: [PATCH] nfsnotify: fix default value for "notify_args"

---
heartbeat/nfsnotify.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/nfsnotify.in b/heartbeat/nfsnotify.in
index 851f6ad6b4..fe6d2793ba 100644
--- a/heartbeat/nfsnotify.in
+++ b/heartbeat/nfsnotify.in
@@ -33,7 +33,7 @@
# Parameter defaults

OCF_RESKEY_source_host_default=""
-OCF_RESKEY_notify_args_default="false"
+OCF_RESKEY_notify_args_default=""

: ${OCF_RESKEY_source_host=${OCF_RESKEY_source_host_default}}
: ${OCF_RESKEY_notify_args=${OCF_RESKEY_notify_args_default}}
@@ -1,22 +0,0 @@
From c6338011cf9ea69324f44c8c31a4ca2478aab35a Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 7 Dec 2021 08:59:50 +0100
Subject: [PATCH] podman: remove anonymous volumes

---
heartbeat/podman | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/podman b/heartbeat/podman
index fd901c968..2b73857f1 100755
--- a/heartbeat/podman
+++ b/heartbeat/podman
@@ -251,7 +251,7 @@ remove_container()
return 0
fi
ocf_log notice "Cleaning up inactive container, ${CONTAINER}."
- ocf_run podman rm $CONTAINER
+ ocf_run podman rm -v $CONTAINER
rc=$?
if [ $rc -ne 0 ]; then
# due to a podman bug (rhbz#1841485), sometimes a stopped
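For the record, the -v flag that this now-dropped backport added makes podman delete the anonymous volumes it implicitly created for the container, so repeated start/stop cycles do not leak storage. A hypothetical wrapper showing the same call from Python:

    import subprocess

    def remove_container(name):
        # 'podman rm -v' also removes anonymous volumes owned by the container.
        return subprocess.call(['podman', 'rm', '-v', name])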
@@ -1,543 +0,0 @@
From 3e469239e8c853725b28a9c6b509152aacc2c5cc Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 13 Jun 2022 11:24:05 +0200
Subject: [PATCH 1/2] all agents: update to promotable terms

---
heartbeat/SAPInstance | 22 +++++++++++-----------
heartbeat/conntrackd.in | 6 +++---
heartbeat/db2 | 12 ++++++------
heartbeat/dnsupdate.in | 2 +-
heartbeat/galera.in | 26 +++++++++++++-------------
heartbeat/iface-bridge | 6 +++---
heartbeat/mariadb.in | 30 +++++++++++++++---------------
heartbeat/mpathpersist.in | 24 ++++++++++++------------
heartbeat/mysql | 4 ++--
heartbeat/mysql-proxy | 2 +-
heartbeat/pgsql | 2 +-
heartbeat/redis.in | 4 ++--
heartbeat/sg_persist.in | 4 ++--
14 files changed, 74 insertions(+), 74 deletions(-)

diff --git a/heartbeat/SAPInstance b/heartbeat/SAPInstance
index 016f59aff..e3fe788ae 100755
--- a/heartbeat/SAPInstance
+++ b/heartbeat/SAPInstance
@@ -25,8 +25,8 @@
# OCF_RESKEY_AUTOMATIC_RECOVER (optional, automatic startup recovery using cleanipc, default is false)
# OCF_RESKEY_MONITOR_SERVICES (optional, default is to monitor critical services only)
# OCF_RESKEY_SHUTDOWN_METHOD (optional, defaults to NORMAL, KILL: terminate the SAP instance with OS commands - faster, at your own risk)
-# OCF_RESKEY_ERS_InstanceName (optional, InstanceName of the ERS instance in a Master/Slave configuration)
-# OCF_RESKEY_ERS_START_PROFILE (optional, START_PROFILE of the ERS instance in a Master/Slave configuration)
+# OCF_RESKEY_ERS_InstanceName (optional, InstanceName of the ERS instance in a Promotable configuration)
+# OCF_RESKEY_ERS_START_PROFILE (optional, START_PROFILE of the ERS instance in a Promotable configuration)
# OCF_RESKEY_PRE_START_USEREXIT (optional, lists a script which can be executed before the resource is started)
# OCF_RESKEY_POST_START_USEREXIT (optional, lists a script which can be executed after the resource is started)
# OCF_RESKEY_PRE_STOP_USEREXIT (optional, lists a script which can be executed before the resource is stopped)
@@ -92,11 +92,11 @@ sapinstance_usage() {

$0 manages a SAP Instance as an HA resource.

- The 'start' operation starts the instance or the ERS instance in a Master/Slave configuration
+ The 'start' operation starts the instance or the ERS instance in a Promotable configuration
The 'stop' operation stops the instance
The 'status' operation reports whether the instance is running
The 'monitor' operation reports whether the instance seems to be working
- The 'promote' operation starts the primary instance in a Master/Slave configuration
+ The 'promote' operation starts the primary instance in a Promotable configuration
The 'demote' operation stops the primary instance and starts the ERS instance
The 'reload' operation allows changed parameters (non-unique only) without restarting the service
The 'notify' operation always returns SUCCESS
@@ -201,11 +201,11 @@ You may specify multiple services separated by a | (pipe) sign in this parameter
<content type="string" default="${OCF_RESKEY_SHUTDOWN_METHOD_default}"/>
</parameter>
<parameter name="ERS_InstanceName" unique="1" required="0">
- <longdesc lang="en">Only used in a Master/Slave resource configuration:
+ <longdesc lang="en">Only used in a Promotable resource configuration:
The full qualified SAP enqueue replication instance name. e.g. P01_ERS02_sapp01ers. Usually this is the name of the SAP instance profile.
-The enqueue replication instance must be installed, before you want to configure a master-slave cluster resource.
+The enqueue replication instance must be installed, before you want to configure a promotable cluster resource.

-The master-slave configuration in the cluster must use this properties:
+The promotable configuration in the cluster must use this properties:
clone_max = 2
clone_node_max = 1
master_node_max = 1
@@ -215,7 +215,7 @@ master_max = 1
<content type="string" default="${OCF_RESKEY_ERS_InstanceName_default}"/>
</parameter>
<parameter name="ERS_START_PROFILE" unique="1" required="0">
- <longdesc lang="en">Only used in a Master/Slave resource configuration:
+ <longdesc lang="en">Only used in a Promotable resource configuration:
The parameter ERS_InstanceName must also be set in this configuration.
The name of the SAP START profile. Specify this parameter, if you have changed the name of the SAP START profile after the default SAP installation. As SAP release 7.10 does not have a START profile anymore, you need to specify the Instance Profile than.
</longdesc>
@@ -243,7 +243,7 @@ The name of the SAP START profile. Specify this parameter, if you have changed t
<content type="string" default="${OCF_RESKEY_POST_STOP_USEREXIT_default}" />
</parameter>
<parameter name="IS_ERS" unique="0" required="0">
- <longdesc lang="en">Only used for ASCS/ERS SAP Netweaver installations without implementing a master/slave resource to
+ <longdesc lang="en">Only used for ASCS/ERS SAP Netweaver installations without implementing a promotable resource to
allow the ASCS to 'find' the ERS running on another cluster node after a resource failure. This parameter should be set
to true 'only' for the ERS instance for implementations following the SAP NetWeaver 7.40 HA certification (NW-HA-CLU-740). This includes also
systems for NetWeaver less than 7.40, if you like to implement the NW-HA-CLU-740 scenario.
@@ -266,8 +266,8 @@ The name of the SAP START profile. Specify this parameter, if you have changed t
<action name="stop" timeout="240s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="60s" interval="120s" />
-<action name="monitor" depth="0" timeout="60s" interval="121s" role="Slave" />
-<action name="monitor" depth="0" timeout="60s" interval="119s" role="Master" />
+<action name="monitor" depth="0" timeout="60s" interval="121s" role="Unpromoted" />
+<action name="monitor" depth="0" timeout="60s" interval="119s" role="Promoted" />
<action name="promote" timeout="320s" />
<action name="demote" timeout="320s" />
<action name="reload" timeout="320s" />
diff --git a/heartbeat/conntrackd.in b/heartbeat/conntrackd.in
index f115250d6..1c2ee955b 100644
--- a/heartbeat/conntrackd.in
+++ b/heartbeat/conntrackd.in
@@ -50,7 +50,7 @@ meta_data() {
<version>1.0</version>

<longdesc lang="en">
-Master/Slave OCF Resource Agent for conntrackd
+Promotable OCF Resource Agent for conntrackd
</longdesc>

<shortdesc lang="en">This resource agent manages conntrackd</shortdesc>
@@ -81,8 +81,8 @@ For example "/packages/conntrackd-0.9.14/etc/conntrackd/conntrackd.conf"</longde
<action name="demote" timeout="30s" />
<action name="notify" timeout="30s" />
<action name="stop" timeout="30s" />
-<action name="monitor" timeout="20s" interval="20s" role="Slave" />
-<action name="monitor" timeout="20s" interval="10s" role="Master" />
+<action name="monitor" timeout="20s" interval="20s" role="Unpromoted" />
+<action name="monitor" timeout="20s" interval="10s" role="Promoted" />
<action name="meta-data" timeout="5s" />
<action name="validate-all" timeout="30s" />
</actions>
diff --git a/heartbeat/db2 b/heartbeat/db2
index 4a4b2f477..620b89583 100755
--- a/heartbeat/db2
+++ b/heartbeat/db2
@@ -3,7 +3,7 @@
# db2
#
# Resource agent that manages a DB2 LUW database in Standard role
-# or HADR configuration in master/slave configuration.
+# or HADR configuration in promotable configuration.
# Multi partition is supported as well.
#
# Copyright (c) 2011 Holger Teutsch <holger.teutsch@web.de>
@@ -61,7 +61,7 @@ cat <<END
<resource-agent name="db2" version="1.0">
<version>1.0</version>
<longdesc lang="en">
-Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles in master/slave configuration. Multiple partitions are supported.
+Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles in promotable configuration. Multiple partitions are supported.

Standard mode:

@@ -71,8 +71,8 @@ Configure each partition as a separate primitive resource.
HADR mode:

A single database in HADR configuration is made highly available by automating takeover operations.
-Configure a master / slave resource with notifications enabled and an
-additional monitoring operation with role "Master".
+Configure a promotable resource with notifications enabled and an
+additional monitoring operation with role "Promoted".

In case of HADR be very deliberate in specifying intervals/timeouts. The detection of a failure including promote must complete within HADR_PEER_WINDOW.

@@ -84,7 +84,7 @@ In addition to honoring requirements for crash recovery etc. for your specific d

For further information and examples consult http://www.linux-ha.org/wiki/db2_(resource_agent)
</longdesc>
-<shortdesc lang="en">Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles as master/slave configuration. Multiple partitions are supported.</shortdesc>
+<shortdesc lang="en">Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles as promotable configuration. Multiple partitions are supported.</shortdesc>

<parameters>
<parameter name="instance" unique="1" required="1">
@@ -125,7 +125,7 @@ The number of the partition (DBPARTITIONNUM) to be managed.
<action name="demote" timeout="120s"/>
<action name="notify" timeout="10s"/>
<action name="monitor" depth="0" timeout="60s" interval="20s"/>
-<action name="monitor" depth="0" timeout="60s" role="Master" interval="22s"/>
+<action name="monitor" depth="0" timeout="60s" role="Promoted" interval="22s"/>
<action name="validate-all" timeout="5s"/>
<action name="meta-data" timeout="5s"/>
</actions>
diff --git a/heartbeat/dnsupdate.in b/heartbeat/dnsupdate.in
index 35b7c99bb..b54822cd8 100755
--- a/heartbeat/dnsupdate.in
+++ b/heartbeat/dnsupdate.in
@@ -119,7 +119,7 @@ the exact syntax.
<parameter name="server" unique="0" required="0">
<longdesc lang="en">
Which DNS server to send these updates for. When no
-server is provided, this defaults to the master server
+server is provided, this defaults to the promoted server
for the correct zone.
</longdesc>
<shortdesc lang="en">DNS server to contact</shortdesc>
diff --git a/heartbeat/galera.in b/heartbeat/galera.in
index c363eb254..546b1a853 100755
--- a/heartbeat/galera.in
+++ b/heartbeat/galera.in
@@ -26,31 +26,31 @@
##
# README.
#
-# This agent only supports being configured as a multistate Master
+# This agent only supports being configured as a multistate Promoted
# resource.
#
-# Slave vs Master role:
+# Unpromoted vs Promoted role:
#
-# During the 'Slave' role, galera instances are in read-only mode and
+# During the 'Unpromoted' role, galera instances are in read-only mode and
# will not attempt to connect to the cluster. This role exists only as
# a means to determine which galera instance is the most up-to-date. The
# most up-to-date node will be used to bootstrap a galera cluster that
# has no current members.
#
-# The galera instances will only begin to be promoted to the Master role
+# The galera instances will only begin to be promoted to the Promoted role
# once all the nodes in the 'wsrep_cluster_address' connection address
# have entered read-only mode. At that point the node containing the
-# database that is most current will be promoted to Master. Once the first
-# Master instance bootstraps the galera cluster, the other nodes will be
-# promoted to Master as well.
+# database that is most current will be promoted to Promoted. Once the first
+# Promoted instance bootstraps the galera cluster, the other nodes will be
+# promoted to Promoted as well.
#
# Example: Create a galera cluster using nodes rhel7-node1 rhel7-node2 rhel7-node3
#
# pcs resource create db galera enable_creation=true \
-# wsrep_cluster_address="gcomm://rhel7-auto1,rhel7-auto2,rhel7-auto3" meta master-max=3 --master
+# wsrep_cluster_address="gcomm://rhel7-auto1,rhel7-auto2,rhel7-auto3" meta promoted-max=3 --promoted
#
# By setting the 'enable_creation' option, the database will be automatically
-# generated at startup. The meta attribute 'master-max=3' means that all 3
+# generated at startup. The meta attribute 'promoted-max=3' means that all 3
# nodes listed in the wsrep_cluster_address list will be allowed to connect
# to the galera cluster and perform replication.
#
@@ -114,8 +114,8 @@ The 'start' operation starts the database.
The 'stop' operation stops the database.
The 'status' operation reports whether the database is running
The 'monitor' operation reports whether the database seems to be working
-The 'promote' operation makes this mysql server run as master
-The 'demote' operation makes this mysql server run as slave
+The 'promote' operation makes this mysql server run as promoted
+The 'demote' operation makes this mysql server run as unpromoted
The 'validate-all' operation reports whether the parameters are valid

UEND
@@ -298,8 +298,8 @@ Use it with caution! (and fencing)
<action name="stop" timeout="120s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="20s" />
-<action name="monitor" role="Master" depth="0" timeout="30s" interval="10s" />
-<action name="monitor" role="Slave" depth="0" timeout="30s" interval="30s" />
+<action name="monitor" role="Promoted" depth="0" timeout="30s" interval="10s" />
+<action name="monitor" role="Unpromoted" depth="0" timeout="30s" interval="30s" />
<action name="promote" timeout="300s" />
<action name="demote" timeout="120s" />
<action name="validate-all" timeout="5s" />
diff --git a/heartbeat/iface-bridge b/heartbeat/iface-bridge
index 75d5371dd..a4e50adb9 100755
--- a/heartbeat/iface-bridge
+++ b/heartbeat/iface-bridge
@@ -211,7 +211,7 @@ bridge_meta_data() {
<longdesc lang="en">
Set the port cost. This is a dimensionless metric.
A list of port/cost can be specified using the following
- format: slave cost slave cost.
+ format: unpromoted cost unpromoted cost.
Example: eth0 100 eth1 1000
</longdesc>
<shortdesc lang="en">
@@ -228,7 +228,7 @@ bridge_meta_data() {
This metric is used in the designated port and root port
selection algorithms.
A list of port/priority can be specified using the following
- format: slave cost slave cost.
+ format: unpromoted cost unpromoted cost.
Example: eth0 10 eth1 60
</longdesc>
<shortdesc lang="en">
@@ -262,7 +262,7 @@ bridge_meta_data() {
Enable or disable a port from the multicast router.
Kernel enables all port by default.
A list of port can be specified using the following
- format: slave 0|1 slave 0|1.
+ format: unpromoted 0|1 unpromoted 0|1.
Example: eth0 1 eth1 0
</longdesc>
<shortdesc lang="en">
diff --git a/heartbeat/mariadb.in b/heartbeat/mariadb.in
index 39ad191bb..5a39ccb66 100644
--- a/heartbeat/mariadb.in
+++ b/heartbeat/mariadb.in
@@ -3,7 +3,7 @@
#
# MariaDB
#
-# Description: Manages a MariaDB Master/Slave database as Linux-HA resource
+# Description: Manages a MariaDB Promotable database as Linux-HA resource
#
# Authors: Alan Robertson: DB2 Script
# Jakub Janczak: rewrite as MySQL
@@ -61,8 +61,8 @@ The 'start' operation starts the database.
The 'stop' operation stops the database.
The 'status' operation reports whether the database is running
The 'monitor' operation reports whether the database seems to be working
-The 'promote' operation makes this mysql server run as master
-The 'demote' operation makes this mysql server run as slave
+The 'promote' operation makes this mysql server run as promoted
+The 'demote' operation makes this mysql server run as unpromoted
The 'validate-all' operation reports whether the parameters are valid

UEND
@@ -78,20 +78,20 @@ meta_data() {
<longdesc lang="en">
Resource script for MariaDB.

-Manages a complete master/slave replication setup with GTID, for simpler
+Manages a complete promotable replication setup with GTID, for simpler
uses look at the mysql resource agent which supports older replication
forms which mysql and mariadb have in common.

The resource must be setup to use notifications. Set 'notify=true' in the metadata
-attributes when defining a MariaDB master/slave instance.
+attributes when defining a MariaDB promotable instance.

-The default behavior is to use uname -n values in the change master to command.
+The default behavior is to use uname -n values in the change promoted to command.
Other IPs can be specified manually by adding a node attribute
\${INSTANCE_ATTR_NAME}_mysql_master_IP giving the IP to use for replication.
For example, if the mariadb primitive you are using is p_mariadb, the
attribute to set will be p_mariadb_mysql_master_IP.
</longdesc>
-<shortdesc lang="en">Manages a MariaDB master/slave instance</shortdesc>
+<shortdesc lang="en">Manages a MariaDB promotable instance</shortdesc>
<parameters>

<parameter name="binary" unique="0" required="0">
@@ -154,7 +154,7 @@ The logfile to be used for mysqld.
<longdesc lang="en">
All node names of nodes that will execute mariadb.
Please separate each node name with a space.
-This is required for the master selection to function.
+This is required for the promoted selection to function.
</longdesc>
<shortdesc lang="en">node list</shortdesc>
<content type="string" default="${OCF_RESKEY_node_list_default}" />
@@ -220,11 +220,11 @@ Additional parameters which are passed to the mysqld on startup.
<parameter name="replication_user" unique="0" required="0">
<longdesc lang="en">
MariaDB replication user. This user is used for starting and stopping
-MariaDB replication, for setting and resetting the master host, and for
+MariaDB replication, for setting and resetting the promoted host, and for
setting and unsetting read-only mode. Because of that, this user must
have SUPER, REPLICATION SLAVE, REPLICATION CLIENT, PROCESS and RELOAD
privileges on all nodes within the cluster. Mandatory if you define a
-master-slave resource.
+promotable resource.
</longdesc>
<shortdesc lang="en">MariaDB replication user</shortdesc>
<content type="string" default="${OCF_RESKEY_replication_user_default}" />
@@ -232,8 +232,8 @@ master-slave resource.

<parameter name="replication_passwd" unique="0" required="0">
<longdesc lang="en">
-MariaDB replication password. Used for replication client and slave.
-Mandatory if you define a master-slave resource.
+MariaDB replication password. Used for replication client and unpromoted.
+Mandatory if you define a promotable resource.
</longdesc>
<shortdesc lang="en">MariaDB replication user password</shortdesc>
<content type="string" default="${OCF_RESKEY_replication_passwd_default}" />
@@ -241,7 +241,7 @@ Mandatory if you define a master-slave resource.

<parameter name="replication_port" unique="0" required="0">
<longdesc lang="en">
-The port on which the Master MariaDB instance is listening.
+The port on which the Promoted MariaDB instance is listening.
</longdesc>
<shortdesc lang="en">MariaDB replication port</shortdesc>
<content type="string" default="${OCF_RESKEY_replication_port_default}" />
@@ -254,8 +254,8 @@ The port on which the Master MariaDB instance is listening.
<action name="stop" timeout="120s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="20s" />
-<action name="monitor" role="Master" depth="0" timeout="30s" interval="10s" />
-<action name="monitor" role="Slave" depth="0" timeout="30s" interval="30s" />
+<action name="monitor" role="Promoted" depth="0" timeout="30s" interval="10s" />
+<action name="monitor" role="Unpromoted" depth="0" timeout="30s" interval="30s" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
diff --git a/heartbeat/mpathpersist.in b/heartbeat/mpathpersist.in
|
||||
index fcf1b3a4b..e47fef4bd 100644
|
||||
--- a/heartbeat/mpathpersist.in
|
||||
+++ b/heartbeat/mpathpersist.in
|
||||
@@ -80,9 +80,9 @@ meta_data() {
|
||||
<longdesc lang="en">
|
||||
This resource agent manages SCSI persistent reservations on multipath devices.
|
||||
"mpathpersist" from multipath-tools is used, please see its documentation.
|
||||
-Should be used as multistate (Master/Slave) resource
|
||||
-Slave registers its node id ("crm_node -i") as reservation key ( --param-sark ) on each device in the params "devs" list.
-Master reserves all devices from params "devs" list with reservation "--prout-type" value from "reservation_type" parameter.
+Should be used as a multistate (Promotable) resource
+Unpromoted registers its node id ("crm_node -i") as reservation key ( --param-sark ) on each device in the params "devs" list.
+Promoted reserves all devices from params "devs" list with reservation "--prout-type" value from "reservation_type" parameter.
Please see man sg_persist(8) and mpathpersist(8) for reservation_type details.
</longdesc>
<shortdesc lang="en">Manages SCSI persistent reservations on multipath devices</shortdesc>
@@ -132,7 +132,7 @@ reservation type
master_score_base value
"master_score_base" value is used in "master_score" calculation:
master_score = master_score_base + master_score_dev_factor * working_devs
-if set to bigger value in mpathpersist resource configuration on some node, this node will be "preferred" for master role.
+if set to a bigger value in the mpathpersist resource configuration on some node, this node will be "preferred" for the promoted role.
</longdesc>
<shortdesc lang="en">base master_score value</shortdesc>
<content type="string" default="${OCF_RESKEY_master_score_base_default}" />
@@ -140,9 +140,9 @@ if set to bigger value in mpathpersist resource configuration on some node, this

<parameter name="master_score_dev_factor" unique="0" required="0">
<longdesc lang="en">
-Working device factor in master_score calculation
+Working device factor in promoted calculation
each "working" device provides additional value to "master_score",
-so the node that sees more devices will be preferred for the "Master"-role
+so the node that sees more devices will be preferred for the "Promoted"-role
Setting it to 0 will disable this behavior.
</longdesc>
<shortdesc lang="en">working device factor in master_score calculation</shortdesc>
@@ -151,10 +151,10 @@ Setting it to 0 will disable this behavior.

<parameter name="master_score_delay" unique="0" required="0">
<longdesc lang="en">
-master/slave decreases/increases its master_score after delay of "master_score_delay" seconds
-so if some device gets inaccessible, the slave decreases its master_score first and the resource will no be watched
-and after this device reappears again the master increases its master_score first
-this can work only if the master_score_delay is bigger then monitor interval on both master and slave
+promoted/unpromoted decreases/increases its master_score after delay of "master_score_delay" seconds
+so if some device gets inaccessible, the unpromoted decreases its master_score first and the resource will not be watched
+and after this device reappears again the promoted increases its master_score first
+this can work only if the master_score_delay is bigger than the monitor interval on both promoted and unpromoted
Setting it to 0 will disable this behavior.
</longdesc>
<shortdesc lang="en">master_score decrease/increase delay time</shortdesc>
@@ -168,8 +168,8 @@ Setting it to 0 will disable this behavior.
<action name="demote" timeout="30s" />
<action name="notify" timeout="30s" />
<action name="stop" timeout="30s" />
-<action name="monitor" depth="0" timeout="20s" interval="29s" role="Slave" />
-<action name="monitor" depth="0" timeout="20s" interval="60s" role="Master" />
+<action name="monitor" depth="0" timeout="20s" interval="29s" role="Unpromoted" />
+<action name="monitor" depth="0" timeout="20s" interval="60s" role="Promoted" />
<action name="meta-data" timeout="5s" />
<action name="validate-all" timeout="30s" />
</actions>
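The master_score formula above is plain arithmetic, so a node's promotion
score can be sketched directly in shell (the parameter values below are
made up for illustration; the agent computes working_devs itself):

    # Hypothetical values: base score 100, 100 points per working device,
    # and three multipath devices currently reachable from this node.
    master_score_base=100
    master_score_dev_factor=100
    working_devs=3

    master_score=$((master_score_base + master_score_dev_factor * working_devs))
    echo "master_score=$master_score"    # -> master_score=400

A node that sees more working devices, or is configured with a higher
master_score_base, therefore wins the promoted role.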
diff --git a/heartbeat/mysql b/heartbeat/mysql
index 720de8c1a..aec44fe5e 100755
--- a/heartbeat/mysql
+++ b/heartbeat/mysql
@@ -321,8 +321,8 @@ whether a node is usable for clients to read from.</shortdesc>
<action name="stop" timeout="120s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="20s" />
-<action name="monitor" role="Master" depth="0" timeout="30s" interval="10s" />
-<action name="monitor" role="Slave" depth="0" timeout="30s" interval="30s" />
+<action name="monitor" role="Promoted" depth="0" timeout="30s" interval="10s" />
+<action name="monitor" role="Unpromoted" depth="0" timeout="30s" interval="30s" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
diff --git a/heartbeat/mysql-proxy b/heartbeat/mysql-proxy
index e34396d9a..fdf2fa230 100755
--- a/heartbeat/mysql-proxy
+++ b/heartbeat/mysql-proxy
@@ -162,7 +162,7 @@ Address:port of the remote back-end servers (default: 127.0.0.1:3306).

<parameter name="proxy_read_only_backend_addresses" unique="0" required="0">
<longdesc lang="en">
-Address:port of the remote (read only) slave-server (default: ).
+Address:port of the remote (read only) unpromoted-server (default: ).
</longdesc>
<shortdesc lang="en">MySql Proxy read only back-end servers</shortdesc>
<content type="string" default="${OCF_RESKEY_proxy_read_only_backend_addresses_default}" />
diff --git a/heartbeat/pgsql b/heartbeat/pgsql
index e3a39038f..94aceb324 100755
--- a/heartbeat/pgsql
+++ b/heartbeat/pgsql
@@ -458,7 +458,7 @@ wal receiver is not running in the master and the attribute shows status as
<action name="stop" timeout="120s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="30s"/>
-<action name="monitor" depth="0" timeout="30s" interval="29s" role="Master" />
+<action name="monitor" depth="0" timeout="30s" interval="29s" role="Promoted" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
diff --git a/heartbeat/redis.in b/heartbeat/redis.in
index 7f886c7ea..6429477e1 100755
--- a/heartbeat/redis.in
+++ b/heartbeat/redis.in
@@ -220,8 +220,8 @@ is in use.
<action name="stop" timeout="120s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="60s" interval="45s" />
-<action name="monitor" role="Master" depth="0" timeout="60s" interval="20s" />
-<action name="monitor" role="Slave" depth="0" timeout="60s" interval="60s" />
+<action name="monitor" role="Promoted" depth="0" timeout="60s" interval="20s" />
+<action name="monitor" role="Unpromoted" depth="0" timeout="60s" interval="60s" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
diff --git a/heartbeat/sg_persist.in b/heartbeat/sg_persist.in
index 678762f40..0497cc469 100644
--- a/heartbeat/sg_persist.in
+++ b/heartbeat/sg_persist.in
@@ -168,8 +168,8 @@ Setting it to 0 will disable this behavior.
<action name="demote" timeout="30s" />
<action name="notify" timeout="30s" />
<action name="stop" timeout="30s" />
-<action name="monitor" depth="0" timeout="20s" interval="29s" role="Slave" />
-<action name="monitor" depth="0" timeout="20s" interval="60s" role="Master" />
+<action name="monitor" depth="0" timeout="20s" interval="29s" role="Unpromoted" />
+<action name="monitor" depth="0" timeout="20s" interval="60s" role="Promoted" />
<action name="meta-data" timeout="5s" />
<action name="validate-all" timeout="30s" />
</actions>

From 14e5cb71e3749d311745f110f90cc1139f9cedaf Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 16 Jun 2022 15:54:39 +0200
Subject: [PATCH 2/2] metadata: update to promoted roles

---
heartbeat/metadata.rng | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/heartbeat/metadata.rng b/heartbeat/metadata.rng
index 3dd735547..909efc284 100644
--- a/heartbeat/metadata.rng
+++ b/heartbeat/metadata.rng
@@ -85,8 +85,8 @@

<define name="role-values">
<choice>
- <value>Master</value>
- <value>Slave</value>
+ <value>Promoted</value>
+ <value>Unpromoted</value>
</choice>
</define>

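With the schema now accepting only the new role names, agent metadata can be
checked against it from a resource-agents source checkout. A rough sketch
(the paths and the agent picked are illustrative; xmllint ships with libxml2):

    # Dump an agent's metadata and validate it against metadata.rng.
    export OCF_ROOT=/usr/lib/ocf
    "$OCF_ROOT/resource.d/heartbeat/sg_persist" meta-data > /tmp/meta.xml
    xmllint --noout --relaxng heartbeat/metadata.rng /tmp/meta.xml \
        && echo "role values are valid (Promoted/Unpromoted)"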
@ -1,312 +0,0 @@
From fd1d6426a2d05f521207c305d10b49fedd92c2df Mon Sep 17 00:00:00 2001
From: Petr Pavlu <petr.pavlu@suse.com>
Date: Mon, 28 Feb 2022 09:27:42 +0100
Subject: [PATCH 1/4] IPaddr2: Allow to disable Duplicate Address Detection for
 IPv6

"Starting" an IPv6 address with IPaddr2 involves performing Duplicate
Address Detection which typically takes at least 1000 ms. Allow the user
to disable DAD if they can guarantee that the configured address is not
duplicate and they wish to start the resource faster.
---
heartbeat/IPaddr2 | 15 +++++++++++++++
1 file changed, 15 insertions(+)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index 735dd7779..650392b70 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -88,6 +88,7 @@ OCF_RESKEY_arp_sender_default=""
OCF_RESKEY_send_arp_opts_default=""
OCF_RESKEY_flush_routes_default="false"
OCF_RESKEY_run_arping_default=false
+OCF_RESKEY_nodad_default=false
OCF_RESKEY_noprefixroute_default="false"
OCF_RESKEY_preferred_lft_default="forever"
OCF_RESKEY_network_namespace_default=""
@@ -110,6 +111,7 @@ OCF_RESKEY_network_namespace_default=""
: ${OCF_RESKEY_send_arp_opts=${OCF_RESKEY_send_arp_opts_default}}
: ${OCF_RESKEY_flush_routes=${OCF_RESKEY_flush_routes_default}}
: ${OCF_RESKEY_run_arping=${OCF_RESKEY_run_arping_default}}
+: ${OCF_RESKEY_nodad=${OCF_RESKEY_nodad_default}}
: ${OCF_RESKEY_noprefixroute=${OCF_RESKEY_noprefixroute_default}}
: ${OCF_RESKEY_preferred_lft=${OCF_RESKEY_preferred_lft_default}}
: ${OCF_RESKEY_network_namespace=${OCF_RESKEY_network_namespace_default}}
@@ -391,6 +393,14 @@ Whether or not to run arping for IPv4 collision detection check.
<content type="string" default="${OCF_RESKEY_run_arping_default}"/>
</parameter>

+<parameter name="nodad">
+<longdesc lang="en">
+For IPv6, do not perform Duplicate Address Detection when adding the address.
+</longdesc>
+<shortdesc lang="en">Use nodad flag</shortdesc>
+<content type="string" default="${OCF_RESKEY_nodad_default}"/>
+</parameter>
+
<parameter name="noprefixroute">
<longdesc lang="en">
Use noprefixroute flag (see 'man ip-address').
@@ -662,6 +672,11 @@ add_interface () {
msg="Adding $FAMILY address $ipaddr/$netmask with broadcast address $broadcast to device $iface"
fi

+ if [ "$FAMILY" = "inet6" ] && ocf_is_true "${OCF_RESKEY_nodad}"; then
+ cmd="$cmd nodad"
+ msg="${msg} (with nodad)"
+ fi
+
if ocf_is_true "${OCF_RESKEY_noprefixroute}"; then
cmd="$cmd noprefixroute"
msg="${msg} (with noprefixroute)"

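For context, the kernel-level effect of the new flag can be reproduced with
plain ip(8); the address and interface below are examples:

    # Without nodad the kernel performs DAD and the address stays in the
    # "tentative" state for a moment; with nodad it is usable immediately.
    ip -6 addr add 2001:db8::10/64 dev eth0
    ip -6 addr add 2001:db8::11/64 dev eth0 nodad
    ip -6 addr show dev eth0    # only the first address shows "tentative"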
From f4a9e3281d48c5d37f5df593d014706c46ddb3a7 Mon Sep 17 00:00:00 2001
From: Petr Pavlu <petr.pavlu@suse.com>
Date: Mon, 7 Mar 2022 17:21:59 +0100
Subject: [PATCH 2/4] IPaddr2: Allow to send IPv6 Neighbor Advertisements in
 background

"Starting" an IPv6 address with IPaddr2 involves sending Neighbor
Advertisement packets to inform neighboring machines about the new
IP+MAC translation. By default, 5x packets with 200 ms sleep after each
are sent which delays the start by 1000 ms. Allow the user to run this
operation in background, similarly as is possible with GARP for IPv4.
---
heartbeat/IPaddr2 | 33 +++++++++++++++++++++++++++++----
1 file changed, 29 insertions(+), 4 deletions(-)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index 650392b70..e243a642d 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -83,7 +83,7 @@ OCF_RESKEY_unique_clone_address_default=false
OCF_RESKEY_arp_interval_default=200
OCF_RESKEY_arp_count_default=5
OCF_RESKEY_arp_count_refresh_default=0
-OCF_RESKEY_arp_bg_default=true
+OCF_RESKEY_arp_bg_default=""
OCF_RESKEY_arp_sender_default=""
OCF_RESKEY_send_arp_opts_default=""
OCF_RESKEY_flush_routes_default="false"
@@ -336,9 +336,10 @@ situations.

<parameter name="arp_bg">
<longdesc lang="en">
-Whether or not to send the ARP packets in the background.
+Whether or not to send the ARP (IPv4) or NA (IPv6) packets in the background.
+The default is true for IPv4 and false for IPv6.
</longdesc>
-<shortdesc lang="en">ARP from background</shortdesc>
+<shortdesc lang="en">ARP/NA from background</shortdesc>
<content type="string" default="${OCF_RESKEY_arp_bg_default}"/>
</parameter>

@@ -507,6 +508,9 @@ ip_init() {
ocf_exit_reason "IPv4 does not support lvs_ipv6_addrlabel"
exit $OCF_ERR_CONFIGURED
fi
+ if [ -z "$OCF_RESKEY_arp_bg" ]; then
+ OCF_RESKEY_arp_bg=true
+ fi
else
FAMILY=inet6
# address sanitization defined in RFC5952
@@ -527,6 +531,9 @@ ip_init() {
exit $OCF_ERR_CONFIGURED
fi
fi
+ if [ -z "$OCF_RESKEY_arp_bg" ]; then
+ OCF_RESKEY_arp_bg=false
+ fi
fi

# support nic:iflabel format in nic parameter
@@ -893,6 +900,20 @@ run_arp_sender() {
fi
}

+log_send_ua() {
+ local cmdline
+ local output
+ local rc
+
+ cmdline="$@"
+ output=$($cmdline 2>&1)
+ rc=$?
+ if [ $rc -ne 0 ] ; then
+ ocf_log err "Could not send ICMPv6 Unsolicited Neighbor Advertisements: rc=$rc"
+ fi
+ ocf_log info "$output"
+ return $rc
+}

#
# Run send_ua to note send ICMPv6 Unsolicited Neighbor Advertisements.
@@ -930,7 +951,11 @@ run_send_ua() {

ARGS="-i $OCF_RESKEY_arp_interval -c $OCF_RESKEY_arp_count $OCF_RESKEY_ip $NETMASK $NIC"
ocf_log info "$SENDUA $ARGS"
- $SENDUA $ARGS || ocf_log err "Could not send ICMPv6 Unsolicited Neighbor Advertisements."
+ if ocf_is_true $OCF_RESKEY_arp_bg; then
+ log_send_ua $SENDUA $ARGS &
+ else
+ log_send_ua $SENDUA $ARGS
+ fi
}

# Do we already serve this IP address on the given $NIC?

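The log_send_ua helper introduced above is a reusable pattern: capture the
command's output and exit code, log on failure, and let the call site decide
whether to background the whole wrapper. A standalone sketch with a
placeholder command name and arguments:

    # Wrap any command so its output and failures are always reported,
    # even when the caller detaches the wrapper with '&'.
    log_wrapped() {
        out=$("$@" 2>&1)
        rc=$?
        [ $rc -ne 0 ] && echo "'$*' failed with rc=$rc" >&2
        [ -n "$out" ] && echo "$out"
        return $rc
    }

    if [ "$RUN_IN_BACKGROUND" = "true" ]; then
        log_wrapped send_ua -i 200 -c 5 2001:db8::10 64 eth0 &
    else
        log_wrapped send_ua -i 200 -c 5 2001:db8::10 64 eth0
    fi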
From c8afb43012c264f3ee24013a92b2a2f3566db2fd Mon Sep 17 00:00:00 2001
From: Petr Pavlu <petr.pavlu@suse.com>
Date: Tue, 8 Mar 2022 12:35:56 +0100
Subject: [PATCH 3/4] IPaddr2: Log 'ip addr add' options together

Change the log message in add_interface() from
"Adding ... (with <opt1>) (with <opt2>)"
to
"Adding ... (with <opt1> <opt2>)".
---
heartbeat/IPaddr2 | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index e243a642d..dca1b6f5b 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -651,7 +651,7 @@ delete_interface () {
# Add an interface
#
add_interface () {
- local cmd msg ipaddr netmask broadcast iface label
+ local cmd msg extra_opts ipaddr netmask broadcast iface label

ipaddr="$1"
netmask="$2"
@@ -679,23 +679,24 @@ add_interface () {
msg="Adding $FAMILY address $ipaddr/$netmask with broadcast address $broadcast to device $iface"
fi

+ extra_opts=""
if [ "$FAMILY" = "inet6" ] && ocf_is_true "${OCF_RESKEY_nodad}"; then
- cmd="$cmd nodad"
- msg="${msg} (with nodad)"
+ extra_opts="$extra_opts nodad"
fi

if ocf_is_true "${OCF_RESKEY_noprefixroute}"; then
- cmd="$cmd noprefixroute"
- msg="${msg} (with noprefixroute)"
+ extra_opts="$extra_opts noprefixroute"
fi

if [ ! -z "$label" ]; then
- cmd="$cmd label $label"
- msg="${msg} (with label $label)"
+ extra_opts="$extra_opts label $label"
fi
if [ "$FAMILY" = "inet6" ] ;then
- cmd="$cmd preferred_lft $OCF_RESKEY_preferred_lft"
- msg="${msg} (with preferred_lft $OCF_RESKEY_preferred_lft)"
+ extra_opts="$extra_opts preferred_lft $OCF_RESKEY_preferred_lft"
+ fi
+ if [ -n "$extra_opts" ]; then
+ cmd="$cmd$extra_opts"
+ msg="$msg (with$extra_opts)"
fi

ocf_log info "$msg"

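The effect on the log message is easy to demonstrate; a quick sketch with
made-up values shows why the options are collected first and appended once:

    extra_opts=" nodad preferred_lft forever"    # note the leading space
    msg="Adding inet6 address 2001:db8::10/64 to device eth0"
    echo "$msg (with$extra_opts)"
    # -> Adding ... (with nodad preferred_lft forever)
    # instead of the old: Adding ... (with nodad) (with preferred_lft forever)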
From cb4d52ead694718282a40eab24e04b6d85bcc802 Mon Sep 17 00:00:00 2001
From: Petr Pavlu <petr.pavlu@suse.com>
Date: Mon, 7 Mar 2022 17:25:02 +0100
Subject: [PATCH 4/4] IPaddr2: Clarify behavior of 'arp_*' parameters for IPv4
 and IPv6

* Mention that 'arp_*' parameters are shared by the IPv4 and IPv6 code.
* Clarify description of these parameters and mark which of them apply
 only to IPv4.
---
heartbeat/IPaddr2 | 26 +++++++++++++++++---------
1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index dca1b6f5b..97a7431a2 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -157,6 +157,12 @@ and/or clone-max < number of nodes. In case of node failure,
clone instances need to be re-allocated on surviving nodes.
This would not be possible if there is already an instance
on those nodes, and clone-node-max=1 (which is the default).
+
+When the specified IP address gets assigned to a respective interface, the
+resource agent sends unsolicited ARP (Address Resolution Protocol, IPv4) or NA
+(Neighbor Advertisement, IPv6) packets to inform neighboring machines about the
+change. This functionality is controlled for both IPv4 and IPv6 by shared
+'arp_*' parameters.
</longdesc>

<shortdesc lang="en">Manages virtual IPv4 and IPv6 addresses (Linux specific version)</shortdesc>
@@ -306,28 +312,30 @@ a unique address to manage

<parameter name="arp_interval">
<longdesc lang="en">
-Specify the interval between unsolicited ARP packets in milliseconds.
+Specify the interval between unsolicited ARP (IPv4) or NA (IPv6) packets in
+milliseconds.

This parameter is deprecated and used for the backward compatibility only.
It is effective only for the send_arp binary which is built with libnet,
and send_ua for IPv6. It has no effect for other arp_sender.
</longdesc>
-<shortdesc lang="en">ARP packet interval in ms (deprecated)</shortdesc>
+<shortdesc lang="en">ARP/NA packet interval in ms (deprecated)</shortdesc>
<content type="integer" default="${OCF_RESKEY_arp_interval_default}"/>
</parameter>

<parameter name="arp_count">
<longdesc lang="en">
-Number of unsolicited ARP packets to send at resource initialization.
+Number of unsolicited ARP (IPv4) or NA (IPv6) packets to send at resource
+initialization.
</longdesc>
-<shortdesc lang="en">ARP packet count sent during initialization</shortdesc>
+<shortdesc lang="en">ARP/NA packet count sent during initialization</shortdesc>
<content type="integer" default="${OCF_RESKEY_arp_count_default}"/>
</parameter>

<parameter name="arp_count_refresh">
<longdesc lang="en">
-Number of unsolicited ARP packets to send during resource monitoring. Doing
-so helps mitigate issues of stuck ARP caches resulting from split-brain
+For IPv4, number of unsolicited ARP packets to send during resource monitoring.
+Doing so helps mitigate issues of stuck ARP caches resulting from split-brain
situations.
</longdesc>
<shortdesc lang="en">ARP packet count sent during monitoring</shortdesc>
@@ -345,7 +353,7 @@ The default is true for IPv4 and false for IPv6.

<parameter name="arp_sender">
<longdesc lang="en">
-The program to send ARP packets with on start. Available options are:
+For IPv4, the program to send ARP packets with on start. Available options are:
- send_arp: default
- ipoibarping: default for infiniband interfaces if ipoibarping is available
- iputils_arping: use arping in iputils package
@@ -357,7 +365,7 @@ The program to send ARP packets with on start. Available options are:

<parameter name="send_arp_opts">
<longdesc lang="en">
-Extra options to pass to the arp_sender program.
+For IPv4, extra options to pass to the arp_sender program.
Available options are vary depending on which arp_sender is used.

A typical use case is specifying '-A' for iputils_arping to use
@@ -388,7 +396,7 @@ IP address goes away.

<parameter name="run_arping">
<longdesc lang="en">
-Whether or not to run arping for IPv4 collision detection check.
+For IPv4, whether or not to run arping for collision detection check.
</longdesc>
<shortdesc lang="en">Run arping for IPv4 collision detection check</shortdesc>
<content type="string" default="${OCF_RESKEY_run_arping_default}"/>
@ -1,401 +0,0 @@
From d59a000da2766476538bb82d1889f5c0f3882f9f Mon Sep 17 00:00:00 2001
From: Jan Friesse <jfriesse@redhat.com>
Date: Wed, 2 Mar 2022 18:43:31 +0100
Subject: [PATCH] corosync-qnetd: Add resource agent

Mostly for better monitor operation using corosync-qnetd-tool. As qnetd
is (almost) stateless, the only directory which has to be copied (once)
across the nodes is nss db directory (usually
/etc/corosync/qnetd/nssdb).

Signed-off-by: Jan Friesse <jfriesse@redhat.com>
---
doc/man/Makefile.am | 1 +
heartbeat/Makefile.am | 1 +
heartbeat/corosync-qnetd | 353 +++++++++++++++++++++++++++++++++++++++
3 files changed, 355 insertions(+)
create mode 100755 heartbeat/corosync-qnetd

diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index 1093717fe..013aa392d 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -127,6 +127,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \
ocf_heartbeat_azure-lb.7 \
ocf_heartbeat_clvm.7 \
ocf_heartbeat_conntrackd.7 \
+ ocf_heartbeat_corosync-qnetd.7 \
ocf_heartbeat_crypt.7 \
ocf_heartbeat_db2.7 \
ocf_heartbeat_dhcpd.7 \
diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 67b400679..38154e2da 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -101,6 +101,7 @@ ocf_SCRIPTS = AoEtarget \
azure-lb \
clvm \
conntrackd \
+ corosync-qnetd \
crypt \
db2 \
dhcpd \
diff --git a/heartbeat/corosync-qnetd b/heartbeat/corosync-qnetd
new file mode 100755
index 000000000..6b9777711
--- /dev/null
+++ b/heartbeat/corosync-qnetd
@@ -0,0 +1,353 @@
+#!/bin/sh
+#
+# Copyright (C) 2022 Red Hat, Inc. All rights reserved.
+#
+# Authors: Jan Friesse <jfriesse@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+
+# Initialization:
+: "${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}"
+. "${OCF_FUNCTIONS_DIR}/ocf-shellfuncs"
+
+# Use runuser if available for SELinux.
+if [ -x "/sbin/runuser" ]; then
+ SU="runuser"
+else
+ SU="su"
+fi
+
+# Attempt to detect a default binary
+OCF_RESKEY_binary_default=$(which corosync-qnetd 2> /dev/null)
+if [ "${OCF_RESKEY_binary_default}" = "" ]; then
+ OCF_RESKEY_binary_default="/usr/bin/corosync-qnetd"
+fi
+
+# Defaults
+OCF_RESKEY_qnetd_opts_default=""
+OCF_RESKEY_qnetd_tool_binary_default="/usr/bin/corosync-qnetd-tool"
+OCF_RESKEY_ip_default=""
+OCF_RESKEY_port_default=""
+OCF_RESKEY_nss_db_dir_default=""
+OCF_RESKEY_pid_default="/var/run/corosync-qnetd/corosync-qnetd-${OCF_RESOURCE_INSTANCE}.pid"
+OCF_RESKEY_ipc_sock_default="/var/run/corosync-qnetd/corosync-qnetd-${OCF_RESOURCE_INSTANCE}.sock"
+OCF_RESKEY_user_default="coroqnetd"
+OCF_RESKEY_group_default="${OCF_RESKEY_user_default}"
+
+: "${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}"
+: "${OCF_RESKEY_qnetd_opts=${OCF_RESKEY_qnetd_opts_default}}"
+: "${OCF_RESKEY_qnetd_tool_binary=${OCF_RESKEY_qnetd_tool_binary_default}}"
+: "${OCF_RESKEY_ip=${OCF_RESKEY_ip_default}}"
+: "${OCF_RESKEY_port=${OCF_RESKEY_port_default}}"
+: "${OCF_RESKEY_nss_db_dir=${OCF_RESKEY_nss_db_dir_default}}"
+: "${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}"
+: "${OCF_RESKEY_ipc_sock=${OCF_RESKEY_ipc_sock_default}}"
+: "${OCF_RESKEY_user=${OCF_RESKEY_user_default}}"
+: "${OCF_RESKEY_group=${OCF_RESKEY_group_default}}"
+
+corosync_qnetd_usage() {
+ cat <<END
+usage: $0 {start|stop|status|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+}
+
+corosync_qnetd_meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="corosync-qnetd" version="1.0">
+<version>1.0</version>
+
+<longdesc lang="en">OCF Resource script for corosync-qnetd. It manages a corosync-qnetd
+instance as a HA resource. It is required to copy nss db directory (usually /etc/corosync/qnetd/nssdb)
+across all nodes (only once - after database is initialized).</longdesc>
+<shortdesc lang="en">Corosync QNet daemon resource agent</shortdesc>
+
+<parameters>
+
+<parameter name="binary">
+ <longdesc lang="en">Location of the corosync-qnetd binary</longdesc>
+ <shortdesc lang="en">corosync-qnetd binary</shortdesc>
+ <content type="string" default="${OCF_RESKEY_binary_default}" />
+</parameter>
+
+<parameter name="qnetd_opts">
+ <longdesc lang="en">
+ Additional options for the corosync-qnetd binary, for example "-4".
+ </longdesc>
+ <shortdesc lang="en">corosync-qnetd extra options</shortdesc>
+ <content type="string" default="${OCF_RESKEY_qnetd_opts_default}" />
+</parameter>
+
+<parameter name="qnetd_tool_binary">
+ <longdesc lang="en">
+ The absolute path to the corosync-qnetd-tool for monitoring with OCF_CHECK_LEVEL greater than zero.
+ </longdesc>
+ <shortdesc lang="en">The absolute path to the corosync-qnetd-tool binary</shortdesc>
+ <content type="string" default="${OCF_RESKEY_qnetd_tool_binary_default}" />
+</parameter>
+
+<parameter name="ip">
+ <longdesc lang="en">
+ IP address to listen on. By default the daemon listens on all addresses (wildcard).
+ </longdesc>
+ <shortdesc lang="en">IP address to listen on</shortdesc>
+ <content type="string" default="${OCF_RESKEY_ip_default}" />
+</parameter>
+
+<parameter name="port">
+ <longdesc lang="en">
+ TCP port to listen on. Default port is 5403.
+ </longdesc>
+ <shortdesc lang="en">TCP port to listen on</shortdesc>
+ <content type="string" default="${OCF_RESKEY_port_default}" />
+</parameter>
+
+<parameter name="nss_db_dir">
+ <longdesc lang="en">
+ Location of the corosync-qnetd nss db directory (empty for default - usually /etc/corosync/qnetd/nssdb)
+ </longdesc>
+ <shortdesc lang="en">corosync-qnetd nss db directory</shortdesc>
+ <content type="string" default="${OCF_RESKEY_nss_db_dir_default}" />
+</parameter>
+
+<parameter name="pid">
+ <longdesc lang="en">
+ Location of the corosync-qnetd pid/lock
+ </longdesc>
+ <shortdesc lang="en">corosync-qnetd pid file</shortdesc>
+ <content type="string" default="${OCF_RESKEY_pid_default}" />
+</parameter>
+
+<parameter name="ipc_sock">
+ <longdesc lang="en">
+ Location of the corosync-qnetd ipc socket
+ </longdesc>
+ <shortdesc lang="en">corosync-qnetd ipc socket file</shortdesc>
+ <content type="string" default="${OCF_RESKEY_ipc_sock_default}" />
+</parameter>
+
+<parameter name="user">
+ <longdesc lang="en">User running corosync-qnetd</longdesc>
+ <shortdesc lang="en">corosync-qnetd user</shortdesc>
+ <content type="string" default="${OCF_RESKEY_user_default}" />
+</parameter>
+
+<parameter name="group">
+ <longdesc lang="en">Group running corosync-qnetd</longdesc>
+ <shortdesc lang="en">corosync-qnetd group</shortdesc>
+ <content type="string" default="${OCF_RESKEY_group_default}" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="20s" />
+<action name="stop" timeout="20s" />
+<action name="status" timeout="20s" />
+<action name="monitor" depth="0" timeout="20s" interval="10s" start-delay="10s" />
+<action name="validate-all" timeout="20s" />
+<action name="meta-data" timeout="20s" />
+</actions>
+</resource-agent>
+END
+}
+
+corosync_qnetd_status() {
+ ocf_pidfile_status "${OCF_RESKEY_pid}" > /dev/null 2>&1
+ case "$?" in
+ 0)
+ rc="$OCF_SUCCESS"
+ ;;
+ 1|2)
+ rc="$OCF_NOT_RUNNING"
+ ;;
+ *)
+ rc="$OCF_ERR_GENERIC"
+ ;;
+ esac
+
+ return "$rc"
+}
+
+corosync_qnetd_start() {
+ corosync_qnetd_validate_all
+ rc="$?"
+
+ if [ "$rc" -ne 0 ]; then
+ return "$rc"
+ fi
+
+ # If the resource is already running, there is no need to continue.
+ if corosync_qnetd_status; then
+ ocf_log info "corosync-qnetd is already running"
+ return "${OCF_SUCCESS}"
+ fi
+
+ pid_dir=$(dirname "${OCF_RESKEY_pid}")
+ sock_dir=$(dirname "${OCF_RESKEY_ipc_sock}")
+
+ for d in "$pid_dir" "$sock_dir";do
+ if [ ! -d "$d" ];then
+ mkdir -p "$d"
+ chmod 0770 "$d"
+ chown "${OCF_RESKEY_user}:${OCF_RESKEY_group}" "$d"
+ fi
+ done
+
+ params="-S \"local_socket_file=${OCF_RESKEY_ipc_sock}\" -S \"lock_file=${OCF_RESKEY_pid}\""
+
+ if [ -n "${OCF_RESKEY_nss_db_dir}" ];then
+ params="$params -S \"nss_db_dir=${OCF_RESKEY_nss_db_dir}\""
+ fi
+
+ if [ -n "${OCF_RESKEY_ip}" ];then
+ params="$params -l \"${OCF_RESKEY_ip}\""
+ fi
+
+ if [ -n "${OCF_RESKEY_port}" ];then
+ params="$params -p \"${OCF_RESKEY_port}\""
+ fi
+
+ params="$params ${OCF_RESKEY_qnetd_opts}"
+
+ ocf_run "$SU" -s "/bin/sh" "${OCF_RESKEY_user}" -c "${OCF_RESKEY_binary} $params"
+
+ while :; do
+ corosync_qnetd_monitor "debug"
+ rc="$?"
+
+ if [ "$rc" -eq "${OCF_SUCCESS}" ]; then
+ break
+ fi
+ sleep 1
+
+ ocf_log debug "corosync-qnetd still hasn't started yet. Waiting..."
+ done
+
+ ocf_log info "corosync-qnetd started"
+ return "${OCF_SUCCESS}"
+}
+
+corosync_qnetd_stop() {
+ corosync_qnetd_status
+
+ if [ "$?" -ne "$OCF_SUCCESS" ]; then
+ # Currently not running. Nothing to do.
+ ocf_log info "corosync-qnetd is already stopped"
+
+ return "$OCF_SUCCESS"
+ fi
+
+ pid=$(cat "${OCF_RESKEY_pid}")
+ kill "$pid"
+
+ # Wait for process to stop
+ while corosync_qnetd_monitor "debug"; do
+ sleep 1
+ done
+
+ ocf_log info "corosync-qnetd stopped"
+ return "$OCF_SUCCESS"
+}
+
+corosync_qnetd_monitor() {
+ loglevel=${1:-err}
+
+ corosync_qnetd_status
+ rc="$?"
+
+ if [ "$rc" -ne "$OCF_SUCCESS" ];then
+ return "$rc"
+ fi
+
+ out=$("${OCF_RESKEY_qnetd_tool_binary}" -s -p "${OCF_RESKEY_ipc_sock}" 2>&1 >/dev/null)
+ rc="$?"
+
+ if [ "$rc" != 0 ];then
+ ocf_log "$loglevel" "$out"
+ fi
+
+ case "$rc" in
+ "0") rc="$OCF_SUCCESS" ;;
+ "3") rc="$OCF_NOT_RUNNING" ;;
+ *) rc="$OCF_ERR_GENERIC" ;;
+ esac
+
+ return "$rc"
+}
+
+corosync_qnetd_validate_all() {
+ check_binary "${OCF_RESKEY_binary}"
+
+ check_binary "${OCF_RESKEY_qnetd_tool_binary}"
+}
+
+
+# **************************** MAIN SCRIPT ************************************
+
+# Make sure meta-data and usage always succeed
+case "$__OCF_ACTION" in
+ meta-data)
+ corosync_qnetd_meta_data
+ exit "$OCF_SUCCESS"
+ ;;
+ usage|help)
+ corosync_qnetd_usage
+ exit "$OCF_SUCCESS"
+ ;;
+esac
+
+# This OCF agent script needs to be run as the root user.
+if ! ocf_is_root; then
+ echo "$0 agent script needs to be run as the root user."
+ ocf_log debug "$0 agent script needs to be run as the root user."
+ exit "$OCF_ERR_GENERIC"
+fi
+
+# Translate each action into the appropriate function call
+case "$__OCF_ACTION" in
+ start)
+ corosync_qnetd_start
+ ;;
+ stop)
+ corosync_qnetd_stop
+ ;;
+ status)
+ corosync_qnetd_status
+ ;;
+ monitor)
+ corosync_qnetd_monitor
+ ;;
+ validate-all)
+ corosync_qnetd_validate_all
+ ;;
+ *)
+ corosync_qnetd_usage
+ exit "$OCF_ERR_UNIMPLEMENTED"
+ ;;
+esac
+
+rc="$?"
+exit "$rc"
+# End of this script
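The monitor logic above can be reproduced by hand. A sketch, assuming a
resource instance named "qnetd" so that the socket path matches the agent's
default naming:

    sock="/var/run/corosync-qnetd/corosync-qnetd-qnetd.sock"
    /usr/bin/corosync-qnetd-tool -s -p "$sock"
    rc=$?
    # The agent maps rc 0 to OCF_SUCCESS, rc 3 to OCF_NOT_RUNNING and
    # anything else to OCF_ERR_GENERIC.
    echo "corosync-qnetd-tool exit code: $rc"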
@ -1,61 +0,0 @@
From 340e12c0d457d244d375c2d805e78033c9dbdf78 Mon Sep 17 00:00:00 2001
From: Takashi Kajinami <tkajinam@redhat.com>
Date: Wed, 04 May 2022 23:13:35 +0900
Subject: [PATCH] NovaCompute/Evacuate: Make user/project domain configurable

... so that we can use a user or a project in a non-default keystone
domain.

Change-Id: I6e2175adca08fd97942cb83b8f8094e980b60c9d
---

diff --git a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
index 596f520..4565766 100644
--- a/heartbeat/NovaEvacuate
+++ b/heartbeat/NovaEvacuate
@@ -63,13 +63,29 @@

<parameter name="tenant_name" unique="0" required="1">
<longdesc lang="en">
-Tenant name for connecting to keystone in admin context.
+Tenant(Project) name for connecting to keystone in admin context.
Note that with Keystone V3 tenant names are only unique within a domain.
</longdesc>
<shortdesc lang="en">Tenant name</shortdesc>
<content type="string" default="" />
</parameter>

+<parameter name="user_domain" unique="0" required="0">
+<longdesc lang="en">
+Keystone domain the user belongs to
+</longdesc>
+<shortdesc lang="en">Keystone v3 User Domain</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="project_domain" unique="0" required="0">
+<longdesc lang="en">
+Keystone domain the tenant(project) belongs to
+</longdesc>
+<shortdesc lang="en">Keystone v3 Project Domain</shortdesc>
+<content type="string" default="" />
+</parameter>
+
<parameter name="domain" unique="0" required="0">
<longdesc lang="en">
DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN
@@ -319,6 +335,14 @@

fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"

+ if [ -n "${OCF_RESKEY_user_domain}" ]; then
+ fence_options="${fence_options} -u ${OCF_RESKEY_user_domain}"
+ fi
+
+ if [ -n "${OCF_RESKEY_project_domain}" ]; then
+ fence_options="${fence_options} -P ${OCF_RESKEY_project_domain}"
+ fi
+
if [ -n "${OCF_RESKEY_domain}" ]; then
fence_options="${fence_options} -d ${OCF_RESKEY_domain}"
fi
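A hypothetical cluster configuration using the new parameters (the resource
name, credentials and endpoint are placeholders; parameter names other than
tenant_name, user_domain and project_domain are recalled from the agent and
may differ, and further required parameters are elided):

    pcs resource create nova-evacuate ocf:heartbeat:NovaEvacuate \
        auth_url="http://keystone.example.com:5000/v3" \
        username="admin" password="secret" tenant_name="admin" \
        user_domain="Default" project_domain="Default"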
@ -0,0 +1,47 @@
From 99c4f2af92a10155cf072198c72deffaed3883a5 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 3 Aug 2022 17:20:31 +0200
Subject: [PATCH] CTDB: move process to root cgroup if realtime scheduling is
 enabled

---
heartbeat/CTDB.in | 2 ++
heartbeat/ocf-shellfuncs.in | 12 ++++++++++++
2 files changed, 14 insertions(+)

diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in
index d25d026ca..46f56cfac 100755
--- a/heartbeat/CTDB.in
+++ b/heartbeat/CTDB.in
@@ -709,6 +709,8 @@ EOF
invoke_ctdbd() {
local vers="$1"

+ ocf_move_to_root_cgroup_if_rt_enabled
+
ocf_version_cmp "$vers" "4.9.0"
if [ "$?" -ne "0" ]; then
# With 4.9+, all ctdbd binary parameters are provided as
diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index 6be4e4e30..2c53a967a 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -672,6 +672,18 @@ EOF
systemctl daemon-reload
}

+# move process to root cgroup if realtime scheduling is enabled
+ocf_move_to_root_cgroup_if_rt_enabled()
+{
+ if [ -e "/sys/fs/cgroup/cpu/cpu.rt_runtime_us" ]; then
+ echo $$ >> /sys/fs/cgroup/cpu/tasks
+
+ if [ "$?" -ne "0" ]; then
+ ocf_log warn "Unable to move PID $$ to the root cgroup"
+ fi
+ fi
+}
+
# usage: crm_mon_no_validation args...
# run crm_mon without any cib schema validation
# This is useful when an agent runs in a bundle to avoid potential