Compare commits
No commits in common. "c9" and "c8" have entirely different histories.
.gitignore (vendored, 13 lines changed)
@@ -1 +1,12 @@
-SOURCES/ClusterLabs-resource-agents-fd0720f7.tar.gz
+SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz
+SOURCES/aliyun-cli-2.1.10.tar.gz
+SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz
+SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz
+SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz
+SOURCES/colorama-0.3.3.tar.gz
+SOURCES/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz
+SOURCES/httplib2-0.20.4.tar.gz
+SOURCES/pycryptodome-3.20.0.tar.gz
+SOURCES/pyparsing-2.4.7-py2.py3-none-any.whl
+SOURCES/pyroute2-0.4.13.tar.gz
+SOURCES/urllib3-1.26.18.tar.gz
@@ -1 +1,12 @@
-3b517ecdbe2103df77813050e5c998e102c5de7e SOURCES/ClusterLabs-resource-agents-fd0720f7.tar.gz
+dfc65f4cac3f95026b2f5674019814a527333004 SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz
+306e131d8908ca794276bfe3a0b55ccc3bbd482f SOURCES/aliyun-cli-2.1.10.tar.gz
+0a56f6d9ed2014a363486d33b63eca094379be06 SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz
+c2a98b9a1562d223a76514f05028488ca000c395 SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz
+f14647a4d37a9a254c4e711b95a7654fc418e41e SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz
+0fe5bd8bca54dd71223778a1e0bcca9af324abb1 SOURCES/colorama-0.3.3.tar.gz
+81f039cf075e9c8b70d5af99c189296a9e031de3 SOURCES/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz
+7caf4412d9473bf17352316249a8133fa70b7e37 SOURCES/httplib2-0.20.4.tar.gz
+c55d177e9484d974c95078d4ae945f89ba2c7251 SOURCES/pycryptodome-3.20.0.tar.gz
+c8307f47e3b75a2d02af72982a2dfefa3f56e407 SOURCES/pyparsing-2.4.7-py2.py3-none-any.whl
+147149db11104c06d405fd077dcd2aa1c345f109 SOURCES/pyroute2-0.4.13.tar.gz
+84e2852d8da1655373f7ce5e7d5d3e256b62b4e4 SOURCES/urllib3-1.26.18.tar.gz
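Each entry in this second hunk pairs a 40-character SHA-1 digest with a source tarball, recording the expected checksum of each blob in SOURCES/. A digest can be recomputed and compared with a short Python sketch (the function name and file path are illustrative):

    import hashlib

    def sha1sum(path, chunk=1 << 20):
        # Stream the file in 1 MiB blocks so large tarballs fit in memory.
        h = hashlib.sha1()
        with open(path, 'rb') as f:
            while block := f.read(chunk):
                h.update(block)
        return h.hexdigest()

    # Expected value taken from the hunk above.
    expected = 'dfc65f4cac3f95026b2f5674019814a527333004'
    assert sha1sum('SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz') == expected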
SOURCES/10-gcloud-support-info.patch (new file, 25 lines)
@@ -0,0 +1,25 @@
diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py
--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 2019-04-04 11:59:47.592768577 +0200
@@ -900,6 +900,9 @@
return """\
For detailed information on this command and its flags, run:
{command_path} --help
+
+WARNING: {command_path} is only supported for "{command_path} init" and for use
+with the agents in resource-agents.
""".format(command_path=' '.join(command.GetPath()))


diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py
--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 2019-04-04 12:00:23.991142694 +0200
@@ -84,7 +84,7 @@

pkg_root = os.path.dirname(os.path.dirname(surface.__file__))
loader = cli.CLILoader(
- name='gcloud',
+ name='gcloud-ra',
command_root_directory=os.path.join(pkg_root, 'surface'),
allow_non_existing_modules=True,
version_func=VersionFunc,
SOURCES/7-gcp-bundled.patch (new file, 45 lines)
@@ -0,0 +1,45 @@
diff --color -uNr a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
--- a/heartbeat/gcp-pd-move.in 2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-pd-move.in 2024-07-22 11:01:51.455543850 +0200
@@ -32,6 +32,7 @@
from ocf import logger

try:
+ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
import googleapiclient.discovery
except ImportError:
pass
diff --color -uNr a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in
--- a/heartbeat/gcp-vpc-move-ip.in 2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-vpc-move-ip.in 2024-07-22 11:01:18.010752081 +0200
@@ -36,7 +36,7 @@
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

# Defaults
-OCF_RESKEY_gcloud_default="/usr/bin/gcloud"
+OCF_RESKEY_gcloud_default="/usr/bin/gcloud-ra"
OCF_RESKEY_configuration_default="default"
OCF_RESKEY_vpc_network_default="default"
OCF_RESKEY_interface_default="eth0"
diff --color -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
--- a/heartbeat/gcp-vpc-move-route.in 2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-vpc-move-route.in 2024-07-22 11:01:18.011752105 +0200
@@ -45,6 +45,7 @@
from ocf import *

try:
+ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
import googleapiclient.discovery
import pyroute2
try:
diff --color -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
--- a/heartbeat/gcp-vpc-move-vip.in 2024-07-22 10:59:42.170483160 +0200
+++ b/heartbeat/gcp-vpc-move-vip.in 2024-07-22 11:01:18.012752128 +0200
@@ -29,6 +29,7 @@
from ocf import *

try:
+ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
import googleapiclient.discovery
try:
from google.oauth2.service_account import Credentials as ServiceAccountCredentials
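The recurring hunk above prepends the bundled google-cloud-sdk location to sys.path before the googleapiclient import is attempted. As a standalone Python sketch (the path comes from the patch; the fallback behavior is an assumption about how the agents report missing dependencies later):

    import sys

    BUNDLED_GCP = '/usr/lib/resource-agents/bundled/gcp'

    try:
        # Prefer the bundled copy over anything in site-packages.
        sys.path.insert(0, BUNDLED_GCP)
        import googleapiclient.discovery  # noqa: F401
    except ImportError:
        # Keep loading; the agent can still emit meta-data and fail
        # with a clear error once the library is actually needed.
        pass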
@@ -0,0 +1,129 @@
diff -uNr a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py
--- a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 2019-04-04 11:56:00.292677044 +0200
@@ -19,8 +19,14 @@
certificates.
"""

+from pyasn1.codec.der import decoder
from pyasn1_modules import pem
-import rsa
+from pyasn1_modules.rfc2459 import Certificate
+from pyasn1_modules.rfc5208 import PrivateKeyInfo
+from cryptography.hazmat.primitives import serialization, hashes
+from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography import x509
+from cryptography.hazmat.backends import default_backend
import six

from oauth2client import _helpers
@@ -40,7 +46,7 @@
'-----END RSA PRIVATE KEY-----')
_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
'-----END PRIVATE KEY-----')
-_PKCS8_SPEC = None
+_PKCS8_SPEC = PrivateKeyInfo()


def _bit_list_to_bytes(bit_list):
@@ -67,7 +73,8 @@
"""

def __init__(self, pubkey):
- self._pubkey = pubkey
+ self._pubkey = serialization.load_pem_public_key(pubkey,
+ backend=default_backend())

def verify(self, message, signature):
"""Verifies a message against a signature.
@@ -84,8 +91,9 @@
"""
message = _helpers._to_bytes(message, encoding='utf-8')
try:
- return rsa.pkcs1.verify(message, signature, self._pubkey)
- except (ValueError, rsa.pkcs1.VerificationError):
+ return self._pubkey.verify(signature, message, padding.PKCS1v15(),
+ hashes.SHA256())
+ except (ValueError, TypeError, InvalidSignature):
return False

@classmethod
@@ -109,19 +117,18 @@
"""
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
- from pyasn1.codec.der import decoder
- from pyasn1_modules import rfc2459
-
- der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
- asn1_cert, remaining = decoder.decode(der, asn1Spec=rfc2459.Certificate())
+ der = x509.load_pem_x509_certificate(pem_data, default_backend())
+ asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b'':
raise ValueError('Unused bytes', remaining)

cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
- pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
+ pubkey = serialization.load_der_public_key(decoded_key,
+ backend=default_backend())
else:
- pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
+ pubkey = serialization.load_pem_public_key(decoded_key,
+ backend=default_backend())
return cls(pubkey)


@@ -134,6 +141,8 @@

def __init__(self, pkey):
self._key = pkey
+ self._pubkey = serialization.load_pem_private_key(pkey,
+ backend=default_backend())

def sign(self, message):
"""Signs a message.
@@ -145,7 +154,7 @@
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
- return rsa.pkcs1.sign(message, self._key, 'SHA-256')
+ return self._key.sign(message, padding.PKCS1v15(), hashes.SHA256())

@classmethod
def from_string(cls, key, password='notasecret'):
@@ -163,27 +172,24 @@
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
"""
- global _PKCS8_SPEC
key = _helpers._from_bytes(key) # pem expects str in Py3
marker_id, key_bytes = pem.readPemBlocksFromFile(
six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)

if marker_id == 0:
- pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
- format='DER')
- elif marker_id == 1:
- from pyasn1.codec.der import decoder
- from pyasn1_modules import rfc5208
+ pkey = serialization.load_der_private_key(
+ key_bytes, password=None,
+ backend=default_backend())

- if _PKCS8_SPEC is None:
- _PKCS8_SPEC = rfc5208.PrivateKeyInfo()
+ elif marker_id == 1:
key_info, remaining = decoder.decode(
key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b'':
raise ValueError('Unused bytes', remaining)
pkey_info = key_info.getComponentByName('privateKey')
- pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
- format='DER')
+ pkey = serialization.load_der_private_key(
+ pkey_info.asOctets(), password=None,
+ backend=default_backend())
else:
raise ValueError('No key could be detected.')
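The hunks above swap the pure-Python rsa module for the cryptography package. The verify path, extracted as a self-contained sketch (note that the patched hunk catches InvalidSignature, which lives in cryptography.exceptions, so that import is added here; the explicit True return is this sketch's addition):

    from cryptography.exceptions import InvalidSignature
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes, serialization
    from cryptography.hazmat.primitives.asymmetric import padding

    def pkcs1v15_sha256_verify(pubkey_pem: bytes, message: bytes,
                               signature: bytes) -> bool:
        # Same parameters the patched Verifier.verify() uses:
        # PKCS#1 v1.5 padding with SHA-256.
        pubkey = serialization.load_pem_public_key(pubkey_pem,
                                                   backend=default_backend())
        try:
            pubkey.verify(signature, message, padding.PKCS1v15(),
                          hashes.SHA256())
            return True
        except (ValueError, TypeError, InvalidSignature):
            return False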
@@ -1,71 +0,0 @@
From 54fa7a59c36697cd8df5b619fff0b50af00df76e Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 20 Nov 2023 16:35:52 +0100
Subject: [PATCH 1/2] storage_mon: fix file handler out of scope leak and
uninitialized values

---
tools/storage_mon.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/tools/storage_mon.c b/tools/storage_mon.c
index 1aae29e58..cc415e97f 100644
--- a/tools/storage_mon.c
+++ b/tools/storage_mon.c
@@ -382,9 +382,11 @@ static int write_pid_file(const char *pidfile)
syslog(LOG_ERR, "Failed to write '%s' to %s: %s", pid, pidfile, strerror(errno));
goto done;
}
- close(fd);
rc = 0;
done:
+ if (fd != -1) {
+ close(fd);
+ }
if (pid != NULL) {
free(pid);
}
@@ -663,6 +665,7 @@ storage_mon_client(void)
snprintf(request.message, SMON_MAX_MSGSIZE, "%s", SMON_GET_RESULT_COMMAND);
request.hdr.id = 0;
request.hdr.size = sizeof(struct storage_mon_check_value_req);
+ response.hdr.id = 0;
rc = qb_ipcc_send(conn, &request, request.hdr.size);
if (rc < 0) {
syslog(LOG_ERR, "qb_ipcc_send error : %d\n", rc);
@@ -683,7 +686,11 @@ storage_mon_client(void)
/* greater than 0 : monitoring error. */
/* -1 : communication system error. */
/* -2 : Not all checks completed for first device in daemon mode. */
- rc = atoi(response.message);
+ if (strnlen(response.message, 1)) {
+ rc = atoi(response.message);
+ } else {
+ rc = -1;
+ }

syslog(LOG_DEBUG, "daemon response[%d]: %s \n", response.hdr.id, response.message);


From b23ba4eaefb500199c4845751f4c5545c81f42f1 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 20 Nov 2023 16:37:37 +0100
Subject: [PATCH 2/2] findif: also check that netmaskbits != EOS

---
tools/findif.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/findif.c b/tools/findif.c
index a25395fec..ab108a3c4 100644
--- a/tools/findif.c
+++ b/tools/findif.c
@@ -669,7 +669,7 @@ main(int argc, char ** argv) {
}
}

- if (netmaskbits) {
+ if (netmaskbits != NULL && *netmaskbits != EOS) {
best_netmask = netmask;
}else if (best_netmask == 0L) {
/*
@@ -1,23 +0,0 @@
From cb968378959b8aa334e98daf62a1b08ef6525fb4 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 22 Nov 2023 10:32:31 +0100
Subject: [PATCH] storage_mon: use memset() to fix "uninitialized value"
covscan error, as qb_ipcc_recv() will always set a message (according to
honzaf)

---
tools/storage_mon.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/tools/storage_mon.c b/tools/storage_mon.c
index cc415e97f..a9227ef90 100644
--- a/tools/storage_mon.c
+++ b/tools/storage_mon.c
@@ -655,6 +655,7 @@ storage_mon_client(void)
int32_t rc;


+ memset(&response, 0, sizeof(response));
snprintf(ipcs_name, SMON_MAX_IPCSNAME, "storage_mon_%s", attrname);
conn = qb_ipcc_connect(ipcs_name, 0);
if (conn == NULL) {
SOURCES/RHEL-17083-findif-EOS-fix.patch (new file, 22 lines)
@@ -0,0 +1,22 @@
From b23ba4eaefb500199c4845751f4c5545c81f42f1 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 20 Nov 2023 16:37:37 +0100
Subject: [PATCH 2/2] findif: also check that netmaskbits != EOS

---
tools/findif.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/findif.c b/tools/findif.c
index a25395fec..ab108a3c4 100644
--- a/tools/findif.c
+++ b/tools/findif.c
@@ -669,7 +669,7 @@ main(int argc, char ** argv) {
}
}

- if (netmaskbits) {
+ if (netmaskbits != NULL && *netmaskbits != EOS) {
best_netmask = netmask;
}else if (best_netmask == 0L) {
/*
SOURCES/RHEL-32828-db2-fix-OCF_SUCESS-typo.patch (new file, 23 lines)
@@ -0,0 +1,23 @@
From a9c4aeb971e9f4963345d0e215b729def62dd27c Mon Sep 17 00:00:00 2001
From: pepadelic <162310096+pepadelic@users.noreply.github.com>
Date: Mon, 15 Apr 2024 13:52:54 +0200
Subject: [PATCH] Update db2: fix OCF_SUCESS name in db2_notify

fix OCF_SUCESS to OCF_SUCCESS in db2_notify
---
heartbeat/db2 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/db2 b/heartbeat/db2
index 95447ab6cb..1cd66f15af 100755
--- a/heartbeat/db2
+++ b/heartbeat/db2
@@ -848,7 +848,7 @@ db2_notify() {

# only interested in pre-start
[ $OCF_RESKEY_CRM_meta_notify_type = pre \
- -a $OCF_RESKEY_CRM_meta_notify_operation = start ] || return $OCF_SUCESS
+ -a $OCF_RESKEY_CRM_meta_notify_operation = start ] || return $OCF_SUCCESS

# gets FIRST_ACTIVE_LOG
db2_get_cfg $dblist || return $?
SOURCES/RHEL-34137-aws-agents-use-curl_retry.patch (new file, 343 lines)
@@ -0,0 +1,343 @@
From fc0657b936f6a58f741e33f851b22f82bc68bffa Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 6 Feb 2024 13:28:12 +0100
Subject: [PATCH 1/2] ocf-shellfuncs: add curl_retry()

---
heartbeat/ocf-shellfuncs.in | 34 ++++++++++++++++++++++++++++++++++
1 file changed, 34 insertions(+)

diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index c5edb6f57..a69a9743d 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -672,6 +672,40 @@ EOF
systemctl daemon-reload
}

+# usage: curl_retry RETRIES SLEEP ARGS URL
+#
+# Use --show-error in ARGS to log HTTP error code
+#
+# returns:
+# 0 success
+# exit:
+# 1 fail
+curl_retry()
+{
+ local retries=$1 sleep=$2 opts=$3 url=$4
+ local tries=$(($retries + 1))
+ local args="--fail $opts $url"
+ local result rc
+
+ for try in $(seq $tries); do
+ ocf_log debug "curl $args try $try of $tries"
+ result=$(echo "$args" | xargs curl 2>&1)
+ rc=$?
+
+ ocf_log debug "result: $result"
+ [ $rc -eq 0 ] && break
+ sleep $sleep
+ done
+
+ if [ $rc -ne 0 ]; then
+ ocf_exit_reason "curl $args failed $tries tries"
+ exit $OCF_ERR_GENERIC
+ fi
+
+ echo "$result"
+ return $rc
+}
+
# usage: crm_mon_no_validation args...
# run crm_mon without any cib schema validation
# This is useful when an agent runs in a bundle to avoid potential

From 80d330557319bdae9e45aad1279e435fc481d4e7 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 6 Feb 2024 13:28:25 +0100
Subject: [PATCH 2/2] AWS agents: use curl_retry()

---
heartbeat/aws-vpc-move-ip | 35 ++++++++++++++++++++++++++---------
heartbeat/aws-vpc-route53.in | 27 +++++++++++++++++++++++++--
heartbeat/awseip | 36 +++++++++++++++++++++++++++++-----
heartbeat/awsvip | 32 ++++++++++++++++++++++++++++----
4 files changed, 110 insertions(+), 20 deletions(-)

diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
index 54806f6ea..6115e5ba8 100755
--- a/heartbeat/aws-vpc-move-ip
+++ b/heartbeat/aws-vpc-move-ip
@@ -47,6 +47,8 @@ OCF_RESKEY_interface_default="eth0"
OCF_RESKEY_iflabel_default=""
OCF_RESKEY_monapi_default="false"
OCF_RESKEY_lookup_type_default="InstanceId"
+OCF_RESKEY_curl_retries_default="3"
+OCF_RESKEY_curl_sleep_default="1"

: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
@@ -60,6 +62,8 @@ OCF_RESKEY_lookup_type_default="InstanceId"
: ${OCF_RESKEY_iflabel=${OCF_RESKEY_iflabel_default}}
: ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}}
: ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}}
+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}
#######################################################################


@@ -194,6 +198,22 @@ Name of resource type to lookup in route table.
<content type="string" default="${OCF_RESKEY_lookup_type_default}" />
</parameter>

+<parameter name="curl_retries" unique="0">
+<longdesc lang="en">
+curl retries before failing
+</longdesc>
+<shortdesc lang="en">curl retries</shortdesc>
+<content type="integer" default="${OCF_RESKEY_curl_retries_default}" />
+</parameter>
+
+<parameter name="curl_sleep" unique="0">
+<longdesc lang="en">
+curl sleep between tries
+</longdesc>
+<shortdesc lang="en">curl sleep</shortdesc>
+<content type="integer" default="${OCF_RESKEY_curl_sleep_default}" />
+</parameter>
+
</parameters>

<actions>
@@ -250,8 +270,10 @@ ec2ip_validate() {
fi
fi

- TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
- EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
+ TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
+ EC2_INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC

if [ -z "${EC2_INSTANCE_ID}" ]; then
ocf_exit_reason "Instance ID not found. Is this a EC2 instance?"
@@ -365,14 +387,9 @@ ec2ip_get_instance_eni() {
fi
ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}"

- cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id -H \"X-aws-ec2-metadata-token: $TOKEN\""
- ocf_log debug "executing command: $cmd"
+ cmd="curl_retry \"$OCF_RESKEY_curl_retries\" \"$OCF_RESKEY_curl_sleep\" \"--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'\" \"http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id\""
EC2_NETWORK_INTERFACE_ID="$(eval $cmd)"
- rc=$?
- if [ $rc != 0 ]; then
- ocf_log warn "command failed, rc: $rc"
- return $OCF_ERR_GENERIC
- fi
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}"
echo $EC2_NETWORK_INTERFACE_ID
}
diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
index 18ab157e8..eba2ed95c 100644
--- a/heartbeat/aws-vpc-route53.in
+++ b/heartbeat/aws-vpc-route53.in
@@ -53,6 +53,8 @@ OCF_RESKEY_hostedzoneid_default=""
OCF_RESKEY_fullname_default=""
OCF_RESKEY_ip_default="local"
OCF_RESKEY_ttl_default=10
+OCF_RESKEY_curl_retries_default="3"
+OCF_RESKEY_curl_sleep_default="1"

: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
@@ -62,6 +64,8 @@ OCF_RESKEY_ttl_default=10
: ${OCF_RESKEY_fullname:=${OCF_RESKEY_fullname_default}}
: ${OCF_RESKEY_ip:=${OCF_RESKEY_ip_default}}
: ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}}
+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}

usage() {
cat <<-EOT
@@ -185,6 +189,22 @@ Time to live for Route53 ARECORD
<shortdesc lang="en">ARECORD TTL</shortdesc>
<content type="string" default="${OCF_RESKEY_ttl_default}" />
</parameter>
+
+<parameter name="curl_retries" unique="0">
+<longdesc lang="en">
+curl retries before failing
+</longdesc>
+<shortdesc lang="en">curl retries</shortdesc>
+<content type="integer" default="${OCF_RESKEY_curl_retries_default}" />
+</parameter>
+
+<parameter name="curl_sleep" unique="0">
+<longdesc lang="en">
+curl sleep between tries
+</longdesc>
+<shortdesc lang="en">curl sleep</shortdesc>
+<content type="integer" default="${OCF_RESKEY_curl_sleep_default}" />
+</parameter>
</parameters>

<actions>
@@ -357,8 +377,11 @@ r53_monitor() {
_get_ip() {
case $OCF_RESKEY_ip in
local|public)
- TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
- IPADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4 -H "X-aws-ec2-metadata-token: $TOKEN");;
+ TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
+ IPADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4")
+ [ $? -ne 0 ] && exit $OCF_ERR_GENERIC
+ ;;
*.*.*.*)
IPADDRESS="${OCF_RESKEY_ip}";;
esac
diff --git a/heartbeat/awseip b/heartbeat/awseip
index 49b0ca615..ffb6223a1 100755
--- a/heartbeat/awseip
+++ b/heartbeat/awseip
@@ -49,12 +49,16 @@ OCF_RESKEY_auth_type_default="key"
OCF_RESKEY_profile_default="default"
OCF_RESKEY_region_default=""
OCF_RESKEY_api_delay_default="3"
+OCF_RESKEY_curl_retries_default="3"
+OCF_RESKEY_curl_sleep_default="1"

: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}

meta_data() {
cat <<END
@@ -141,6 +145,22 @@ a short delay between API calls, to avoid sending API too quick
<content type="integer" default="${OCF_RESKEY_api_delay_default}" />
</parameter>

+<parameter name="curl_retries" unique="0">
+<longdesc lang="en">
+curl retries before failing
+</longdesc>
+<shortdesc lang="en">curl retries</shortdesc>
+<content type="integer" default="${OCF_RESKEY_curl_retries_default}" />
+</parameter>
+
+<parameter name="curl_sleep" unique="0">
+<longdesc lang="en">
+curl sleep between tries
+</longdesc>
+<shortdesc lang="en">curl sleep</shortdesc>
+<content type="integer" default="${OCF_RESKEY_curl_sleep_default}" />
+</parameter>
+
</parameters>

<actions>
@@ -171,14 +191,18 @@ awseip_start() {
awseip_monitor && return $OCF_SUCCESS

if [ -n "${PRIVATE_IP_ADDRESS}" ]; then
- NETWORK_INTERFACES_MACS=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/ -H "X-aws-ec2-metadata-token: $TOKEN")
+ NETWORK_INTERFACES_MACS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "-s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/")
for MAC in ${NETWORK_INTERFACES_MACS}; do
- curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/local-ipv4s -H "X-aws-ec2-metadata-token: $TOKEN" |
+ curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "-s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC%/*}/local-ipv4s" |
grep -q "^${PRIVATE_IP_ADDRESS}$"
if [ $? -eq 0 ]; then
- NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN")
+ NETWORK_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "-s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC%/*}/interface-id")
fi
done
+ if [ -z "$NETWORK_ID" ]; then
+ ocf_exit_reason "Could not find network interface for private_ip_address: $PRIVATE_IP_ADDRESS"
+ exit $OCF_ERR_GENERIC
+ fi
$AWSCLI_CMD ec2 associate-address \
--network-interface-id ${NETWORK_ID} \
--allocation-id ${ALLOCATION_ID} \
@@ -282,8 +306,10 @@ fi
ELASTIC_IP="${OCF_RESKEY_elastic_ip}"
ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}"
-TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
-INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
+TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
+INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC

case $__OCF_ACTION in
start)
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index bdb4d68dd..f2b238a0f 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -48,12 +48,16 @@ OCF_RESKEY_auth_type_default="key"
OCF_RESKEY_profile_default="default"
OCF_RESKEY_region_default=""
OCF_RESKEY_api_delay_default="3"
+OCF_RESKEY_curl_retries_default="3"
+OCF_RESKEY_curl_sleep_default="1"

: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
: ${OCF_RESKEY_auth_type=${OCF_RESKEY_auth_type_default}}
: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
: ${OCF_RESKEY_region=${OCF_RESKEY_region_default}}
: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
+: ${OCF_RESKEY_curl_retries=${OCF_RESKEY_curl_retries_default}}
+: ${OCF_RESKEY_curl_sleep=${OCF_RESKEY_curl_sleep_default}}

meta_data() {
cat <<END
@@ -124,6 +128,22 @@ a short delay between API calls, to avoid sending API too quick
<content type="integer" default="${OCF_RESKEY_api_delay_default}" />
</parameter>

+<parameter name="curl_retries" unique="0">
+<longdesc lang="en">
+curl retries before failing
+</longdesc>
+<shortdesc lang="en">curl retries</shortdesc>
+<content type="integer" default="${OCF_RESKEY_curl_retries_default}" />
+</parameter>
+
+<parameter name="curl_sleep" unique="0">
+<longdesc lang="en">
+curl sleep between tries
+</longdesc>
+<shortdesc lang="en">curl sleep</shortdesc>
+<content type="integer" default="${OCF_RESKEY_curl_sleep_default}" />
+</parameter>
+
</parameters>

<actions>
@@ -246,10 +266,14 @@ if [ -n "${OCF_RESKEY_region}" ]; then
AWSCLI_CMD="$AWSCLI_CMD --region ${OCF_RESKEY_region}"
fi
SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
-TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
-INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN")
-MAC_ADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/mac -H "X-aws-ec2-metadata-token: $TOKEN")
-NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN")
+TOKEN=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -sX PUT -H 'X-aws-ec2-metadata-token-ttl-seconds: 21600'" "http://169.254.169.254/latest/api/token")
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
+INSTANCE_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/instance-id")
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
+MAC_ADDRESS=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/mac")
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC
+NETWORK_ID=$(curl_retry "$OCF_RESKEY_curl_retries" "$OCF_RESKEY_curl_sleep" "--show-error -s -H 'X-aws-ec2-metadata-token: $TOKEN'" "http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id")
+[ $? -ne 0 ] && exit $OCF_ERR_GENERIC

case $__OCF_ACTION in
start)
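curl_retry above makes retries + 1 attempts with a fixed sleep between them, and the AWS agents now route every IMDSv2 call through it: first a PUT to fetch a session token, then metadata queries carrying that token. A rough stdlib-Python analogue of that flow (imds_get is a hypothetical helper; the endpoint and header names come from the patch, and the 3/1 defaults mirror OCF_RESKEY_curl_retries_default and OCF_RESKEY_curl_sleep_default):

    import time
    import urllib.request

    IMDS = "http://169.254.169.254/latest"

    def imds_get(path, method="GET", headers=None, retries=3, sleep=1.0):
        # retries + 1 total attempts, like curl_retry's tries=$(($retries + 1)).
        req = urllib.request.Request(f"{IMDS}/{path}", method=method,
                                     headers=headers or {})
        for attempt in range(retries + 1):
            try:
                with urllib.request.urlopen(req, timeout=5) as resp:
                    return resp.read().decode()
            except OSError:
                if attempt == retries:
                    raise
                time.sleep(sleep)

    # IMDSv2: obtain a session token, then present it on metadata queries.
    token = imds_get("api/token", method="PUT",
                     headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"})
    instance_id = imds_get("meta-data/instance-id",
                           headers={"X-aws-ec2-metadata-token": token})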
@@ -0,0 +1,48 @@
From accff72ecc2f6cf5a76d9570198a93ac7c90270e Mon Sep 17 00:00:00 2001
From: Quentin Pradet <quentin.pradet@gmail.com>
Date: Mon, 17 Jun 2024 11:09:06 +0400
Subject: [PATCH] Merge pull request from GHSA-34jh-p97f-mpxf

* Strip Proxy-Authorization header on redirects

* Fix test_retry_default_remove_headers_on_redirect

* Set release date
---
CHANGES.rst | 5 +++++
src/urllib3/util/retry.py | 4 +++-
test/test_retry.py | 6 ++++-
test/with_dummyserver/test_poolmanager.py | 27 ++++++++++++++++++++---
4 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py
index 7a76a4a6ad..0456cceba4 100644
--- a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py
+++ b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/util/retry.py
@@ -189,7 +189,9 @@ class Retry:
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])

#: Default headers to be used for ``remove_headers_on_redirect``
- DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Cookie", "Authorization"])
+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(
+ ["Cookie", "Authorization", "Proxy-Authorization"]
+ )

#: Default maximum backoff time.
DEFAULT_BACKOFF_MAX = 120

diff --git a/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py b/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py
index 7a76a4a6ad..0456cceba4 100644
--- a/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py
+++ b/gcp/google-cloud-sdk/lib/third_party/urllib3/util/retry.py
@@ -189,7 +189,9 @@ class Retry:
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])

#: Default headers to be used for ``remove_headers_on_redirect``
- DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Cookie", "Authorization"])
+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(
+ ["Cookie", "Authorization", "Proxy-Authorization"]
+ )

#: Default maximum backoff time.
DEFAULT_BACKOFF_MAX = 120
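The patch widens urllib3's default set of headers dropped on cross-host redirects. With a patched copy on the import path, the new default is visible through the public Retry API:

    from urllib3.util.retry import Retry

    retry = Retry()
    # Header names are stored lower-cased and compared case-insensitively;
    # patched builds include 'proxy-authorization' alongside 'cookie' and
    # 'authorization'.
    print(sorted(retry.remove_headers_on_redirect))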
SOURCES/RHEL-50360-setuptools-fix-CVE-2024-6345.patch (new file, 201 lines)
@@ -0,0 +1,201 @@
--- a/setuptools/package_index.py 1980-01-01 09:00:00.000000000 +0100
+++ b/setuptools/package_index.py 2024-07-25 10:11:40.537307665 +0200
@@ -1,5 +1,6 @@
"""PyPI and direct package downloading"""
import sys
+import subprocess
import os
import re
import shutil
@@ -563,7 +564,7 @@
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
- found = self._download_url(scheme.group(1), spec, tmpdir)
+ found = self._download_url(spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found, fragment, tmpdir)
@@ -775,7 +776,7 @@
raise DistutilsError("Download error for %s: %s"
% (url, v))

- def _download_url(self, scheme, url, tmpdir):
+ def _download_url(self, url, tmpdir):
# Determine download filename
#
name, fragment = egg_info_for_url(url)
@@ -790,19 +791,59 @@

filename = os.path.join(tmpdir, name)

- # Download the file
- #
- if scheme == 'svn' or scheme.startswith('svn+'):
- return self._download_svn(url, filename)
- elif scheme == 'git' or scheme.startswith('git+'):
- return self._download_git(url, filename)
- elif scheme.startswith('hg+'):
- return self._download_hg(url, filename)
- elif scheme == 'file':
- return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
- else:
- self.url_ok(url, True) # raises error if not allowed
- return self._attempt_download(url, filename)
+ return self._download_vcs(url, filename) or self._download_other(url, filename)
+
+ @staticmethod
+ def _resolve_vcs(url):
+ """
+ >>> rvcs = PackageIndex._resolve_vcs
+ >>> rvcs('git+http://foo/bar')
+ 'git'
+ >>> rvcs('hg+https://foo/bar')
+ 'hg'
+ >>> rvcs('git:myhost')
+ 'git'
+ >>> rvcs('hg:myhost')
+ >>> rvcs('http://foo/bar')
+ """
+ scheme = urllib.parse.urlsplit(url).scheme
+ pre, sep, post = scheme.partition('+')
+ # svn and git have their own protocol; hg does not
+ allowed = set(['svn', 'git'] + ['hg'] * bool(sep))
+ return next(iter({pre} & allowed), None)
+
+ def _download_vcs(self, url, spec_filename):
+ vcs = self._resolve_vcs(url)
+ if not vcs:
+ return
+ if vcs == 'svn':
+ raise DistutilsError(
+ f"Invalid config, SVN download is not supported: {url}"
+ )
+
+ filename, _, _ = spec_filename.partition('#')
+ url, rev = self._vcs_split_rev_from_url(url)
+
+ self.info(f"Doing {vcs} clone from {url} to {filename}")
+ subprocess.check_call([vcs, 'clone', '--quiet', url, filename])
+
+ co_commands = dict(
+ git=[vcs, '-C', filename, 'checkout', '--quiet', rev],
+ hg=[vcs, '--cwd', filename, 'up', '-C', '-r', rev, '-q'],
+ )
+ if rev is not None:
+ self.info(f"Checking out {rev}")
+ subprocess.check_call(co_commands[vcs])
+
+ return filename
+
+ def _download_other(self, url, filename):
+ scheme = urllib.parse.urlsplit(url).scheme
+ if scheme == 'file': # pragma: no cover
+ return urllib.request.url2pathname(urllib.parse.urlparse(url).path)
+ # raise error if not allowed
+ self.url_ok(url, True)
+ return self._attempt_download(url, filename)

def scan_url(self, url):
self.process_url(url, True)
@@ -829,76 +870,37 @@
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at " + url)

- def _download_svn(self, url, filename):
- url = url.split('#', 1)[0] # remove any fragment for svn's sake
- creds = ''
- if url.lower().startswith('svn:') and '@' in url:
- scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
- if not netloc and path.startswith('//') and '/' in path[2:]:
- netloc, path = path[2:].split('/', 1)
- auth, host = splituser(netloc)
- if auth:
- if ':' in auth:
- user, pw = auth.split(':', 1)
- creds = " --username=%s --password=%s" % (user, pw)
- else:
- creds = " --username=" + auth
- netloc = host
- parts = scheme, netloc, url, p, q, f
- url = urllib.parse.urlunparse(parts)
- self.info("Doing subversion checkout from %s to %s", url, filename)
- os.system("svn checkout%s -q %s %s" % (creds, url, filename))
- return filename
-
@staticmethod
- def _vcs_split_rev_from_url(url, pop_prefix=False):
- scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
-
- scheme = scheme.split('+', 1)[-1]
-
- # Some fragment identification fails
- path = path.split('#', 1)[0]
-
- rev = None
- if '@' in path:
- path, rev = path.rsplit('@', 1)
-
- # Also, discard fragment
- url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
-
- return url, rev
-
- def _download_git(self, url, filename):
- filename = filename.split('#', 1)[0]
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
-
- self.info("Doing git clone from %s to %s", url, filename)
- os.system("git clone --quiet %s %s" % (url, filename))
+ def _vcs_split_rev_from_url(url):
+ """
+ Given a possible VCS URL, return a clean URL and resolved revision if any.

- if rev is not None:
- self.info("Checking out %s", rev)
- os.system("(cd %s && git checkout --quiet %s)" % (
- filename,
- rev,
- ))
+ >>> vsrfu = PackageIndex._vcs_split_rev_from_url
+ >>> vsrfu('git+https://github.com/pypa/setuptools@v69.0.0#egg-info=setuptools')
+ ('https://github.com/pypa/setuptools', 'v69.0.0')
+ >>> vsrfu('git+https://github.com/pypa/setuptools#egg-info=setuptools')
+ ('https://github.com/pypa/setuptools', None)
+ >>> vsrfu('http://foo/bar')
+ ('http://foo/bar', None)
+ """
+ parts = urllib.parse.urlsplit(url)

- return filename
+ clean_scheme = parts.scheme.split('+', 1)[-1]

- def _download_hg(self, url, filename):
- filename = filename.split('#', 1)[0]
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
+ # Some fragment identification fails
+ no_fragment_path, _, _ = parts.path.partition('#')

- self.info("Doing hg clone from %s to %s", url, filename)
- os.system("hg clone --quiet %s %s" % (url, filename))
+ pre, sep, post = no_fragment_path.rpartition('@')
+ clean_path, rev = (pre, post) if sep else (post, None)

- if rev is not None:
- self.info("Updating to %s", rev)
- os.system("(cd %s && hg up -C -r %s >&-)" % (
- filename,
- rev,
- ))
+ resolved = parts._replace(
+ scheme=clean_scheme,
+ path=clean_path,
+ # discard the fragment
+ fragment='',
+ ).geturl()

- return filename
+ return resolved, rev

def debug(self, msg, *args):
log.debug(msg, *args)
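The substance of the CVE-2024-6345 fix is replacing os.system() calls that interpolated attacker-controllable URLs into a shell command line with subprocess.check_call() argument lists, plus refusing SVN URLs outright. A minimal illustration of the difference (the URL is deliberately hostile and hypothetical):

    import subprocess

    url = "https://example.invalid/repo; rm -rf ~"  # hostile input
    dest = "dest"

    # Old pattern (vulnerable): the shell parses the interpolated string,
    # so `; rm -rf ~` would run as a second command.
    #     os.system("git clone --quiet %s %s" % (url, dest))

    # New pattern: argv goes straight to exec, no shell involved; git just
    # fails to resolve the bogus URL.
    try:
        subprocess.check_call(["git", "clone", "--quiet", url, dest])
    except subprocess.CalledProcessError as exc:
        print(f"clone failed safely: {exc}")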
@@ -0,0 +1,38 @@
From 38eaf00bc81af7530c56eba282918762a47a9326 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 19 Sep 2024 13:01:53 +0200
Subject: [PATCH] nfsserver: also stop rpc-statd for nfsv4_only to avoid stop
failing in some cases

E.g. nfs_no_notify=true nfsv4_only=true nfs_shared_infodir=/nfsmq/nfsinfo would cause a "Failed to unmount a bind mount" error
---
heartbeat/nfsserver | 16 +++++++---------
1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 5793d7a70..fd9268afc 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -947,15 +947,13 @@ nfsserver_stop ()
sleep 1
done

- if ! ocf_is_true "$OCF_RESKEY_nfsv4_only"; then
- nfs_exec stop rpc-statd > /dev/null 2>&1
- ocf_log info "Stop: rpc-statd"
- rpcinfo -t localhost 100024 > /dev/null 2>&1
- rc=$?
- if [ "$rc" -eq "0" ]; then
- ocf_exit_reason "Failed to stop rpc-statd"
- return $OCF_ERR_GENERIC
- fi
+ nfs_exec stop rpc-statd > /dev/null 2>&1
+ ocf_log info "Stop: rpc-statd"
+ rpcinfo -t localhost 100024 > /dev/null 2>&1
+ rc=$?
+ if [ "$rc" -eq "0" ]; then
+ ocf_exit_reason "Failed to stop rpc-statd"
+ return $OCF_ERR_GENERIC
fi

nfs_exec stop nfs-idmapd > /dev/null 2>&1
SOURCES/aliyun-vpc-move-ip-4-bundled.patch (new file, 15 lines)
@@ -0,0 +1,15 @@
--- a/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:45:38.432860930 +0200
+++ b/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:51:06.341211557 +0200
@@ -35,10 +35,10 @@
USAGE="usage: $0 {start|stop|status|meta-data}";

if [ "${OCF_RESKEY_aliyuncli}" = "detect" ]; then
-	OCF_RESKEY_aliyuncli="$(which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)"
+	OCF_RESKEY_aliyuncli="$(which aliyuncli-ra 2> /dev/null || which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)"
fi

-if [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli' ]; then
+if [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli-ra' ] || [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli' ]; then
OUTPUT="text"
EXECUTING='{ print $3 }'
IFS_=" "
SOURCES/aliyuncli-python3-fixes.patch (new file, 398 lines)
@@ -0,0 +1,398 @@
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 14:40:39.656330971 +0100
@@ -13,7 +13,7 @@

def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
return filename, "A file name is needed! please use \'--filename\' and add the file name."
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 14:41:48.927128430 +0100
@@ -13,7 +13,7 @@

def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
print("A profile is needed! please use \'--filename\' and add the profile name.")
@@ -21,7 +21,7 @@

def getInstanceCount(self,keyValues):
count = 1
- if keyValues.has_key('--instancecount') and len(keyValues['--instancecount']) > 0:
+ if '--instancecount' in keyValues and len(keyValues['--instancecount']) > 0:
if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0:
count = keyValues['--instancecount'][0]
else:
@@ -113,7 +113,7 @@

def isAllocatePublicIpAddress(self,keyValues):
_publicIp = False
- if keyValues.has_key('--allocatepublicip') and len(keyValues['--allocatepublicip']) > 0:
+ if '--allocatepublicip' in keyValues and len(keyValues['--allocatepublicip']) > 0:
if keyValues['--allocatepublicip'][0] == "yes":
_publicIp = True
return _publicIp
@@ -125,7 +125,7 @@
'''
data = json.loads(jsonbody)
'''
- if data.has_key('InstanceId') and len(data['InstanceId']) > 0:
+ if 'InstanceId' in data and len(data['InstanceId']) > 0:
instanceId = data['InstanceId']
except Exception as e:
pass
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 14:42:11.772731833 +0100
@@ -38,7 +38,7 @@

def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
return filename, "A file name is needed! please use \'--filename\' and add the file name."
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 14:39:09.247900469 +0100
@@ -13,7 +13,7 @@

def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
return filename, "A filename is needed! please use \'--filename\' and add the file name."
@@ -21,7 +21,7 @@
def getInstanceCount(self,keyValues):
count = 1
import_count = "--count"
- if keyValues.has_key(import_count) and len(keyValues[import_count]) > 0:
+ if import_count in keyValues and len(keyValues[import_count]) > 0:
if keyValues[import_count][0].isdigit() and int(keyValues[import_count][0]) >= 0:
count = keyValues[import_count][0]
else:
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2019-02-19 11:01:46.116653274 +0100
@@ -17,37 +17,37 @@

def getConfigHandlerOptions(self):
return [ConfigCmd.name]
-
+
def showConfig(self):
_credentialsPath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.credentials)
_configurePath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.configure)
config = dict()
configContent = dict()
- credentialsContent = dict ()
- if os.path.exists(_configurePath):
+ credentialsContent = dict ()
+ if os.path.exists(_configurePath):
for line in open(_configurePath):
line = line.strip('\n')
if line.find('=') > 0:
list = line.split("=",1)
- configContent[list[0]] = list[1]
- else:
- pass
- config['configure'] = configContent
- if os.path.exists(_credentialsPath):
- for line in open(_credentialsPath):
+ configContent[list[0]] = list[1]
+ else:
+ pass
+ config['configure'] = configContent
+ if os.path.exists(_credentialsPath):
+ for line in open(_credentialsPath):
line = line.strip('\n')
if line.find('=') > 0:
list = line.split("=",1)
- credentialsContent[list[0]] = list[1]
- else:
- pass
- config ['credentials'] = credentialsContent
- response.display_response("showConfigure",config,'table')
+ credentialsContent[list[0]] = list[1]
+ else:
+ pass
+ config ['credentials'] = credentialsContent
+ response.display_response("showConfigure",config,'table')
def importConfig():
pass
def exportConfig():
pass
-
+


if __name__ == "__main__":
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 14:40:12.267806439 +0100
@@ -20,7 +20,7 @@
def handleProfileCmd(self, cmd, keyValues):
if cmd.lower() == ProfileCmd.useProfile.lower(): # confirm command is right
#check --name is valid
- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0:
+ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0:
_value = keyValues[ProfileCmd.name][0] # use the first value
self.extensionCliHandler.setUserProfile(_value)
else:
@@ -34,7 +34,7 @@
newProfileName = ''
if cmd.lower() == ProfileCmd.addProfile.lower(): # confirm command is right
#check --name is valid
- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0:
+ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0:
_value = keyValues[ProfileCmd.name][0] # check the first value
# only input key and secret
newProfileName = _value
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 14:35:32.009660989 +0100
@@ -137,9 +137,9 @@
values.append(self.args[index])
index = index + 1
keyValues[currentValue] = values
- if keyValues.has_key(keystr) and keyValues[keystr].__len__() > 0:
+ if keystr in keyValues and keyValues[keystr].__len__() > 0:
_key = keyValues[keystr][0]
- if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0:
+ if secretstr in keyValues and keyValues[secretstr].__len__() > 0:
_secret = keyValues[secretstr][0]
#print("accesskeyid: ", _key , "accesskeysecret: ",_secret)
return _key, _secret
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2019-02-19 13:35:35.738680413 +0100
@@ -19,8 +19,9 @@
'''

import sys
-reload(sys)
-sys.setdefaultencoding('utf-8')
+if sys.version_info[0] < 3:
+ reload(sys)
+ sys.setdefaultencoding('utf-8')
__author__ = 'xixi.xxx'
import aliyunCliMain

diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 11:15:19.920089641 +0100
@@ -18,7 +18,7 @@
'''

import aliyunCliConfiugre
-import urllib2
+import urllib3
import re
import os
import platform
@@ -151,7 +151,7 @@
# this functino will get the latest version
def _getLatestTimeFromServer(self):
try:
- f = urllib2.urlopen(self.configure.server_url,data=None,timeout=5)
+ f = urllib3.urlopen(self.configure.server_url,data=None,timeout=5)
s = f.read()
return s
except Exception as e:
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 14:37:28.221649497 +0100
@@ -26,7 +26,7 @@
import aliyunSdkConfigure
import json
import cliError
-import urllib2
+import urllib3
import handleEndPoint

from __init__ import __version__
@@ -259,7 +259,7 @@
def changeEndPoint(self, classname, keyValues):
endpoint = "Endpoint"
try:
- if keyValues.has_key(endpoint) and keyValues[endpoint].__len__() > 0:
+ if endpoint in keyValues and keyValues[endpoint].__len__() > 0:
classname._RestApi__domain = keyValues[endpoint][0]
except Exception as e:
pass
@@ -444,10 +444,10 @@

def getTempVersion(self,keyValues):
key='--version'
- if keyValues is not None and keyValues.has_key(key):
+ if keyValues is not None and key in keyValues:
return keyValues.get(key)
key = 'version'
- if keyValues is not None and keyValues.has_key(key):
+ if keyValues is not None and key in keyValues:
return keyValues.get(key)

def getVersionFromFile(self,cmd):
@@ -513,7 +513,7 @@
self.checkForServer(response,cmd,operation)
def getRequestId(self,response):
try:
- if response.has_key('RequestId') and len(response['RequestId']) > 0:
+ if 'RequestId' in response and len(response['RequestId']) > 0:
requestId = response['RequestId']
return requestId
except Exception:
@@ -532,7 +532,7 @@
ua = ""
url = configure.server_url + "?requesId=" + requestId + "&ak=" + ak +"&ua="+ua+"&cmd="+cmd+"&operation="+operation
try:
- f = urllib2.urlopen(url,data=None,timeout=5)
+ f = urllib3.urlopen(url,data=None,timeout=5)
s = f.read()
return s
except Exception :
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 12:08:17.333785359 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 14:38:04.032029661 +0100
@@ -39,7 +39,7 @@

def sdkConfigure(self,cmd,operation):
keyValues = self.parser._getKeyValues()
- if keyValues.has_key('--version') and len(keyValues['--version']) > 0:
+ if '--version' in keyValues and len(keyValues['--version']) > 0:
version=keyValues['--version'][0]
filename=self.fileName
self.writeCmdVersionToFile(cmd,version,filename)
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 12:08:17.333785359 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 11:12:58.670708353 +0100
@@ -23,6 +23,8 @@
import aliyunCliParser
import platform

+if sys.version_info[0] > 2:
+ raw_input = input

OSS_CREDS_FILENAME = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~')
OSS_CONFIG_SECTION = 'OSSCredentials'
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 12:08:17.333785359 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 11:14:58.926181598 +0100
@@ -19,7 +19,7 @@
#/usr/bin/env python
#!-*- coding:utf-8 -*-
import os
-import urllib2
+import urllib3
import cliError
|
||||
|
||||
|
||||
@@ -64,9 +64,9 @@
|
||||
print(e)
|
||||
def _getParamFromUrl(prefix,value,mode):
|
||||
|
||||
- req = urllib2.Request(value)
|
||||
+ req = urllib3.Request(value)
|
||||
try:
|
||||
- response=urllib2.urlopen(req)
|
||||
+ response=urllib3.urlopen(req)
|
||||
if response.getcode() == 200:
|
||||
return response.read()
|
||||
else:
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/six.py b/bundled/aliyun/aliyun-cli/aliyuncli/six.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2019-02-19 11:14:40.505262286 +0100
|
||||
@@ -340,8 +340,8 @@
|
||||
|
||||
|
||||
_urllib_error_moved_attributes = [
|
||||
- MovedAttribute("URLError", "urllib2", "urllib.error"),
|
||||
- MovedAttribute("HTTPError", "urllib2", "urllib.error"),
|
||||
+ MovedAttribute("URLError", "urllib3", "urllib.error"),
|
||||
+ MovedAttribute("HTTPError", "urllib3", "urllib.error"),
|
||||
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
|
||||
]
|
||||
for attr in _urllib_error_moved_attributes:
|
||||
@@ -359,34 +359,34 @@
|
||||
|
||||
|
||||
_urllib_request_moved_attributes = [
|
||||
- MovedAttribute("urlopen", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("install_opener", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("build_opener", "urllib2", "urllib.request"),
|
||||
+ MovedAttribute("urlopen", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("install_opener", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("build_opener", "urllib3", "urllib.request"),
|
||||
MovedAttribute("pathname2url", "urllib", "urllib.request"),
|
||||
MovedAttribute("url2pathname", "urllib", "urllib.request"),
|
||||
MovedAttribute("getproxies", "urllib", "urllib.request"),
|
||||
- MovedAttribute("Request", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("FileHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
|
||||
- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
|
||||
+ MovedAttribute("Request", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("OpenerDirector", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("HTTPRedirectHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("HTTPCookieProcessor", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("ProxyHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("BaseHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("HTTPPasswordMgr", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("AbstractBasicAuthHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("HTTPBasicAuthHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("ProxyBasicAuthHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("AbstractDigestAuthHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("HTTPDigestAuthHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("ProxyDigestAuthHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("HTTPHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("HTTPSHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("FileHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("FTPHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("CacheFTPHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("UnknownHandler", "urllib3", "urllib.request"),
|
||||
+ MovedAttribute("HTTPErrorProcessor", "urllib3", "urllib.request"),
|
||||
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
|
||||
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
|
||||
MovedAttribute("URLopener", "urllib", "urllib.request"),
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/setup.py b/bundled/aliyun/aliyun-cli/setup.py
|
||||
--- a/bundled/aliyun/aliyun-cli/setup.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/setup.py 2019-02-19 13:33:29.069848394 +0100
|
||||
@@ -24,7 +24,7 @@
|
||||
|
||||
install_requires = [
|
||||
'colorama>=0.2.5,<=0.3.3',
|
||||
- 'jmespath>=0.7.0,<=0.7.1',
|
||||
+ 'jmespath>=0.7.0',
|
||||
]
|
||||
def main():
|
||||
setup(
|
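Note on the two conversions running through these aliyun hunks: dict.has_key() was removed in Python 3, and the "key in d" form used as its replacement works on both Python 2 and 3. The s/urllib2/urllib3/ substitution is weaker: urllib3 is a separate third-party library, not a renamed urllib2, and it exposes no module-level urlopen() or Request, so the patched calls such as urllib3.urlopen(...) would raise AttributeError if reached. A minimal sketch of both idioms as they would actually look (the URL is a placeholder, not taken from the patch):

    import urllib3

    key_values = {"--version": ["1.0"]}
    # Python 3 replacement for dict.has_key(); also valid on Python 2.
    if "--version" in key_values and len(key_values["--version"]) > 0:
        version = key_values["--version"][0]

    # urllib3 routes requests through a PoolManager rather than a
    # module-level urlopen(). Hypothetical URL, for illustration only.
    http = urllib3.PoolManager()
    resp = http.request("GET", "https://example.com/latest", timeout=5.0)
    if resp.status == 200:
        body = resp.data  # bytes, the analogue of urllib2's f.read()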
14
SOURCES/bz1691456-gcloud-dont-detect-python2.patch
Normal file
@@ -0,0 +1,14 @@
--- a/bundled/gcp/google-cloud-sdk/bin/gcloud 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/bin/gcloud 2021-10-14 11:30:17.726138166 +0200
@@ -128,6 +128,11 @@
fi
}

+if [ -z "$CLOUDSDK_PYTHON" ]; then
+ CLOUDSDK_PYTHON="/usr/libexec/platform-python"
+ CLOUDSDK_PYTHON_SITEPACKAGES=1
+fi
+
setup_cloudsdk_python

# $PYTHONHOME can interfere with gcloud. Users should use
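The hunk above pins the bundled gcloud wrapper to the platform interpreter only when the caller has not already chosen one. The same default-if-unset pattern in Python, for comparison (the path comes from the patch; everything else is illustrative):

    import os

    # Only takes effect when the variable is absent, mirroring the
    # `if [ -z "$CLOUDSDK_PYTHON" ]` guard in the shell hunk.
    os.environ.setdefault("CLOUDSDK_PYTHON", "/usr/libexec/platform-python")
    os.environ.setdefault("CLOUDSDK_PYTHON_SITEPACKAGES", "1")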
52
SOURCES/bz1935422-python-pygments-fix-CVE-2021-20270.patch
Normal file
@@ -0,0 +1,52 @@
From f91804ff4772e3ab41f46e28d370f57898700333 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Thu, 10 Dec 2020 08:19:21 +0100
Subject: [PATCH] fixes #1625: infinite loop in SML lexer

Reason was a lookahead-only pattern which was included in the state
where the lookahead was transitioning to.
---
pygments/lexers/ml.py | 12 ++++++------
2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py
index 8ca8ce3eb..f2ac367c5 100644
--- a/pygments/lexers/ml.py
+++ b/pygments/lexers/ml.py
@@ -142,7 +142,7 @@ def id_callback(self, match):
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
- (r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
+ (r'\b(exception)\b(?!\')', Keyword.Reserved, 'ename'),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
@@ -315,15 +315,14 @@ def id_callback(self, match):
'ename': [
include('whitespace'),

- (r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
+ (r'(and\b)(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
- (r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
+ (r'(and\b)(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
+ (r'(%s)|(%s)' % (alphanumid_re, symbolicid_re), Name.Class),

- include('breakout'),
- include('core'),
- (r'\S+', Error),
+ default('#pop'),
],

'datcon': [
@@ -445,6 +444,7 @@ class OcamlLexer(RegexLexer):
],
}

+
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).
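The commit message describes the classic regex-lexer hang: a lookahead-only rule matches without consuming input, and when it transitions into the state that contains it, the scanner position never advances. A small self-contained sketch of the failure mode and the usual guard (not Pygments code; the rule names are made up):

    import re

    def lex(text, rules):
        """Tiny one-state lexer; rules are (pattern, token) pairs."""
        pos, tokens = 0, []
        while pos < len(text):
            for pattern, token in rules:
                m = re.compile(pattern).match(text, pos)
                # A zero-width match (e.g. a pure lookahead) leaves pos
                # unchanged; taking it would loop forever, which is the
                # bug the patch removes. Skip such rules defensively.
                if m and m.end() > pos:
                    tokens.append((token, m.group()))
                    pos = m.end()
                    break
            else:
                pos += 1  # no rule consumed anything: skip one character
        return tokens

    # (?=exception) matches at position 0 but consumes nothing; without
    # the m.end() > pos guard this call would never terminate.
    print(lex("exception E", [(r"(?=exception)", "LA"), (r"\w+|\s+", "TXT")]))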
138
SOURCES/bz1943464-python-pygments-fix-CVE-2021-27291.patch
Normal file
@@ -0,0 +1,138 @@
From 2e7e8c4a7b318f4032493773732754e418279a14 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Mon, 11 Jan 2021 09:46:34 +0100
Subject: [PATCH] Fix several exponential/cubic complexity regexes found by Ben
Caller/Doyensec

---
pygments/lexers/archetype.py | 2 +-
pygments/lexers/factor.py | 4 ++--
pygments/lexers/jvm.py | 1 -
pygments/lexers/matlab.py | 6 +++---
pygments/lexers/objective.py | 4 ++--
pygments/lexers/templates.py | 2 +-
pygments/lexers/varnish.py | 2 +-
8 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py
index 65046613d..26f5ea8c9 100644
--- a/pygments/lexers/archetype.py
+++ b/pygments/lexers/archetype.py
@@ -58,7 +58,7 @@ class AtomsLexer(RegexLexer):
(r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|'
r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date),
(r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
- (r'[+-]?(\d+)*\.\d+%?', Number.Float),
+ (r'[+-]?\d*\.\d+%?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[+-]?\d+%?', Number.Integer),
],
diff --git a/pygments/lexers/factor.py b/pygments/lexers/factor.py
index be7b30dff..9200547f9 100644
--- a/pygments/lexers/factor.py
+++ b/pygments/lexers/factor.py
@@ -265,7 +265,7 @@ class FactorLexer(RegexLexer):
(r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),

# strings
- (r'"""\s+(?:.|\n)*?\s+"""', String),
+ (r'"""\s(?:.|\n)*?\s"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),
@@ -322,7 +322,7 @@ class FactorLexer(RegexLexer):
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
- (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
+ (r'(\{\s+)(\S+)(\s[^}]+\s\}\s)',
bygroups(Text, Name.Variable, Text)),
(r'\S+', Name.Variable),
],
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 62dfd45e5..9a9397c2d 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -981,7 +981,6 @@ class CeylonLexer(RegexLexer):
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
- (r'".*``.*``.*"', String.Interpol),
(r'(\.)([a-z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py
index 4823c6a7e..578848623 100644
--- a/pygments/lexers/matlab.py
+++ b/pygments/lexers/matlab.py
@@ -137,7 +137,7 @@ class MatlabLexer(RegexLexer):
(r'.', Comment.Multiline),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
@@ -638,7 +638,7 @@ class OctaveLexer(RegexLexer):
(r"[^']*'", String, '#pop'),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
@@ -710,7 +710,7 @@ class ScilabLexer(RegexLexer):
(r'.', String, '#pop'),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py
index 34e4062f6..38ac9bb05 100644
--- a/pygments/lexers/objective.py
+++ b/pygments/lexers/objective.py
@@ -261,11 +261,11 @@ class LogosLexer(ObjectiveCppLexer):
'logos_classname'),
(r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
bygroups(Keyword, Text, Name.Class)),
- (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)',
+ (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)',
bygroups(Keyword, Text, Name.Variable, Text, String, Text)),
(r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation),
'function'),
- (r'(%new)(\s*)(\()(\s*.*?\s*)(\))',
+ (r'(%new)(\s*)(\()(.*?)(\))',
bygroups(Keyword, Text, Keyword, String, Keyword)),
(r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)),
inherit,
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
index 33c06c4c4..5c3346b4c 100644
--- a/pygments/lexers/templates.py
+++ b/pygments/lexers/templates.py
@@ -1405,7 +1405,7 @@ class EvoqueLexer(RegexLexer):
# see doc for handling first name arg: /directives/evoque/
# + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
# should be using(PythonLexer), not passed out as String
- (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
+ (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?'
r'(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, using(PythonLexer), Punctuation)),
diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py
index 23653f7a1..9d358bd7c 100644
--- a/pygments/lexers/varnish.py
+++ b/pygments/lexers/varnish.py
@@ -61,7 +61,7 @@ def analyse_text(text):
bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)),
(r'(\.probe)(\s*=\s*)(\{)',
bygroups(Name.Attribute, Operator, Punctuation), 'probe'),
- (r'(\.\w+\b)(\s*=\s*)([^;]*)(\s*;)',
+ (r'(\.\w+\b)(\s*=\s*)([^;\s]*)(\s*;)',
bygroups(Name.Attribute, Operator, using(this), Punctuation)),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
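Every hunk in this patch follows one recipe: rewrite a pattern whose quantifiers can split a failing input in exponentially (or polynomially) many ways, such as (\d+)* over a run of digits, into one that admits a single parse, such as \d*. A hedged timing sketch of the archetype.py case (input sizes kept small so the vulnerable side still terminates quickly):

    import re
    import time

    vulnerable = re.compile(r'[+-]?(\d+)*\.\d+%?')  # nested quantifier backtracks
    fixed = re.compile(r'[+-]?\d*\.\d+%?')          # single \d* does not

    for n in (18, 20, 22):
        s = "1" * n  # no ".<digits>" suffix, so the match is forced to fail
        t0 = time.perf_counter()
        vulnerable.match(s)
        slow = time.perf_counter() - t0
        t0 = time.perf_counter()
        fixed.match(s)
        fast = time.perf_counter() - t0
        # Each extra digit roughly doubles the vulnerable time.
        print(f"n={n}: vulnerable={slow:.3f}s fixed={fast:.6f}s")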
24
SOURCES/bz1992661-mysql-use-ssl-mode.patch
Normal file
@@ -0,0 +1,24 @@
From ed5bc606a4db5108995df9297698cf9dc14cccb2 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 18 Jan 2022 11:32:05 +0100
Subject: [PATCH] mysql-common: fix local SSL connection by using
--ssl-mode=REQUIRED which is available on 5.7+ (--ssl is not available in
8.0)

---
heartbeat/mysql-common.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh
index 459948b10..de8763544 100755
--- a/heartbeat/mysql-common.sh
+++ b/heartbeat/mysql-common.sh
@@ -97,7 +97,7 @@ MYSQL_BINDIR=`dirname ${OCF_RESKEY_binary}`

MYSQL=$OCF_RESKEY_client_binary
if ocf_is_true "$OCF_RESKEY_replication_require_ssl"; then
- MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="--ssl"
+ MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="--ssl-mode=REQUIRED"
else
MYSQL_OPTIONS_LOCAL_SSL_OPTIONS=""
fi
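Context for the one-liner above: the bare --ssl client option was removed in MySQL 8.0, while --ssl-mode has existed since 5.7, so --ssl-mode=REQUIRED keeps the same "must use TLS" semantics on both. A sketch of selecting the flag by client version (the version parsing is illustrative, not part of the agent):

    def ssl_option(client_version):
        major, minor = (int(x) for x in client_version.split(".")[:2])
        # --ssl was removed in MySQL 8.0; --ssl-mode exists since 5.7.
        if (major, minor) >= (5, 7):
            return "--ssl-mode=REQUIRED"
        return "--ssl"

    assert ssl_option("8.0") == "--ssl-mode=REQUIRED"
    assert ssl_option("5.6") == "--ssl"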
23
SOURCES/bz1995178-storage-mon-fix-typo.patch
Normal file
@@ -0,0 +1,23 @@
From 09cde6531a87fd6a04568eaae94d5c489f36a8b6 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 6 Sep 2021 15:07:41 +0200
Subject: [PATCH] storage-mon: update metadata to suggest usage in combination
with HealthSMART agent

---
heartbeat/storage-mon.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in
index 5b289fe55..875095670 100644
--- a/heartbeat/storage-mon.in
+++ b/heartbeat/storage-mon.in
@@ -75,7 +75,7 @@ meta_data() {
<longdesc lang="en">
System health agent that checks the storage I/O status of the given drives and
updates the #health-storage attribute. Usage is highly recommended in combination
-with storage-mon monitoring agent. The agent currently support a maximum of 25
+with the HealthSMART monitoring agent. The agent currently support a maximum of 25
devices per instance.
</longdesc>
<shortdesc lang="en">storage I/O health status</shortdesc>
2016
SOURCES/bz2003117-all-agents-set-correct-agent-ocf-version.patch
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,64 @@
From fcd2565602146c0b9317d159cecb8935e304c7ce Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 30 Sep 2021 10:23:17 +0200
Subject: [PATCH] gcp-pd-move/gcp-vpc-move-route: dont fail failed resources
instantly (caused by OCF_ERR_CONFIGURED)

---
heartbeat/gcp-pd-move.in | 4 ++--
heartbeat/gcp-vpc-move-route.in | 6 +++---
2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
index e99cc71f88..cbe703c3c5 100644
--- a/heartbeat/gcp-pd-move.in
+++ b/heartbeat/gcp-pd-move.in
@@ -157,7 +157,7 @@ def populate_vars():
CONN = googleapiclient.discovery.build('compute', 'v1')
except Exception as e:
logger.error('Couldn\'t connect with google api: ' + str(e))
- sys.exit(ocf.OCF_ERR_CONFIGURED)
+ sys.exit(ocf.OCF_ERR_GENERIC)

for param in PARAMETERS:
value = os.environ.get('OCF_RESKEY_%s' % param, PARAMETERS[param])
@@ -172,7 +172,7 @@ def populate_vars():
except Exception as e:
logger.error(
'Couldn\'t get instance name, is this running inside GCE?: ' + str(e))
- sys.exit(ocf.OCF_ERR_CONFIGURED)
+ sys.exit(ocf.OCF_ERR_GENERIC)

PROJECT = get_metadata('project/project-id')
if PARAMETERS['disk_scope'] in ['detect', 'regional']:
diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
index dac6e4ea8c..6b240c04d0 100644
--- a/heartbeat/gcp-vpc-move-route.in
+++ b/heartbeat/gcp-vpc-move-route.in
@@ -243,7 +243,7 @@ def validate(ctx):
ctx.conn = googleapiclient.discovery.build('compute', 'v1', credentials=credentials, cache_discovery=False)
except Exception as e:
logger.error('Couldn\'t connect with google api: ' + str(e))
- sys.exit(OCF_ERR_CONFIGURED)
+ sys.exit(OCF_ERR_GENERIC)

ctx.ip = os.environ.get('OCF_RESKEY_ip')
if not ctx.ip:
@@ -258,7 +258,7 @@ def validate(ctx):
except Exception as e:
logger.error(
'Instance information not found. Is this a GCE instance ?: %s', str(e))
- sys.exit(OCF_ERR_CONFIGURED)
+ sys.exit(OCF_ERR_GENERIC)

ctx.instance_url = '%s/projects/%s/zones/%s/instances/%s' % (
GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance)
@@ -273,7 +273,7 @@ def validate(ctx):
idxs = ctx.iproute.link_lookup(ifname=ctx.interface)
if not idxs:
logger.error('Network interface not found')
- sys.exit(OCF_ERR_CONFIGURED)
+ sys.exit(OCF_ERR_GENERIC)
ctx.iface_idx = idxs[0]
366
SOURCES/bz2014415-nfsserver-add-nfs_server_scope-parameter.patch
Normal file
@@ -0,0 +1,366 @@
From 764dacb6195f8940f13b9c322b1bc8189c5619fc Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Mon, 6 Sep 2021 12:13:42 +0200
Subject: [PATCH 1/6] Fix NFSv4 lock failover: set NFS Server Scope

Problem: https://github.com/ClusterLabs/resource-agents/issues/1644
RFC8881, 8.4.2.1 State Reclaim:

| If the server scope is different, the client should not attempt to
| reclaim locks. In this situation, no lock reclaim is possible.
| Any attempt to re-obtain the locks with non-reclaim operations is
| problematic since there is no guarantee that the existing
| filehandles will be recognized by the new server, or that if
| recognized, they denote the same objects. It is best to treat the
| locks as having been revoked by the reconfiguration event.

That's why for lock reclaim to even be attempted, we have to define and set
the same server scope for NFSD on all cluster nodes in the NFS failover
cluster. And in linux, that is done by setting the uts nodename for the
command that starts the nfsd kernel threads.

For "init scripts", just set it directly using unshare --uts.
For systemd units, add NFS_SERVER_SCOPE to some environment files
and inject the "unshare --uts" into the ExecStart command lines
using override drop-in files.
---
heartbeat/nfsserver | 120 +++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 119 insertions(+), 1 deletion(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 96b19abe36..0888378645 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -5,6 +5,18 @@
# by hxinwei@gmail.com
# License: GNU General Public License v2 (GPLv2) and later

+
+# I don't know for certain whether all services actuall _need_ this,
+# I know that at least nfs-server needs it.
+# The rgmanager resource agent in rgmanager/src/resources/nfsserver.sh.in
+# did the unshare for gssd and idmapd as well, even though it seems unclear why.
+# Let's start with just the nfs-server, and add others if/when we have clear
+# indication they need it.
+#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpcbind.service"
+NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-server.service"
+SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE=/run/sysconfig/nfs-server-scope
+SYSTEMD_UNSHARE_UTS_DROPIN=51-resource-agents-unshare-uts.conf
+
if [ -n "$OCF_DEBUG_LIBRARY" ]; then
. $OCF_DEBUG_LIBRARY
else
@@ -99,6 +111,31 @@ Specifies the length of sm-notify retry time (minutes).
<content type="integer" default="" />
</parameter>

+<parameter name="nfs_server_scope" unique="0" required="0">
+<longdesc lang="en">
+RFC8881, 8.4.2.1 State Reclaim:
+
+If the server scope is different, the client should not attempt to
+reclaim locks. In this situation, no lock reclaim is possible.
+Any attempt to re-obtain the locks with non-reclaim operations is
+problematic since there is no guarantee that the existing
+filehandles will be recognized by the new server, or that if
+recognized, they denote the same objects. It is best to treat the
+locks as having been revoked by the reconfiguration event.
+
+For lock reclaim to even be attempted, we have to define and set the same
+server scope for NFSD on all cluster nodes in the NFS failover cluster.
+
+This agent won't "guess" a suitable server scope name for you, you need to
+explicitly specify this. But without it, NFSv4 lock reclaim after failover
+won't work properly. Suggested value: the failover "service IP".
+</longdesc>
+<shortdesc lang="en">
+RFC8881 NFS server scope for (lock) state reclaim after failover.
+</shortdesc>
+<content type="string"/>
+</parameter>
+
<parameter name="nfs_ip" unique="0" required="0">
<longdesc lang="en">
Comma separated list of floating IP addresses used to access the nfs service
@@ -269,7 +306,11 @@ nfs_exec()
set_exec_mode

case $EXEC_MODE in
- 1) ${OCF_RESKEY_nfs_init_script} $cmd;;
+ 1) if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
+ ${OCF_RESKEY_nfs_init_script} $cmd
+ else
+ unshare -u /bin/sh -c "hostname ${OCF_RESKEY_nfs_server_scope}; exec ${OCF_RESKEY_nfs_init_script} $cmd"
+ fi ;;
2) if ! echo $svc | grep -q "\."; then
svc="${svc}.service"
fi
@@ -623,6 +664,74 @@ notify_locks()
fi
}

+# Problem: https://github.com/ClusterLabs/resource-agents/issues/1644
+# RFC8881, 8.4.2.1 State Reclaim:
+#
+# | If the server scope is different, the client should not attempt to
+# | reclaim locks. In this situation, no lock reclaim is possible.
+# | Any attempt to re-obtain the locks with non-reclaim operations is
+# | problematic since there is no guarantee that the existing
+# | filehandles will be recognized by the new server, or that if
+# | recognized, they denote the same objects. It is best to treat the
+# | locks as having been revoked by the reconfiguration event.
+#
+# That's why for lock reclaim to even be attempted, we have to define and set
+# the same server scope for NFSD on all cluster nodes in the NFS failover
+# cluster. And in linux, that is done by setting the uts nodename for the
+# command that starts the nfsd kernel threads.
+#
+inject_unshare_uts_name_into_systemd_units ()
+{
+ local END_TAG="# END OF DROP-IN FOR NFS SERVER SCOPE"
+ local services
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)
+
+ local svc dir dropin edited_exec_start do_reload=false
+ for svc in $services ; do
+ dir=/run/systemd/system/$svc.d
+ dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
+ grep -sqF "$END_TAG" "$dropin" && continue
+
+ test -d "$dir" || mkdir -p "$dir"
+ test -e "$dropin" && rm -f "$dropin"
+
+ edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\(.*\\)#ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\1#p")
+ cat > "$dropin" <<___
+[Service]
+EnvironmentFile=$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE
+# reset list of exec start, then re-populate with unshared uts namespace
+ExecStart=
+$edited_exec_start
+$END_TAG
+___
+ do_reload=true
+ ocf_log debug "injected unshare --uts into $dropin"
+ done
+
+ mkdir -p "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE%/*}"
+ echo "NFS_SERVER_SCOPE=$OCF_RESKEY_nfs_server_scope" > "$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE"
+
+ $do_reload && systemctl daemon-reload
+}
+
+remove_unshare_uts_dropins ()
+{
+ local services
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE)
+
+ local svc dir dropin do_reload=false
+ for svc in $services ; do
+ dir=/run/systemd/system/$svc.d
+ dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
+ test -e "$dropin" || continue
+ rm -f "$dropin"
+ do_reload=true
+ ocf_log debug "removed unshare --uts from $svc"
+ done
+ rm -f "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE}"
+ $do_reload && systemctl daemon-reload
+}
+
nfsserver_start ()
{
local rc;
@@ -636,7 +745,13 @@ nfsserver_start ()
is_redhat_based && set_env_args
bind_tree
prepare_directory
+ case $EXEC_MODE in [23])
+ if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
+ remove_unshare_uts_dropins
+ else
+ inject_unshare_uts_name_into_systemd_units
+ fi ;;
+ esac

if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
@@ -854,6 +970,8 @@ nfsserver_stop ()
ocf_log info "NFS server stopped"
fi

+ case $EXEC_MODE in [23]) remove_unshare_uts_dropins;; esac
+
return $rc
}


From 515697b53c1614d05d39491c9af83e8d8b844b17 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 12:01:41 +0200
Subject: [PATCH 2/6] Fix NFSv4 lock failover: set NFS Server Scope, regardless
of EXEC_MODE

Debian (and other systems) may provide "init scripts",
which will only redirect back to systemd.

If we just unshare --uts the init script invocation,
the uts namespace is useless in that case.

If systemd is running, mangle the nfs-server.service unit,
independent of the "EXEC_MODE".
---
heartbeat/nfsserver | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 0888378645..054aabbaf6 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -745,13 +745,20 @@ nfsserver_start ()
is_redhat_based && set_env_args
bind_tree
prepare_directory
- case $EXEC_MODE in [23])
+
+ # Debian (and other systems) may provide "init scripts",
+ # which will only redirect back to systemd.
+ # If we just unshare --uts the init script invocation,
+ # the uts namespace is useless in that case.
+ # If systemd is running, mangle the nfs-server.service unit,
+ # independent of the "EXEC_MODE" we detected.
+ if $systemd_is_running ; then
if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
remove_unshare_uts_dropins
else
inject_unshare_uts_name_into_systemd_units
- fi ;;
- esac
+ fi
+ fi

if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
@@ -970,7 +977,9 @@ nfsserver_stop ()
ocf_log info "NFS server stopped"
fi

- case $EXEC_MODE in [23]) remove_unshare_uts_dropins;; esac
+ if $systemd_is_running; then
+ remove_unshare_uts_dropins
+ fi

return $rc
}
@@ -1008,6 +1017,7 @@ nfsserver_validate ()
}

nfsserver_validate
+systemd_is_running && systemd_is_running=true || systemd_is_running=false

case $__OCF_ACTION in
start) nfsserver_start

From e83c20d88f404f9f9d829c654883d60eb6cc9ff3 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:06:18 +0200
Subject: [PATCH 3/6] Fix NFSv4 lock failover: add missing "|cut -f1" in
remove_unshare_uts_dropins

---
heartbeat/nfsserver | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 054aabbaf6..d3db89a537 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -717,7 +717,7 @@ ___
remove_unshare_uts_dropins ()
{
local services
- services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE)
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)

local svc dir dropin do_reload=false
for svc in $services ; do

From b5b0e4a0b60d285af576b2d8ecfbe95e5a177a87 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:07:13 +0200
Subject: [PATCH 4/6] Fix NFSv4 lock failover: get rid of "world-inaccessible"
warning

by temporarily changing the umask before generating the dropins
---
heartbeat/nfsserver | 3 +++
1 file changed, 3 insertions(+)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index d3db89a537..447e0302b2 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -687,6 +687,8 @@ inject_unshare_uts_name_into_systemd_units ()
services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)

local svc dir dropin edited_exec_start do_reload=false
+ local old_umask=$(umask)
+ umask 0022
for svc in $services ; do
dir=/run/systemd/system/$svc.d
dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
@@ -710,6 +712,7 @@ ___

mkdir -p "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE%/*}"
echo "NFS_SERVER_SCOPE=$OCF_RESKEY_nfs_server_scope" > "$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE"
+ umask $old_umask

$do_reload && systemctl daemon-reload
}

From 3c6c91ce5a00eeef9cd766389d73a0b42580a1e6 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:08:09 +0200
Subject: [PATCH 5/6] Fix NFSv4 lock failover: deal with "special executable
prefix" chars in ExecStart

---
heartbeat/nfsserver | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 447e0302b2..5326bd2c6e 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -697,7 +697,7 @@ inject_unshare_uts_name_into_systemd_units ()
test -d "$dir" || mkdir -p "$dir"
test -e "$dropin" && rm -f "$dropin"

- edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\(.*\\)#ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\1#p")
+ edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\([-+:!@]*\\)\\(.*\\)#ExecStart=\\1/usr/bin/unshare --uts /bin/sh -c 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\2#p")
cat > "$dropin" <<___
[Service]
EnvironmentFile=$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE

From 512fbaf61e6d24a1236ef50e323ea17a62485c36 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:08:59 +0200
Subject: [PATCH 6/6] Fix NFSv4 lock failover: add rpc-statd-notify to the
comment list of potentially interesting services

---
heartbeat/nfsserver | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 5326bd2c6e..240dd1a76c 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -12,7 +12,7 @@
# did the unshare for gssd and idmapd as well, even though it seems unclear why.
# Let's start with just the nfs-server, and add others if/when we have clear
# indication they need it.
-#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpcbind.service"
+#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpc-statd-notify.service rpcbind.service"
NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-server.service"
SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE=/run/sysconfig/nfs-server-scope
SYSTEMD_UNSHARE_UTS_DROPIN=51-resource-agents-unshare-uts.conf
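The mechanism running through this whole six-patch series: write a drop-in override under /run/systemd/system/<unit>.d/ that clears ExecStart= and re-adds it wrapped in unshare --uts, so the nfsd kernel threads see a cluster-wide uts nodename (the NFSv4 server scope) instead of the local hostname. A hedged sketch of generating such a drop-in (unit name, drop-in name, and paths come from the patch; the original ExecStart value and the generator itself are illustrative):

    # Build the drop-in text the agent writes; values as in the patch.
    unit = "nfs-server.service"
    dropin_dir = f"/run/systemd/system/{unit}.d"
    dropin = f"{dropin_dir}/51-resource-agents-unshare-uts.conf"
    env_file = "/run/sysconfig/nfs-server-scope"

    # Hypothetical original ExecStart, as `systemctl cat` would report it.
    exec_start = "/usr/sbin/rpc.nfsd"

    override = f"""[Service]
    EnvironmentFile={env_file}
    # reset list of exec start, then re-populate with unshared uts namespace
    ExecStart=
    ExecStart=/usr/bin/unshare --uts /bin/sh -c 'hostname ${{NFS_SERVER_SCOPE}}; exec "$@"' -- {exec_start}
    # END OF DROP-IN FOR NFS SERVER SCOPE
    """
    print(override)  # the agent writes this file, then runs systemctl daemon-reload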
11
SOURCES/bz2015789-gcp-ilb-3-use-bundled-gcloud.patch
Normal file
@@ -0,0 +1,11 @@
--- a/heartbeat/gcp-ilb 2021-11-09 14:13:20.311243373 +0100
+++ b/heartbeat/gcp-ilb 2021-11-09 14:13:50.269329165 +0100
@@ -28,7 +28,7 @@
OCF_RESKEY_cat_default="socat"
OCF_RESKEY_port_default="60000"
OCF_RESKEY_log_enable_default="false"
-OCF_RESKEY_log_cmd_default="gcloud"
+OCF_RESKEY_log_cmd_default="gcloud-ra"
OCF_RESKEY_log_params_default="logging write GCPILB"
OCF_RESKEY_log_end_params_default=""
22
SOURCES/bz2027591-nfsnotify-fix-notify_args-default.patch
Normal file
@@ -0,0 +1,22 @@
From 1c037b3ac0288509fb2b74fb4a661a504155da15 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 26 Aug 2021 12:27:50 +0200
Subject: [PATCH] nfsnotify: fix default value for "notify_args"

---
heartbeat/nfsnotify.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/nfsnotify.in b/heartbeat/nfsnotify.in
index 851f6ad6b4..fe6d2793ba 100644
--- a/heartbeat/nfsnotify.in
+++ b/heartbeat/nfsnotify.in
@@ -33,7 +33,7 @@
# Parameter defaults

OCF_RESKEY_source_host_default=""
-OCF_RESKEY_notify_args_default="false"
+OCF_RESKEY_notify_args_default=""

: ${OCF_RESKEY_source_host=${OCF_RESKEY_source_host_default}}
: ${OCF_RESKEY_notify_args=${OCF_RESKEY_notify_args_default}}
@@ -1,22 +0,0 @@
From c6338011cf9ea69324f44c8c31a4ca2478aab35a Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 7 Dec 2021 08:59:50 +0100
Subject: [PATCH] podman: remove anonymous volumes

---
heartbeat/podman | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/heartbeat/podman b/heartbeat/podman
index fd901c968..2b73857f1 100755
--- a/heartbeat/podman
+++ b/heartbeat/podman
@@ -251,7 +251,7 @@ remove_container()
return 0
fi
ocf_log notice "Cleaning up inactive container, ${CONTAINER}."
- ocf_run podman rm $CONTAINER
+ ocf_run podman rm -v $CONTAINER
rc=$?
if [ $rc -ne 0 ]; then
# due to a podman bug (rhbz#1841485), sometimes a stopped
|
||||
From 3e469239e8c853725b28a9c6b509152aacc2c5cc Mon Sep 17 00:00:00 2001
|
||||
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||
Date: Mon, 13 Jun 2022 11:24:05 +0200
|
||||
Subject: [PATCH 1/2] all agents: update to promotable terms
|
||||
|
||||
---
|
||||
heartbeat/SAPInstance | 22 +++++++++++-----------
|
||||
heartbeat/conntrackd.in | 6 +++---
|
||||
heartbeat/db2 | 12 ++++++------
|
||||
heartbeat/dnsupdate.in | 2 +-
|
||||
heartbeat/galera.in | 26 +++++++++++++-------------
|
||||
heartbeat/iface-bridge | 6 +++---
|
||||
heartbeat/mariadb.in | 30 +++++++++++++++---------------
|
||||
heartbeat/mpathpersist.in | 24 ++++++++++++------------
|
||||
heartbeat/mysql | 4 ++--
|
||||
heartbeat/mysql-proxy | 2 +-
|
||||
heartbeat/pgsql | 2 +-
|
||||
heartbeat/redis.in | 4 ++--
|
||||
heartbeat/sg_persist.in | 4 ++--
|
||||
14 files changed, 74 insertions(+), 74 deletions(-)
|
||||
|
||||
diff --git a/heartbeat/SAPInstance b/heartbeat/SAPInstance
|
||||
index 016f59aff..e3fe788ae 100755
|
||||
--- a/heartbeat/SAPInstance
|
||||
+++ b/heartbeat/SAPInstance
|
||||
@@ -25,8 +25,8 @@
|
||||
# OCF_RESKEY_AUTOMATIC_RECOVER (optional, automatic startup recovery using cleanipc, default is false)
|
||||
# OCF_RESKEY_MONITOR_SERVICES (optional, default is to monitor critical services only)
|
||||
# OCF_RESKEY_SHUTDOWN_METHOD (optional, defaults to NORMAL, KILL: terminate the SAP instance with OS commands - faster, at your own risk)
|
||||
-# OCF_RESKEY_ERS_InstanceName (optional, InstanceName of the ERS instance in a Master/Slave configuration)
|
||||
-# OCF_RESKEY_ERS_START_PROFILE (optional, START_PROFILE of the ERS instance in a Master/Slave configuration)
|
||||
+# OCF_RESKEY_ERS_InstanceName (optional, InstanceName of the ERS instance in a Promotable configuration)
|
||||
+# OCF_RESKEY_ERS_START_PROFILE (optional, START_PROFILE of the ERS instance in a Promotable configuration)
|
||||
# OCF_RESKEY_PRE_START_USEREXIT (optional, lists a script which can be executed before the resource is started)
|
||||
# OCF_RESKEY_POST_START_USEREXIT (optional, lists a script which can be executed after the resource is started)
|
||||
# OCF_RESKEY_PRE_STOP_USEREXIT (optional, lists a script which can be executed before the resource is stopped)
|
||||
@@ -92,11 +92,11 @@ sapinstance_usage() {
|
||||
|
||||
$0 manages a SAP Instance as an HA resource.
|
||||
|
||||
- The 'start' operation starts the instance or the ERS instance in a Master/Slave configuration
|
||||
+ The 'start' operation starts the instance or the ERS instance in a Promotable configuration
|
||||
The 'stop' operation stops the instance
|
||||
The 'status' operation reports whether the instance is running
|
||||
The 'monitor' operation reports whether the instance seems to be working
|
||||
- The 'promote' operation starts the primary instance in a Master/Slave configuration
|
||||
+ The 'promote' operation starts the primary instance in a Promotable configuration
|
||||
The 'demote' operation stops the primary instance and starts the ERS instance
|
||||
The 'reload' operation allows changed parameters (non-unique only) without restarting the service
|
||||
The 'notify' operation always returns SUCCESS
|
||||
@@ -201,11 +201,11 @@ You may specify multiple services separated by a | (pipe) sign in this parameter
|
||||
<content type="string" default="${OCF_RESKEY_SHUTDOWN_METHOD_default}"/>
|
||||
</parameter>
|
||||
<parameter name="ERS_InstanceName" unique="1" required="0">
|
||||
- <longdesc lang="en">Only used in a Master/Slave resource configuration:
|
||||
+ <longdesc lang="en">Only used in a Promotable resource configuration:
|
||||
The full qualified SAP enqueue replication instance name. e.g. P01_ERS02_sapp01ers. Usually this is the name of the SAP instance profile.
|
||||
-The enqueue replication instance must be installed, before you want to configure a master-slave cluster resource.
|
||||
+The enqueue replication instance must be installed, before you want to configure a promotable cluster resource.
|
||||
|
||||
-The master-slave configuration in the cluster must use this properties:
|
||||
+The promotable configuration in the cluster must use this properties:
|
||||
clone_max = 2
|
||||
clone_node_max = 1
|
||||
master_node_max = 1
|
||||
@@ -215,7 +215,7 @@ master_max = 1
|
||||
<content type="string" default="${OCF_RESKEY_ERS_InstanceName_default}"/>
|
||||
</parameter>
|
||||
<parameter name="ERS_START_PROFILE" unique="1" required="0">
|
||||
- <longdesc lang="en">Only used in a Master/Slave resource configuration:
|
||||
+ <longdesc lang="en">Only used in a Promotable resource configuration:
|
||||
The parameter ERS_InstanceName must also be set in this configuration.
|
||||
The name of the SAP START profile. Specify this parameter, if you have changed the name of the SAP START profile after the default SAP installation. As SAP release 7.10 does not have a START profile anymore, you need to specify the Instance Profile than.
|
||||
</longdesc>
|
||||
@@ -243,7 +243,7 @@ The name of the SAP START profile. Specify this parameter, if you have changed t
|
||||
<content type="string" default="${OCF_RESKEY_POST_STOP_USEREXIT_default}" />
|
||||
</parameter>
|
||||
<parameter name="IS_ERS" unique="0" required="0">
|
||||
- <longdesc lang="en">Only used for ASCS/ERS SAP Netweaver installations without implementing a master/slave resource to
|
||||
+ <longdesc lang="en">Only used for ASCS/ERS SAP Netweaver installations without implementing a promotable resource to
|
||||
allow the ASCS to 'find' the ERS running on another cluster node after a resource failure. This parameter should be set
|
||||
to true 'only' for the ERS instance for implementations following the SAP NetWeaver 7.40 HA certification (NW-HA-CLU-740). This includes also
|
||||
systems for NetWeaver less than 7.40, if you like to implement the NW-HA-CLU-740 scenario.
|
||||
@@ -266,8 +266,8 @@ The name of the SAP START profile. Specify this parameter, if you have changed t
|
||||
<action name="stop" timeout="240s" />
|
||||
<action name="status" timeout="60s" />
|
||||
<action name="monitor" depth="0" timeout="60s" interval="120s" />
|
||||
-<action name="monitor" depth="0" timeout="60s" interval="121s" role="Slave" />
|
||||
-<action name="monitor" depth="0" timeout="60s" interval="119s" role="Master" />
|
||||
+<action name="monitor" depth="0" timeout="60s" interval="121s" role="Unpromoted" />
|
||||
+<action name="monitor" depth="0" timeout="60s" interval="119s" role="Promoted" />
|
||||
<action name="promote" timeout="320s" />
|
||||
<action name="demote" timeout="320s" />
|
||||
<action name="reload" timeout="320s" />
|
||||
diff --git a/heartbeat/conntrackd.in b/heartbeat/conntrackd.in
|
||||
index f115250d6..1c2ee955b 100644
|
||||
--- a/heartbeat/conntrackd.in
|
||||
+++ b/heartbeat/conntrackd.in
|
||||
@@ -50,7 +50,7 @@ meta_data() {
|
||||
<version>1.0</version>
|
||||
|
||||
<longdesc lang="en">
|
||||
-Master/Slave OCF Resource Agent for conntrackd
|
||||
+Promotable OCF Resource Agent for conntrackd
|
||||
</longdesc>
|
||||
|
||||
<shortdesc lang="en">This resource agent manages conntrackd</shortdesc>
|
||||
@@ -81,8 +81,8 @@ For example "/packages/conntrackd-0.9.14/etc/conntrackd/conntrackd.conf"</longde
|
||||
<action name="demote" timeout="30s" />
|
||||
<action name="notify" timeout="30s" />
|
||||
<action name="stop" timeout="30s" />
|
||||
-<action name="monitor" timeout="20s" interval="20s" role="Slave" />
|
||||
-<action name="monitor" timeout="20s" interval="10s" role="Master" />
|
||||
+<action name="monitor" timeout="20s" interval="20s" role="Unpromoted" />
|
||||
+<action name="monitor" timeout="20s" interval="10s" role="Promoted" />
|
||||
<action name="meta-data" timeout="5s" />
|
||||
<action name="validate-all" timeout="30s" />
|
||||
</actions>
|
||||
diff --git a/heartbeat/db2 b/heartbeat/db2
|
||||
index 4a4b2f477..620b89583 100755
|
||||
--- a/heartbeat/db2
|
||||
+++ b/heartbeat/db2
|
||||
@@ -3,7 +3,7 @@
|
||||
# db2
|
||||
#
|
||||
# Resource agent that manages a DB2 LUW database in Standard role
|
||||
-# or HADR configuration in master/slave configuration.
|
||||
+# or HADR configuration in promotable configuration.
|
||||
# Multi partition is supported as well.
|
||||
#
|
||||
# Copyright (c) 2011 Holger Teutsch <holger.teutsch@web.de>
|
||||
@@ -61,7 +61,7 @@ cat <<END
|
||||
<resource-agent name="db2" version="1.0">
|
||||
<version>1.0</version>
|
||||
<longdesc lang="en">
|
||||
-Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles in master/slave configuration. Multiple partitions are supported.
|
||||
+Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles in promotable configuration. Multiple partitions are supported.
|
||||
|
||||
Standard mode:
|
||||
|
||||
@@ -71,8 +71,8 @@ Configure each partition as a separate primitive resource.
|
||||
HADR mode:
|
||||
|
||||
A single database in HADR configuration is made highly available by automating takeover operations.
|
||||
-Configure a master / slave resource with notifications enabled and an
|
||||
-additional monitoring operation with role "Master".
|
||||
+Configure a promotable resource with notifications enabled and an
|
||||
+additional monitoring operation with role "Promoted".
|
||||
|
||||
In case of HADR be very deliberate in specifying intervals/timeouts. The detection of a failure including promote must complete within HADR_PEER_WINDOW.
|
||||
|
||||
@@ -84,7 +84,7 @@ In addition to honoring requirements for crash recovery etc. for your specific d
|
||||
|
||||
For further information and examples consult http://www.linux-ha.org/wiki/db2_(resource_agent)
|
||||
</longdesc>
|
||||
-<shortdesc lang="en">Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles as master/slave configuration. Multiple partitions are supported.</shortdesc>
|
||||
+<shortdesc lang="en">Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles as promotable configuration. Multiple partitions are supported.</shortdesc>
|
||||
|
||||
<parameters>
|
||||
<parameter name="instance" unique="1" required="1">
|
||||
@@ -125,7 +125,7 @@ The number of the partition (DBPARTITIONNUM) to be managed.
|
||||
<action name="demote" timeout="120s"/>
|
||||
<action name="notify" timeout="10s"/>
|
||||
<action name="monitor" depth="0" timeout="60s" interval="20s"/>
|
||||
-<action name="monitor" depth="0" timeout="60s" role="Master" interval="22s"/>
|
||||
+<action name="monitor" depth="0" timeout="60s" role="Promoted" interval="22s"/>
|
||||
<action name="validate-all" timeout="5s"/>
|
||||
<action name="meta-data" timeout="5s"/>
|
||||
</actions>
|
||||
diff --git a/heartbeat/dnsupdate.in b/heartbeat/dnsupdate.in
|
||||
index 35b7c99bb..b54822cd8 100755
|
||||
--- a/heartbeat/dnsupdate.in
|
||||
+++ b/heartbeat/dnsupdate.in
|
||||
@@ -119,7 +119,7 @@ the exact syntax.
|
||||
<parameter name="server" unique="0" required="0">
|
||||
<longdesc lang="en">
|
||||
Which DNS server to send these updates for. When no
|
||||
-server is provided, this defaults to the master server
|
||||
+server is provided, this defaults to the promoted server
|
||||
for the correct zone.
|
||||
</longdesc>
|
||||
<shortdesc lang="en">DNS server to contact</shortdesc>
|
||||
diff --git a/heartbeat/galera.in b/heartbeat/galera.in
index c363eb254..546b1a853 100755
--- a/heartbeat/galera.in
+++ b/heartbeat/galera.in
@@ -26,31 +26,31 @@
##
# README.
#
-# This agent only supports being configured as a multistate Master
+# This agent only supports being configured as a multistate Promoted
# resource.
#
-# Slave vs Master role:
+# Unpromoted vs Promoted role:
#
-# During the 'Slave' role, galera instances are in read-only mode and
+# During the 'Unpromoted' role, galera instances are in read-only mode and
# will not attempt to connect to the cluster. This role exists only as
# a means to determine which galera instance is the most up-to-date. The
# most up-to-date node will be used to bootstrap a galera cluster that
# has no current members.
#
-# The galera instances will only begin to be promoted to the Master role
+# The galera instances will only begin to be promoted to the Promoted role
# once all the nodes in the 'wsrep_cluster_address' connection address
# have entered read-only mode. At that point the node containing the
-# database that is most current will be promoted to Master. Once the first
-# Master instance bootstraps the galera cluster, the other nodes will be
-# promoted to Master as well.
+# database that is most current will be promoted to Promoted. Once the first
+# Promoted instance bootstraps the galera cluster, the other nodes will be
+# promoted to Promoted as well.
#
# Example: Create a galera cluster using nodes rhel7-node1 rhel7-node2 rhel7-node3
#
# pcs resource create db galera enable_creation=true \
-# wsrep_cluster_address="gcomm://rhel7-auto1,rhel7-auto2,rhel7-auto3" meta master-max=3 --master
+# wsrep_cluster_address="gcomm://rhel7-auto1,rhel7-auto2,rhel7-auto3" meta promoted-max=3 --promoted
#
# By setting the 'enable_creation' option, the database will be automatically
-# generated at startup. The meta attribute 'master-max=3' means that all 3
+# generated at startup. The meta attribute 'promoted-max=3' means that all 3
# nodes listed in the wsrep_cluster_address list will be allowed to connect
# to the galera cluster and perform replication.
#
@@ -114,8 +114,8 @@ The 'start' operation starts the database.
The 'stop' operation stops the database.
The 'status' operation reports whether the database is running
The 'monitor' operation reports whether the database seems to be working
-The 'promote' operation makes this mysql server run as master
-The 'demote' operation makes this mysql server run as slave
+The 'promote' operation makes this mysql server run as promoted
+The 'demote' operation makes this mysql server run as unpromoted
The 'validate-all' operation reports whether the parameters are valid

UEND
@@ -298,8 +298,8 @@ Use it with caution! (and fencing)
<action name="stop" timeout="120s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="20s" />
-<action name="monitor" role="Master" depth="0" timeout="30s" interval="10s" />
-<action name="monitor" role="Slave" depth="0" timeout="30s" interval="30s" />
+<action name="monitor" role="Promoted" depth="0" timeout="30s" interval="10s" />
+<action name="monitor" role="Unpromoted" depth="0" timeout="30s" interval="30s" />
<action name="promote" timeout="300s" />
<action name="demote" timeout="120s" />
<action name="validate-all" timeout="5s" />
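Note: on a stack where these renamed attributes are in effect, the commented galera example above maps onto the promotable-clone syntax. A minimal sketch, assuming pcs 0.10+ and placeholder node names:

    # 'promotable' replaces the removed '--master' flag; 'promoted-max'
    # replaces the old 'master-max' meta attribute
    pcs resource create db galera enable_creation=true \
        wsrep_cluster_address="gcomm://node1,node2,node3" \
        promotable promoted-max=3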
diff --git a/heartbeat/iface-bridge b/heartbeat/iface-bridge
index 75d5371dd..a4e50adb9 100755
--- a/heartbeat/iface-bridge
+++ b/heartbeat/iface-bridge
@@ -211,7 +211,7 @@ bridge_meta_data() {
<longdesc lang="en">
Set the port cost. This is a dimensionless metric.
A list of port/cost can be specified using the following
- format: slave cost slave cost.
+ format: unpromoted cost unpromoted cost.
Example: eth0 100 eth1 1000
</longdesc>
<shortdesc lang="en">
@@ -228,7 +228,7 @@ bridge_meta_data() {
This metric is used in the designated port and root port
selection algorithms.
A list of port/priority can be specified using the following
- format: slave cost slave cost.
+ format: unpromoted cost unpromoted cost.
Example: eth0 10 eth1 60
</longdesc>
<shortdesc lang="en">
@@ -262,7 +262,7 @@ bridge_meta_data() {
Enable or disable a port from the multicast router.
Kernel enables all port by default.
A list of port can be specified using the following
- format: slave 0|1 slave 0|1.
+ format: unpromoted 0|1 unpromoted 0|1.
Example: eth0 1 eth1 0
</longdesc>
<shortdesc lang="en">
diff --git a/heartbeat/mariadb.in b/heartbeat/mariadb.in
index 39ad191bb..5a39ccb66 100644
--- a/heartbeat/mariadb.in
+++ b/heartbeat/mariadb.in
@@ -3,7 +3,7 @@
#
# MariaDB
#
-# Description: Manages a MariaDB Master/Slave database as Linux-HA resource
+# Description: Manages a MariaDB Promotable database as Linux-HA resource
#
# Authors: Alan Robertson: DB2 Script
# Jakub Janczak: rewrite as MySQL
@@ -61,8 +61,8 @@ The 'start' operation starts the database.
The 'stop' operation stops the database.
The 'status' operation reports whether the database is running
The 'monitor' operation reports whether the database seems to be working
-The 'promote' operation makes this mysql server run as master
-The 'demote' operation makes this mysql server run as slave
+The 'promote' operation makes this mysql server run as promoted
+The 'demote' operation makes this mysql server run as unpromoted
The 'validate-all' operation reports whether the parameters are valid

UEND
@@ -78,20 +78,20 @@ meta_data() {
<longdesc lang="en">
Resource script for MariaDB.

-Manages a complete master/slave replication setup with GTID, for simpler
+Manages a complete promotable replication setup with GTID, for simpler
uses look at the mysql resource agent which supports older replication
forms which mysql and mariadb have in common.

The resource must be setup to use notifications. Set 'notify=true' in the metadata
-attributes when defining a MariaDB master/slave instance.
+attributes when defining a MariaDB promotable instance.

-The default behavior is to use uname -n values in the change master to command.
+The default behavior is to use uname -n values in the change promoted to command.
Other IPs can be specified manually by adding a node attribute
\${INSTANCE_ATTR_NAME}_mysql_master_IP giving the IP to use for replication.
For example, if the mariadb primitive you are using is p_mariadb, the
attribute to set will be p_mariadb_mysql_master_IP.
</longdesc>
-<shortdesc lang="en">Manages a MariaDB master/slave instance</shortdesc>
+<shortdesc lang="en">Manages a MariaDB promotable instance</shortdesc>
<parameters>

<parameter name="binary" unique="0" required="0">
@@ -154,7 +154,7 @@ The logfile to be used for mysqld.
<longdesc lang="en">
All node names of nodes that will execute mariadb.
Please separate each node name with a space.
-This is required for the master selection to function.
+This is required for the promoted selection to function.
</longdesc>
<shortdesc lang="en">node list</shortdesc>
<content type="string" default="${OCF_RESKEY_node_list_default}" />
@@ -220,11 +220,11 @@ Additional parameters which are passed to the mysqld on startup.
<parameter name="replication_user" unique="0" required="0">
<longdesc lang="en">
MariaDB replication user. This user is used for starting and stopping
-MariaDB replication, for setting and resetting the master host, and for
+MariaDB replication, for setting and resetting the promoted host, and for
setting and unsetting read-only mode. Because of that, this user must
have SUPER, REPLICATION SLAVE, REPLICATION CLIENT, PROCESS and RELOAD
privileges on all nodes within the cluster. Mandatory if you define a
-master-slave resource.
+promotable resource.
</longdesc>
<shortdesc lang="en">MariaDB replication user</shortdesc>
<content type="string" default="${OCF_RESKEY_replication_user_default}" />
@@ -232,8 +232,8 @@ master-slave resource.

<parameter name="replication_passwd" unique="0" required="0">
<longdesc lang="en">
-MariaDB replication password. Used for replication client and slave.
-Mandatory if you define a master-slave resource.
+MariaDB replication password. Used for replication client and unpromoted.
+Mandatory if you define a promotable resource.
</longdesc>
<shortdesc lang="en">MariaDB replication user password</shortdesc>
<content type="string" default="${OCF_RESKEY_replication_passwd_default}" />
@@ -241,7 +241,7 @@ Mandatory if you define a master-slave resource.

<parameter name="replication_port" unique="0" required="0">
<longdesc lang="en">
-The port on which the Master MariaDB instance is listening.
+The port on which the Promoted MariaDB instance is listening.
</longdesc>
<shortdesc lang="en">MariaDB replication port</shortdesc>
<content type="string" default="${OCF_RESKEY_replication_port_default}" />
@@ -254,8 +254,8 @@ The port on which the Master MariaDB instance is listening.
<action name="stop" timeout="120s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="20s" />
-<action name="monitor" role="Master" depth="0" timeout="30s" interval="10s" />
-<action name="monitor" role="Slave" depth="0" timeout="30s" interval="30s" />
+<action name="monitor" role="Promoted" depth="0" timeout="30s" interval="10s" />
+<action name="monitor" role="Unpromoted" depth="0" timeout="30s" interval="30s" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
diff --git a/heartbeat/mpathpersist.in b/heartbeat/mpathpersist.in
index fcf1b3a4b..e47fef4bd 100644
--- a/heartbeat/mpathpersist.in
+++ b/heartbeat/mpathpersist.in
@@ -80,9 +80,9 @@ meta_data() {
<longdesc lang="en">
This resource agent manages SCSI persistent reservations on multipath devices.
"mpathpersist" from multipath-tools is used, please see its documentation.
-Should be used as multistate (Master/Slave) resource
-Slave registers its node id ("crm_node -i") as reservation key ( --param-sark ) on each device in the params "devs" list.
-Master reserves all devices from params "devs" list with reservation "--prout-type" value from "reservation_type" parameter.
+Should be used as multistate (Promotable) resource
+Unpromoted registers its node id ("crm_node -i") as reservation key ( --param-sark ) on each device in the params "devs" list.
+Promoted reserves all devices from params "devs" list with reservation "--prout-type" value from "reservation_type" parameter.
Please see man sg_persist(8) and mpathpersist(8) for reservation_type details.
</longdesc>
<shortdesc lang="en">Manages SCSI persistent reservations on multipath devices</shortdesc>
@@ -132,7 +132,7 @@ reservation type
master_score_base value
"master_score_base" value is used in "master_score" calculation:
master_score = master_score_base + master_score_dev_factor * working_devs
-if set to bigger value in mpathpersist resource configuration on some node, this node will be "preferred" for master role.
+if set to bigger value in mpathpersist resource configuration on some node, this node will be "preferred" for promoted role.
</longdesc>
<shortdesc lang="en">base master_score value</shortdesc>
<content type="string" default="${OCF_RESKEY_master_score_base_default}" />
@@ -140,9 +140,9 @@ if set to bigger value in mpathpersist resource configuration on some node, this

<parameter name="master_score_dev_factor" unique="0" required="0">
<longdesc lang="en">
-Working device factor in master_score calculation
+Working device factor in promoted calculation
each "working" device provides additional value to "master_score",
-so the node that sees more devices will be preferred for the "Master"-role
+so the node that sees more devices will be preferred for the "Promoted"-role
Setting it to 0 will disable this behavior.
</longdesc>
<shortdesc lang="en">working device factor in master_score calculation</shortdesc>
@@ -151,10 +151,10 @@ Setting it to 0 will disable this behavior.

<parameter name="master_score_delay" unique="0" required="0">
<longdesc lang="en">
-master/slave decreases/increases its master_score after delay of "master_score_delay" seconds
-so if some device gets inaccessible, the slave decreases its master_score first and the resource will no be watched
-and after this device reappears again the master increases its master_score first
-this can work only if the master_score_delay is bigger then monitor interval on both master and slave
+promoted/unpromoted decreases/increases its master_score after delay of "master_score_delay" seconds
+so if some device gets inaccessible, the unpromoted decreases its master_score first and the resource will not be watched
+and after this device reappears again the promoted increases its master_score first
+this can work only if the master_score_delay is bigger than the monitor interval on both promoted and unpromoted
Setting it to 0 will disable this behavior.
</longdesc>
<shortdesc lang="en">master_score decrease/increase delay time</shortdesc>
@@ -168,8 +168,8 @@ Setting it to 0 will disable this behavior.
<action name="demote" timeout="30s" />
<action name="notify" timeout="30s" />
<action name="stop" timeout="30s" />
-<action name="monitor" depth="0" timeout="20s" interval="29s" role="Slave" />
-<action name="monitor" depth="0" timeout="20s" interval="60s" role="Master" />
+<action name="monitor" depth="0" timeout="20s" interval="29s" role="Unpromoted" />
+<action name="monitor" depth="0" timeout="20s" interval="60s" role="Promoted" />
<action name="meta-data" timeout="5s" />
<action name="validate-all" timeout="30s" />
</actions>
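Note: the master_score formula above is easiest to read with concrete numbers; an illustrative calculation with assumed (non-default) parameter values:

    # assume master_score_base=100 and master_score_dev_factor=10
    # a node that sees 4 working devices then advertises:
    #   master_score = 100 + 10 * 4 = 140
    # so a node with all paths healthy outranks one with failed paths
    # when Pacemaker selects the Promoted instance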
diff --git a/heartbeat/mysql b/heartbeat/mysql
index 720de8c1a..aec44fe5e 100755
--- a/heartbeat/mysql
+++ b/heartbeat/mysql
@@ -321,8 +321,8 @@ whether a node is usable for clients to read from.</shortdesc>
<action name="stop" timeout="120s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="20s" />
-<action name="monitor" role="Master" depth="0" timeout="30s" interval="10s" />
-<action name="monitor" role="Slave" depth="0" timeout="30s" interval="30s" />
+<action name="monitor" role="Promoted" depth="0" timeout="30s" interval="10s" />
+<action name="monitor" role="Unpromoted" depth="0" timeout="30s" interval="30s" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
diff --git a/heartbeat/mysql-proxy b/heartbeat/mysql-proxy
index e34396d9a..fdf2fa230 100755
--- a/heartbeat/mysql-proxy
+++ b/heartbeat/mysql-proxy
@@ -162,7 +162,7 @@ Address:port of the remote back-end servers (default: 127.0.0.1:3306).

<parameter name="proxy_read_only_backend_addresses" unique="0" required="0">
<longdesc lang="en">
-Address:port of the remote (read only) slave-server (default: ).
+Address:port of the remote (read only) unpromoted-server (default: ).
</longdesc>
<shortdesc lang="en">MySql Proxy read only back-end servers</shortdesc>
<content type="string" default="${OCF_RESKEY_proxy_read_only_backend_addresses_default}" />
diff --git a/heartbeat/pgsql b/heartbeat/pgsql
index e3a39038f..94aceb324 100755
--- a/heartbeat/pgsql
+++ b/heartbeat/pgsql
@@ -458,7 +458,7 @@ wal receiver is not running in the master and the attribute shows status as
<action name="stop" timeout="120s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="30s" interval="30s"/>
-<action name="monitor" depth="0" timeout="30s" interval="29s" role="Master" />
+<action name="monitor" depth="0" timeout="30s" interval="29s" role="Promoted" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
diff --git a/heartbeat/redis.in b/heartbeat/redis.in
index 7f886c7ea..6429477e1 100755
--- a/heartbeat/redis.in
+++ b/heartbeat/redis.in
@@ -220,8 +220,8 @@ is in use.
<action name="stop" timeout="120s" />
<action name="status" timeout="60s" />
<action name="monitor" depth="0" timeout="60s" interval="45s" />
-<action name="monitor" role="Master" depth="0" timeout="60s" interval="20s" />
-<action name="monitor" role="Slave" depth="0" timeout="60s" interval="60s" />
+<action name="monitor" role="Promoted" depth="0" timeout="60s" interval="20s" />
+<action name="monitor" role="Unpromoted" depth="0" timeout="60s" interval="60s" />
<action name="promote" timeout="120s" />
<action name="demote" timeout="120s" />
<action name="notify" timeout="90s" />
diff --git a/heartbeat/sg_persist.in b/heartbeat/sg_persist.in
index 678762f40..0497cc469 100644
--- a/heartbeat/sg_persist.in
+++ b/heartbeat/sg_persist.in
@@ -168,8 +168,8 @@ Setting it to 0 will disable this behavior.
<action name="demote" timeout="30s" />
<action name="notify" timeout="30s" />
<action name="stop" timeout="30s" />
-<action name="monitor" depth="0" timeout="20s" interval="29s" role="Slave" />
-<action name="monitor" depth="0" timeout="20s" interval="60s" role="Master" />
+<action name="monitor" depth="0" timeout="20s" interval="29s" role="Unpromoted" />
+<action name="monitor" depth="0" timeout="20s" interval="60s" role="Promoted" />
<action name="meta-data" timeout="5s" />
<action name="validate-all" timeout="30s" />
</actions>

From 14e5cb71e3749d311745f110f90cc1139f9cedaf Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 16 Jun 2022 15:54:39 +0200
Subject: [PATCH 2/2] metadata: update to promoted roles

---
heartbeat/metadata.rng | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/heartbeat/metadata.rng b/heartbeat/metadata.rng
index 3dd735547..909efc284 100644
--- a/heartbeat/metadata.rng
+++ b/heartbeat/metadata.rng
@@ -85,8 +85,8 @@

<define name="role-values">
<choice>
- <value>Master</value>
- <value>Slave</value>
+ <value>Promoted</value>
+ <value>Unpromoted</value>
</choice>
</define>
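Note: after these two patches the agents advertise Promoted/Unpromoted in their metadata, while an existing CIB may still reference the legacy names. One illustrative way to locate such entries (the grep pattern is an assumption, not part of the patch):

    # list CIB snippets that still use the legacy role names
    cibadmin --query | grep -E 'role="(Master|Slave)"'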
@ -1,312 +0,0 @@
From fd1d6426a2d05f521207c305d10b49fedd92c2df Mon Sep 17 00:00:00 2001
From: Petr Pavlu <petr.pavlu@suse.com>
Date: Mon, 28 Feb 2022 09:27:42 +0100
Subject: [PATCH 1/4] IPaddr2: Allow to disable Duplicate Address Detection for
 IPv6

"Starting" an IPv6 address with IPaddr2 involves performing Duplicate
Address Detection which typically takes at least 1000 ms. Allow the user
to disable DAD if they can guarantee that the configured address is not
duplicate and they wish to start the resource faster.
---
heartbeat/IPaddr2 | 15 +++++++++++++++
1 file changed, 15 insertions(+)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index 735dd7779..650392b70 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -88,6 +88,7 @@ OCF_RESKEY_arp_sender_default=""
OCF_RESKEY_send_arp_opts_default=""
OCF_RESKEY_flush_routes_default="false"
OCF_RESKEY_run_arping_default=false
+OCF_RESKEY_nodad_default=false
OCF_RESKEY_noprefixroute_default="false"
OCF_RESKEY_preferred_lft_default="forever"
OCF_RESKEY_network_namespace_default=""
@@ -110,6 +111,7 @@ OCF_RESKEY_network_namespace_default=""
: ${OCF_RESKEY_send_arp_opts=${OCF_RESKEY_send_arp_opts_default}}
: ${OCF_RESKEY_flush_routes=${OCF_RESKEY_flush_routes_default}}
: ${OCF_RESKEY_run_arping=${OCF_RESKEY_run_arping_default}}
+: ${OCF_RESKEY_nodad=${OCF_RESKEY_nodad_default}}
: ${OCF_RESKEY_noprefixroute=${OCF_RESKEY_noprefixroute_default}}
: ${OCF_RESKEY_preferred_lft=${OCF_RESKEY_preferred_lft_default}}
: ${OCF_RESKEY_network_namespace=${OCF_RESKEY_network_namespace_default}}
@@ -391,6 +393,14 @@ Whether or not to run arping for IPv4 collision detection check.
<content type="string" default="${OCF_RESKEY_run_arping_default}"/>
</parameter>

+<parameter name="nodad">
+<longdesc lang="en">
+For IPv6, do not perform Duplicate Address Detection when adding the address.
+</longdesc>
+<shortdesc lang="en">Use nodad flag</shortdesc>
+<content type="string" default="${OCF_RESKEY_nodad_default}"/>
+</parameter>
+
<parameter name="noprefixroute">
<longdesc lang="en">
Use noprefixroute flag (see 'man ip-address').
@@ -662,6 +672,11 @@ add_interface () {
msg="Adding $FAMILY address $ipaddr/$netmask with broadcast address $broadcast to device $iface"
fi

+ if [ "$FAMILY" = "inet6" ] && ocf_is_true "${OCF_RESKEY_nodad}"; then
+ cmd="$cmd nodad"
+ msg="${msg} (with nodad)"
+ fi
+
if ocf_is_true "${OCF_RESKEY_noprefixroute}"; then
cmd="$cmd noprefixroute"
msg="${msg} (with noprefixroute)"

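Note: what nodad changes at the OS level is the address-add step; with nodad=true the agent ends up running roughly the following (address, prefix and device are placeholder values):

    ip -6 addr add 2001:db8::10/64 dev eth0 nodad preferred_lft forever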
From f4a9e3281d48c5d37f5df593d014706c46ddb3a7 Mon Sep 17 00:00:00 2001
From: Petr Pavlu <petr.pavlu@suse.com>
Date: Mon, 7 Mar 2022 17:21:59 +0100
Subject: [PATCH 2/4] IPaddr2: Allow to send IPv6 Neighbor Advertisements in
 background

"Starting" an IPv6 address with IPaddr2 involves sending Neighbor
Advertisement packets to inform neighboring machines about the new
IP+MAC translation. By default, 5x packets with 200 ms sleep after each
are sent which delays the start by 1000 ms. Allow the user to run this
operation in background, similarly as is possible with GARP for IPv4.
---
heartbeat/IPaddr2 | 33 +++++++++++++++++++++++++++++----
1 file changed, 29 insertions(+), 4 deletions(-)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index 650392b70..e243a642d 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -83,7 +83,7 @@ OCF_RESKEY_unique_clone_address_default=false
OCF_RESKEY_arp_interval_default=200
OCF_RESKEY_arp_count_default=5
OCF_RESKEY_arp_count_refresh_default=0
-OCF_RESKEY_arp_bg_default=true
+OCF_RESKEY_arp_bg_default=""
OCF_RESKEY_arp_sender_default=""
OCF_RESKEY_send_arp_opts_default=""
OCF_RESKEY_flush_routes_default="false"
@@ -336,9 +336,10 @@ situations.

<parameter name="arp_bg">
<longdesc lang="en">
-Whether or not to send the ARP packets in the background.
+Whether or not to send the ARP (IPv4) or NA (IPv6) packets in the background.
+The default is true for IPv4 and false for IPv6.
</longdesc>
-<shortdesc lang="en">ARP from background</shortdesc>
+<shortdesc lang="en">ARP/NA from background</shortdesc>
<content type="string" default="${OCF_RESKEY_arp_bg_default}"/>
</parameter>

@@ -507,6 +508,9 @@ ip_init() {
ocf_exit_reason "IPv4 does not support lvs_ipv6_addrlabel"
exit $OCF_ERR_CONFIGURED
fi
+ if [ -z "$OCF_RESKEY_arp_bg" ]; then
+ OCF_RESKEY_arp_bg=true
+ fi
else
FAMILY=inet6
# address sanitization defined in RFC5952
@@ -527,6 +531,9 @@ ip_init() {
exit $OCF_ERR_CONFIGURED
fi
fi
+ if [ -z "$OCF_RESKEY_arp_bg" ]; then
+ OCF_RESKEY_arp_bg=false
+ fi
fi

# support nic:iflabel format in nic parameter
@@ -893,6 +900,20 @@ run_arp_sender() {
fi
}

+log_send_ua() {
+ local cmdline
+ local output
+ local rc
+
+ cmdline="$@"
+ output=$($cmdline 2>&1)
+ rc=$?
+ if [ $rc -ne 0 ] ; then
+ ocf_log err "Could not send ICMPv6 Unsolicited Neighbor Advertisements: rc=$rc"
+ fi
+ ocf_log info "$output"
+ return $rc
+}

#
# Run send_ua to note send ICMPv6 Unsolicited Neighbor Advertisements.
@@ -930,7 +951,11 @@ run_send_ua() {

ARGS="-i $OCF_RESKEY_arp_interval -c $OCF_RESKEY_arp_count $OCF_RESKEY_ip $NETMASK $NIC"
ocf_log info "$SENDUA $ARGS"
- $SENDUA $ARGS || ocf_log err "Could not send ICMPv6 Unsolicited Neighbor Advertisements."
+ if ocf_is_true $OCF_RESKEY_arp_bg; then
+ log_send_ua $SENDUA $ARGS &
+ else
+ log_send_ua $SENDUA $ARGS
+ fi
}

# Do we already serve this IP address on the given $NIC?

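Note: combining the two IPv6 start-time patches, a fast-starting IPv6 VIP could be configured along these lines (a sketch; the address and resource name are placeholders):

    # skip DAD and push Neighbor Advertisements to the background
    pcs resource create vip6 ocf:heartbeat:IPaddr2 \
        ip=2001:db8::10 cidr_netmask=64 nodad=true arp_bg=true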
From c8afb43012c264f3ee24013a92b2a2f3566db2fd Mon Sep 17 00:00:00 2001
From: Petr Pavlu <petr.pavlu@suse.com>
Date: Tue, 8 Mar 2022 12:35:56 +0100
Subject: [PATCH 3/4] IPaddr2: Log 'ip addr add' options together

Change the log message in add_interface() from
"Adding ... (with <opt1>) (with <opt2>)"
to
"Adding ... (with <opt1> <opt2>)".
---
heartbeat/IPaddr2 | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index e243a642d..dca1b6f5b 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -651,7 +651,7 @@ delete_interface () {
# Add an interface
#
add_interface () {
- local cmd msg ipaddr netmask broadcast iface label
+ local cmd msg extra_opts ipaddr netmask broadcast iface label

ipaddr="$1"
netmask="$2"
@@ -679,23 +679,24 @@ add_interface () {
msg="Adding $FAMILY address $ipaddr/$netmask with broadcast address $broadcast to device $iface"
fi

+ extra_opts=""
if [ "$FAMILY" = "inet6" ] && ocf_is_true "${OCF_RESKEY_nodad}"; then
- cmd="$cmd nodad"
- msg="${msg} (with nodad)"
+ extra_opts="$extra_opts nodad"
fi

if ocf_is_true "${OCF_RESKEY_noprefixroute}"; then
- cmd="$cmd noprefixroute"
- msg="${msg} (with noprefixroute)"
+ extra_opts="$extra_opts noprefixroute"
fi

if [ ! -z "$label" ]; then
- cmd="$cmd label $label"
- msg="${msg} (with label $label)"
+ extra_opts="$extra_opts label $label"
fi
if [ "$FAMILY" = "inet6" ] ;then
- cmd="$cmd preferred_lft $OCF_RESKEY_preferred_lft"
- msg="${msg} (with preferred_lft $OCF_RESKEY_preferred_lft)"
+ extra_opts="$extra_opts preferred_lft $OCF_RESKEY_preferred_lft"
+ fi
+ if [ -n "$extra_opts" ]; then
+ cmd="$cmd$extra_opts"
+ msg="$msg (with$extra_opts)"
fi

ocf_log info "$msg"

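Note: the visible effect is a single consolidated suffix on the log line; illustratively:

    # before: ... to device eth0 (with nodad) (with noprefixroute) (with preferred_lft forever)
    # after:  ... to device eth0 (with nodad noprefixroute preferred_lft forever)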
From cb4d52ead694718282a40eab24e04b6d85bcc802 Mon Sep 17 00:00:00 2001
From: Petr Pavlu <petr.pavlu@suse.com>
Date: Mon, 7 Mar 2022 17:25:02 +0100
Subject: [PATCH 4/4] IPaddr2: Clarify behavior of 'arp_*' parameters for IPv4
 and IPv6

* Mention that 'arp_*' parameters are shared by the IPv4 and IPv6 code.
* Clarify description of these parameters and mark which of them apply
 only to IPv4.
---
heartbeat/IPaddr2 | 26 +++++++++++++++++---------
1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index dca1b6f5b..97a7431a2 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -157,6 +157,12 @@ and/or clone-max < number of nodes. In case of node failure,
clone instances need to be re-allocated on surviving nodes.
This would not be possible if there is already an instance
on those nodes, and clone-node-max=1 (which is the default).
+
+When the specified IP address gets assigned to a respective interface, the
+resource agent sends unsolicited ARP (Address Resolution Protocol, IPv4) or NA
+(Neighbor Advertisement, IPv6) packets to inform neighboring machines about the
+change. This functionality is controlled for both IPv4 and IPv6 by shared
+'arp_*' parameters.
</longdesc>

<shortdesc lang="en">Manages virtual IPv4 and IPv6 addresses (Linux specific version)</shortdesc>
@@ -306,28 +312,30 @@ a unique address to manage

<parameter name="arp_interval">
<longdesc lang="en">
-Specify the interval between unsolicited ARP packets in milliseconds.
+Specify the interval between unsolicited ARP (IPv4) or NA (IPv6) packets in
+milliseconds.

This parameter is deprecated and used for the backward compatibility only.
It is effective only for the send_arp binary which is built with libnet,
and send_ua for IPv6. It has no effect for other arp_sender.
</longdesc>
-<shortdesc lang="en">ARP packet interval in ms (deprecated)</shortdesc>
+<shortdesc lang="en">ARP/NA packet interval in ms (deprecated)</shortdesc>
<content type="integer" default="${OCF_RESKEY_arp_interval_default}"/>
</parameter>

<parameter name="arp_count">
<longdesc lang="en">
-Number of unsolicited ARP packets to send at resource initialization.
+Number of unsolicited ARP (IPv4) or NA (IPv6) packets to send at resource
+initialization.
</longdesc>
-<shortdesc lang="en">ARP packet count sent during initialization</shortdesc>
+<shortdesc lang="en">ARP/NA packet count sent during initialization</shortdesc>
<content type="integer" default="${OCF_RESKEY_arp_count_default}"/>
</parameter>

<parameter name="arp_count_refresh">
<longdesc lang="en">
-Number of unsolicited ARP packets to send during resource monitoring. Doing
-so helps mitigate issues of stuck ARP caches resulting from split-brain
+For IPv4, number of unsolicited ARP packets to send during resource monitoring.
+Doing so helps mitigate issues of stuck ARP caches resulting from split-brain
situations.
</longdesc>
<shortdesc lang="en">ARP packet count sent during monitoring</shortdesc>
@@ -345,7 +353,7 @@ The default is true for IPv4 and false for IPv6.

<parameter name="arp_sender">
<longdesc lang="en">
-The program to send ARP packets with on start. Available options are:
+For IPv4, the program to send ARP packets with on start. Available options are:
 - send_arp: default
 - ipoibarping: default for infiniband interfaces if ipoibarping is available
 - iputils_arping: use arping in iputils package
@@ -357,7 +365,7 @@ The program to send ARP packets with on start. Available options are:

<parameter name="send_arp_opts">
<longdesc lang="en">
-Extra options to pass to the arp_sender program.
+For IPv4, extra options to pass to the arp_sender program.
Available options are vary depending on which arp_sender is used.

A typical use case is specifying '-A' for iputils_arping to use
@@ -388,7 +396,7 @@ IP address goes away.

<parameter name="run_arping">
<longdesc lang="en">
-Whether or not to run arping for IPv4 collision detection check.
+For IPv4, whether or not to run arping for collision detection check.
</longdesc>
<shortdesc lang="en">Run arping for IPv4 collision detection check</shortdesc>
<content type="string" default="${OCF_RESKEY_run_arping_default}"/>
@ -1,401 +0,0 @@
From d59a000da2766476538bb82d1889f5c0f3882f9f Mon Sep 17 00:00:00 2001
From: Jan Friesse <jfriesse@redhat.com>
Date: Wed, 2 Mar 2022 18:43:31 +0100
Subject: [PATCH] corosync-qnetd: Add resource agent

Mostly for better monitor operation using corosync-qnetd-tool. As qnetd
is (almost) stateless only directory which has to be copied (once)
across the nodes is nss db directory (usually
/etc/corosync/qnetd/nssdb).

Signed-off-by: Jan Friesse <jfriesse@redhat.com>
---
doc/man/Makefile.am | 1 +
heartbeat/Makefile.am | 1 +
heartbeat/corosync-qnetd | 353 +++++++++++++++++++++++++++++++++++++++
3 files changed, 355 insertions(+)
create mode 100755 heartbeat/corosync-qnetd

diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index 1093717fe..013aa392d 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -127,6 +127,7 @@ man_MANS = ocf_heartbeat_AoEtarget.7 \
ocf_heartbeat_azure-lb.7 \
ocf_heartbeat_clvm.7 \
ocf_heartbeat_conntrackd.7 \
+ ocf_heartbeat_corosync-qnetd.7 \
ocf_heartbeat_crypt.7 \
ocf_heartbeat_db2.7 \
ocf_heartbeat_dhcpd.7 \
diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 67b400679..38154e2da 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -101,6 +101,7 @@ ocf_SCRIPTS = AoEtarget \
azure-lb \
clvm \
conntrackd \
+ corosync-qnetd \
crypt \
db2 \
dhcpd \
diff --git a/heartbeat/corosync-qnetd b/heartbeat/corosync-qnetd
new file mode 100755
index 000000000..6b9777711
--- /dev/null
+++ b/heartbeat/corosync-qnetd
@@ -0,0 +1,353 @@
+#!/bin/sh
+#
+# Copyright (C) 2022 Red Hat, Inc. All rights reserved.
+#
+# Authors: Jan Friesse <jfriesse@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+
+# Initialization:
+: "${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}"
+. "${OCF_FUNCTIONS_DIR}/ocf-shellfuncs"
+
+# Use runuser if available for SELinux.
+if [ -x "/sbin/runuser" ]; then
+ SU="runuser"
+else
+ SU="su"
+fi
+
+# Attempt to detect a default binary
+OCF_RESKEY_binary_default=$(which corosync-qnetd 2> /dev/null)
+if [ "${OCF_RESKEY_binary_default}" = "" ]; then
+ OCF_RESKEY_binary_default="/usr/bin/corosync-qnetd"
+fi
+
+# Defaults
+OCF_RESKEY_qnetd_opts_default=""
+OCF_RESKEY_qnetd_tool_binary_default="/usr/bin/corosync-qnetd-tool"
+OCF_RESKEY_ip_default=""
+OCF_RESKEY_port_default=""
+OCF_RESKEY_nss_db_dir_default=""
+OCF_RESKEY_pid_default="/var/run/corosync-qnetd/corosync-qnetd-${OCF_RESOURCE_INSTANCE}.pid"
+OCF_RESKEY_ipc_sock_default="/var/run/corosync-qnetd/corosync-qnetd-${OCF_RESOURCE_INSTANCE}.sock"
+OCF_RESKEY_user_default="coroqnetd"
+OCF_RESKEY_group_default="${OCF_RESKEY_user_default}"
+
+: "${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}"
+: "${OCF_RESKEY_qnetd_opts=${OCF_RESKEY_qnetd_opts_default}}"
+: "${OCF_RESKEY_qnetd_tool_binary=${OCF_RESKEY_qnetd_tool_binary_default}}"
+: "${OCF_RESKEY_ip=${OCF_RESKEY_ip_default}}"
+: "${OCF_RESKEY_port=${OCF_RESKEY_port_default}}"
+: "${OCF_RESKEY_nss_db_dir=${OCF_RESKEY_nss_db_dir_default}}"
+: "${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}"
+: "${OCF_RESKEY_ipc_sock=${OCF_RESKEY_ipc_sock_default}}"
+: "${OCF_RESKEY_user=${OCF_RESKEY_user_default}}"
+: "${OCF_RESKEY_group=${OCF_RESKEY_group_default}}"
+
+corosync_qnetd_usage() {
+ cat <<END
+usage: $0 {start|stop|status|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+}
+
+corosync_qnetd_meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="corosync-qnetd" version="1.0">
+<version>1.0</version>
+
+<longdesc lang="en">OCF Resource script for corosync-qnetd. It manages a corosync-qnetd
+instance as a HA resource. It is required to copy nss db directory (usually /etc/corosync/qnetd/nssdb)
+across all nodes (only once - after database is initialized).</longdesc>
+<shortdesc lang="en">Corosync QNet daemon resource agent</shortdesc>
+
+<parameters>
+
+<parameter name="binary">
+ <longdesc lang="en">Location of the corosync-qnetd binary</longdesc>
+ <shortdesc lang="en">corosync-qnetd binary</shortdesc>
+ <content type="string" default="${OCF_RESKEY_binary_default}" />
+</parameter>
+
+<parameter name="qnetd_opts">
+ <longdesc lang="en">
+ Additional options for corosync-qnetd binary. "-4" for example.
+ </longdesc>
+ <shortdesc lang="en">corosync-qnetd extra options</shortdesc>
+ <content type="string" default="${OCF_RESKEY_qnetd_opts_default}" />
+</parameter>
+
+<parameter name="qnetd_tool_binary">
+ <longdesc lang="en">
+ The absolute path to the corosync-qnetd-tool for monitoring with OCF_CHECK_LEVEL greater than zero.
+ </longdesc>
+ <shortdesc lang="en">The absolute path to the corosync-qnetd-tool binary</shortdesc>
+ <content type="string" default="${OCF_RESKEY_qnetd_tool_binary_default}" />
+</parameter>
+
+<parameter name="ip">
+ <longdesc lang="en">
+ IP address to listen on. By default the daemon listens on all addresses (wildcard).
+ </longdesc>
+ <shortdesc lang="en">IP address to listen on</shortdesc>
+ <content type="string" default="${OCF_RESKEY_ip_default}" />
+</parameter>
+
+<parameter name="port">
+ <longdesc lang="en">
+ TCP port to listen on. Default port is 5403.
+ </longdesc>
+ <shortdesc lang="en">TCP port to listen on</shortdesc>
+ <content type="string" default="${OCF_RESKEY_port_default}" />
+</parameter>
+
+<parameter name="nss_db_dir">
+ <longdesc lang="en">
+ Location of the corosync-qnetd nss db directory (empty for default - usually /etc/corosync/qnetd/nssdb)
+ </longdesc>
+ <shortdesc lang="en">corosync-qnetd nss db directory</shortdesc>
+ <content type="string" default="${OCF_RESKEY_nss_db_dir_default}" />
+</parameter>
+
+<parameter name="pid">
+ <longdesc lang="en">
+ Location of the corosync-qnetd pid/lock
+ </longdesc>
+ <shortdesc lang="en">corosync-qnetd pid file</shortdesc>
+ <content type="string" default="${OCF_RESKEY_pid_default}" />
+</parameter>
+
+<parameter name="ipc_sock">
+ <longdesc lang="en">
+ Location of the corosync-qnetd ipc socket
+ </longdesc>
+ <shortdesc lang="en">corosync-qnetd ipc socket file</shortdesc>
+ <content type="string" default="${OCF_RESKEY_ipc_sock_default}" />
+</parameter>
+
+<parameter name="user">
+ <longdesc lang="en">User running corosync-qnetd</longdesc>
+ <shortdesc lang="en">corosync-qnetd user</shortdesc>
+ <content type="string" default="${OCF_RESKEY_user_default}" />
+</parameter>
+
+<parameter name="group">
+ <longdesc lang="en">Group running corosync-qnetd</longdesc>
+ <shortdesc lang="en">corosync-qnetd group</shortdesc>
+ <content type="string" default="${OCF_RESKEY_group_default}" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="20s" />
+<action name="stop" timeout="20s" />
+<action name="status" timeout="20s" />
+<action name="monitor" depth="0" timeout="20s" interval="10s" start-delay="10s" />
+<action name="validate-all" timeout="20s" />
+<action name="meta-data" timeout="20s" />
+</actions>
+</resource-agent>
+END
+}
+
+corosync_qnetd_status() {
+ ocf_pidfile_status "${OCF_RESKEY_pid}" > /dev/null 2>&1
+ case "$?" in
+ 0)
+ rc="$OCF_SUCCESS"
+ ;;
+ 1|2)
+ rc="$OCF_NOT_RUNNING"
+ ;;
+ *)
+ rc="$OCF_ERR_GENERIC"
+ ;;
+ esac
+
+ return "$rc"
+}
+
+corosync_qnetd_start() {
+ corosync_qnetd_validate_all
+ rc="$?"
+
+ if [ "$rc" -ne 0 ]; then
+ return "$rc"
+ fi
+
+ # If the resource is already running, there is no need to continue.
+ if corosync_qnetd_status; then
+ ocf_log info "corosync-qnetd is already running"
+ return "${OCF_SUCCESS}"
+ fi
+
+ pid_dir=$(dirname "${OCF_RESKEY_pid}")
+ sock_dir=$(dirname "${OCF_RESKEY_ipc_sock}")
+
+ for d in "$pid_dir" "$sock_dir";do
+ if [ ! -d "$d" ];then
+ mkdir -p "$d"
+ chmod 0770 "$d"
+ chown "${OCF_RESKEY_user}:${OCF_RESKEY_group}" "$d"
+ fi
+ done
+
+ params="-S \"local_socket_file=${OCF_RESKEY_ipc_sock}\" -S \"lock_file=${OCF_RESKEY_pid}\""
+
+ if [ -n "${OCF_RESKEY_nss_db_dir}" ];then
+ params="$params -S \"nss_db_dir=${OCF_RESKEY_nss_db_dir}\""
+ fi
+
+ if [ -n "${OCF_RESKEY_ip}" ];then
+ params="$params -l \"${OCF_RESKEY_ip}\""
+ fi
+
+ if [ -n "${OCF_RESKEY_port}" ];then
+ params="$params -p \"${OCF_RESKEY_port}\""
+ fi
+
+ params="$params ${OCF_RESKEY_qnetd_opts}"
+
+ ocf_run "$SU" -s "/bin/sh" "${OCF_RESKEY_user}" -c "${OCF_RESKEY_binary} $params"
+
+ while :; do
+ corosync_qnetd_monitor "debug"
+ rc="$?"
+
+ if [ "$rc" -eq "${OCF_SUCCESS}" ]; then
+ break
+ fi
+ sleep 1
+
+ ocf_log debug "corosync-qnetd still hasn't started yet. Waiting..."
+ done
+
+ ocf_log info "corosync-qnetd started"
+ return "${OCF_SUCCESS}"
+}
+
+corosync_qnetd_stop() {
+ corosync_qnetd_status
+
+ if [ "$?" -ne "$OCF_SUCCESS" ]; then
+ # Currently not running. Nothing to do.
+ ocf_log info "corosync-qnetd is already stopped"
+
+ return "$OCF_SUCCESS"
+ fi
+
+ pid=$(cat "${OCF_RESKEY_pid}")
+ kill "$pid"
+
+ # Wait for process to stop
+ while corosync_qnetd_monitor "debug"; do
+ sleep 1
+ done
+
+ ocf_log info "corosync-qnetd stopped"
+ return "$OCF_SUCCESS"
+}
+
+corosync_qnetd_monitor() {
+ loglevel=${1:-err}
+
+ corosync_qnetd_status
+ rc="$?"
+
+ if [ "$rc" -ne "$OCF_SUCCESS" ];then
+ return "$rc"
+ fi
+
+ out=$("${OCF_RESKEY_qnetd_tool_binary}" -s -p "${OCF_RESKEY_ipc_sock}" 2>&1 >/dev/null)
+ rc="$?"
+
+ if [ "$rc" != 0 ];then
+ ocf_log "$loglevel" "$out"
+ fi
+
+ case "$rc" in
+ "0") rc="$OCF_SUCCESS" ;;
+ "3") rc="$OCF_NOT_RUNNING" ;;
+ *) rc="$OCF_ERR_GENERIC" ;;
+ esac
+
+ return "$rc"
+}
+
+corosync_qnetd_validate_all() {
+ check_binary "${OCF_RESKEY_binary}"
+
+ check_binary "${OCF_RESKEY_qnetd_tool_binary}"
+}
+
+
+# **************************** MAIN SCRIPT ************************************
+
+# Make sure meta-data and usage always succeed
+case "$__OCF_ACTION" in
+ meta-data)
+ corosync_qnetd_meta_data
+ exit "$OCF_SUCCESS"
+ ;;
+ usage|help)
+ corosync_qnetd_usage
+ exit "$OCF_SUCCESS"
+ ;;
+esac
+
+# This OCF agent script needs to be run as the root user.
+if ! ocf_is_root; then
+ echo "$0 agent script needs to be run as the root user."
+ ocf_log debug "$0 agent script needs to be run as the root user."
+ exit "$OCF_ERR_GENERIC"
+fi
+
+# Translate each action into the appropriate function call
+case "$__OCF_ACTION" in
+ start)
+ corosync_qnetd_start
+ ;;
+ stop)
+ corosync_qnetd_stop
+ ;;
+ status)
+ corosync_qnetd_status
+ ;;
+ monitor)
+ corosync_qnetd_monitor
+ ;;
+ validate-all)
+ corosync_qnetd_validate_all
+ ;;
+ *)
+ corosync_qnetd_usage
+ exit "$OCF_ERR_UNIMPLEMENTED"
+ ;;
+esac
+
+rc="$?"
+exit "$rc"
+# End of this script
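Note: a minimal deployment sketch for this agent, assuming default paths and that corosync-qnetd-certutil is used for the one-time NSS database initialization (node name is a placeholder):

    # initialize the NSS database once, then copy it to the other cluster nodes
    corosync-qnetd-certutil -i
    scp -r /etc/corosync/qnetd/nssdb node2:/etc/corosync/qnetd/
    # create the resource; monitor checks the daemon via corosync-qnetd-tool
    # over the IPC socket
    pcs resource create qnetd ocf:heartbeat:corosync-qnetd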
@ -1,61 +0,0 @@
From 340e12c0d457d244d375c2d805e78033c9dbdf78 Mon Sep 17 00:00:00 2001
From: Takashi Kajinami <tkajinam@redhat.com>
Date: Wed, 04 May 2022 23:13:35 +0900
Subject: [PATCH] NovaCompute/Evacuate: Make user/project domain configurable

... so that we can use a user or a project in a non-default keystone
domain.

Change-Id: I6e2175adca08fd97942cb83b8f8094e980b60c9d
---

diff --git a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
index 596f520..4565766 100644
--- a/heartbeat/NovaEvacuate
+++ b/heartbeat/NovaEvacuate
@@ -63,13 +63,29 @@

<parameter name="tenant_name" unique="0" required="1">
<longdesc lang="en">
-Tenant name for connecting to keystone in admin context.
+Tenant(Project) name for connecting to keystone in admin context.
Note that with Keystone V3 tenant names are only unique within a domain.
</longdesc>
<shortdesc lang="en">Tenant name</shortdesc>
<content type="string" default="" />
</parameter>

+<parameter name="user_domain" unique="0" required="0">
+<longdesc lang="en">
+Keystone domain the user belongs to
+</longdesc>
+<shortdesc lang="en">Keystone v3 User Domain</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="project_domain" unique="0" required="0">
+<longdesc lang="en">
+Keystone domain the tenant(project) belongs to
+</longdesc>
+<shortdesc lang="en">Keystone v3 Project Domain</shortdesc>
+<content type="string" default="" />
+</parameter>
+
<parameter name="domain" unique="0" required="0">
<longdesc lang="en">
DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN
@@ -319,6 +335,14 @@

fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"

+ if [ -n "${OCF_RESKEY_user_domain}" ]; then
+ fence_options="${fence_options} -u ${OCF_RESKEY_user_domain}"
+ fi
+
+ if [ -n "${OCF_RESKEY_project_domain}" ]; then
+ fence_options="${fence_options} -P ${OCF_RESKEY_project_domain}"
+ fi
+
if [ -n "${OCF_RESKEY_domain}" ]; then
fence_options="${fence_options} -d ${OCF_RESKEY_domain}"
fi
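Note: with these parameters, a deployment using a non-default Keystone v3 domain could be adjusted along these lines (the resource and domain names are placeholders):

    pcs resource update nova-evacuate user_domain=Default project_domain=Default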
@ -0,0 +1,47 @@
From 99c4f2af92a10155cf072198c72deffaed3883a5 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 3 Aug 2022 17:20:31 +0200
Subject: [PATCH] CTDB: move process to root cgroup if realtime scheduling is
 enabled

---
heartbeat/CTDB.in | 2 ++
heartbeat/ocf-shellfuncs.in | 12 ++++++++++++
2 files changed, 14 insertions(+)

diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in
index d25d026ca..46f56cfac 100755
--- a/heartbeat/CTDB.in
+++ b/heartbeat/CTDB.in
@@ -709,6 +709,8 @@ EOF
invoke_ctdbd() {
local vers="$1"

+ ocf_move_to_root_cgroup_if_rt_enabled
+
ocf_version_cmp "$vers" "4.9.0"
if [ "$?" -ne "0" ]; then
# With 4.9+, all ctdbd binary parameters are provided as
diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index 6be4e4e30..2c53a967a 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -672,6 +672,18 @@ EOF
systemctl daemon-reload
}

+# move process to root cgroup if realtime scheduling is enabled
+ocf_move_to_root_cgroup_if_rt_enabled()
+{
+ if [ -e "/sys/fs/cgroup/cpu/cpu.rt_runtime_us" ]; then
+ echo $$ >> /sys/fs/cgroup/cpu/tasks
+
+ if [ "$?" -ne "0" ]; then
+ ocf_log warn "Unable to move PID $$ to the root cgroup"
+ fi
+ fi
+}
+
# usage: crm_mon_no_validation args...
# run crm_mon without any cib schema validation
# This is useful when an agent runs in a bundle to avoid potential
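Note: the helper only acts on cgroup-v1 systems where the CPU controller exposes a realtime budget; a quick check mirroring its condition (illustrative):

    # if this file exists, the shell's PID gets appended to
    # /sys/fs/cgroup/cpu/tasks, i.e. moved to the root cgroup
    cat /sys/fs/cgroup/cpu/cpu.rt_runtime_us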
27
SOURCES/bz2141836-vdo-vol-dont-fail-probe-action.patch
Normal file
@ -0,0 +1,27 @@
From 739e6ce9096facd6d37dffd524c79c961e3fae38 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Fri, 11 Nov 2022 14:17:39 +0100
Subject: [PATCH] vdo-vol: dont fail probe action when the underlying device
 doesnt exist

---
heartbeat/vdo-vol | 6 ++++++
1 file changed, 6 insertions(+)

diff --git a/heartbeat/vdo-vol b/heartbeat/vdo-vol
index 94822cb82..29bd7b8fd 100755
--- a/heartbeat/vdo-vol
+++ b/heartbeat/vdo-vol
@@ -148,6 +148,12 @@ vdo_monitor(){
MODE=$(vdostats --verbose ${OCF_RESKEY_volume} | grep "operating mode" | awk '{print $NF}')

case "$status" in
+ *"ERROR - vdodumpconfig: Failed to make FileLayer from"*)
+ if ocf_is_probe; then
+ return $OCF_NOT_RUNNING
+ fi
+ return $OCF_ERR_GENERIC
+ ;;
*"Device mapper status: not available"*)
return $OCF_NOT_RUNNING
;;
@ -1,156 +0,0 @@
|
||||
From 51dd5d5d051aa3b3f0c104f8e80f212cd5780fc3 Mon Sep 17 00:00:00 2001
|
||||
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
|
||||
Date: Tue, 14 Mar 2023 09:14:28 +0100
|
||||
Subject: [PATCH] LVM-activate: failover with missing PVs
|
||||
|
||||
There area two changes included:
|
||||
|
||||
- Allow the system ID to be changed on a VG when the VG is
|
||||
missing PVs, as long as a majority of PVs are still present.
|
||||
This requires a recent version of lvm that supports the
|
||||
--majoritypvs option for vgchange.
|
||||
|
||||
- Use --activationmode degraded when activating LVs so that
|
||||
raid LVs can be activated when legs are missing, as long as
|
||||
sufficient devices are available for raid to provide all the
|
||||
data in the LV.
|
||||
|
||||
By David Teigland.
|
||||
---
|
||||
heartbeat/LVM-activate | 82 ++++++++++++++++++++++++++++++++----------
|
||||
1 file changed, 64 insertions(+), 18 deletions(-)
|
||||
|
||||
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
|
||||
index e951a08e9c..f6f24a3b52 100755
|
||||
--- a/heartbeat/LVM-activate
|
||||
+++ b/heartbeat/LVM-activate
|
||||
@@ -50,6 +50,8 @@ OCF_RESKEY_vg_access_mode_default=""
|
||||
OCF_RESKEY_activation_mode_default="exclusive"
|
||||
OCF_RESKEY_tag_default="pacemaker"
|
||||
OCF_RESKEY_partial_activation_default="false"
|
||||
+OCF_RESKEY_degraded_activation_default="false"
|
||||
+OCF_RESKEY_majority_pvs_default="false"
|
||||
|
||||
: ${OCF_RESKEY_vgname=${OCF_RESKEY_vgname_default}}
|
||||
: ${OCF_RESKEY_lvname=${OCF_RESKEY_lvname_default}}
|
||||
@@ -57,6 +59,8 @@ OCF_RESKEY_partial_activation_default="false"
|
||||
: ${OCF_RESKEY_activation_mode=${OCF_RESKEY_activation_mode_default}}
|
||||
: ${OCF_RESKEY_tag=${OCF_RESKEY_tag_default}}
|
||||
: ${OCF_RESKEY_partial_activation=${OCF_RESKEY_partial_activation_default}}
|
||||
+: ${OCF_RESKEY_degraded_activation=${OCF_RESKEY_degraded_activation_default}}
|
||||
+: ${OCF_RESKEY_majority_pvs=${OCF_RESKEY_majority_pvs_default}}
|
||||
|
||||
# If LV is given, only activate this named LV; otherwise, activate all
|
||||
# LVs in the named VG.
|
||||
@@ -191,6 +195,29 @@ logical volumes.
|
||||
<content type="string" default="${OCF_RESKEY_partial_activation_default}" />
|
||||
</parameter>
|
||||
|
||||
+<parameter name="degraded_activation" unique="0" required="0">
+<longdesc lang="en">
+Activate RAID LVs using the "degraded" activation mode. This allows RAID
+LVs to be activated with missing PVs if all data can be provided with
+RAID redundancy. The RAID level determines the number of PVs that are
+required for degraded activation to succeed. If fewer PVs are available,
+then degraded activation will fail. Also enable majority_pvs.
+</longdesc>
+<shortdesc lang="en">Activate RAID LVs in degraded mode when missing PVs</shortdesc>
+<content type="string" default="${OCF_RESKEY_degraded_activation_default}" />
+</parameter>
+
+<parameter name="majority_pvs" unique="0" required="0">
+<longdesc lang="en">
+If set, the VG system ID can be reassigned to a new host if a majority
+of PVs in the VG are present. Otherwise, VG failover with system ID
+will fail when the VG is missing PVs. Also enable degraded_activation
+when RAID LVs are used.
+</longdesc>
+<shortdesc lang="en">Allow changing the system ID of a VG with a majority of PVs</shortdesc>
+<content type="string" default="${OCF_RESKEY_majority_pvs_default}" />
+</parameter>
+
</parameters>

<actions>
@@ -524,24 +551,27 @@ lvm_validate() {
exit $OCF_ERR_GENERIC
fi

- # Inconsistency might be due to missing physical volumes, which doesn't
- # automatically mean we should fail. If partial_activation=true then
- # we should let start try to handle it, or if no PVs are listed as
- # "unknown device" then another node may have marked a device missing
- # where we have access to all of them and can start without issue.
- case $(vgs -o attr --noheadings $VG | tr -d ' ') in
- ???p??*)
- if ! ocf_is_true "$OCF_RESKEY_partial_activation" ; then
- # We are missing devices and cannot activate partially
- ocf_exit_reason "Volume group [$VG] has devices missing. Consider partial_activation=true to attempt to activate partially"
- exit $OCF_ERR_GENERIC
+ vg_missing_pv_count=$(vgs -o missing_pv_count --noheadings ${VG} 2>/dev/null)
+
+ if [ $vg_missing_pv_count -gt 0 ]; then
+ ocf_log warn "Volume Group ${VG} is missing $vg_missing_pv_count PVs."
+
+ # Setting new system ID will succeed if over half of PVs remain.
+ # Don't try to calculate here if a majority is present,
+ # but leave this up to the vgchange command to determine.
+ if ocf_is_true "$OCF_RESKEY_majority_pvs" ; then
+ ocf_log warn "Attempting fail over with missing PVs (majority.)"
+
+ # Setting new system ID will fail, and behavior is undefined for
+ # other access modes.
+ elif ocf_is_true "$OCF_RESKEY_partial_activation" ; then
+ ocf_log warn "Attempting fail over with missing PVs (partial.)"
+
else
- # We are missing devices but are allowed to activate partially.
- # Assume that caused the vgck failure and carry on
- ocf_log warn "Volume group inconsistency detected with missing device(s) and partial_activation enabled. Proceeding with requested action."
+ ocf_exit_reason "Volume group [$VG] has devices missing. Consider majority_pvs=true"
+ exit $OCF_ERR_GENERIC
fi
- ;;
- esac
+ fi

# Get the access mode from VG metadata and check if it matches the input
# value. Skip to check "tagging" mode because there's no reliable way to
@@ -601,7 +631,18 @@ lvm_validate() {
do_activate() {
do_activate_opt=$1

- if ocf_is_true "$OCF_RESKEY_partial_activation" ; then
+ if ocf_is_true "$OCF_RESKEY_degraded_activation" ; then
+ # This will allow a RAID LV to be activated if sufficient
+ # devices are available to allow the LV to be usable
+ do_activate_opt="${do_activate_opt} --activationmode degraded"
+
+ elif ocf_is_true "$OCF_RESKEY_partial_activation" ; then
+ # This will allow a mirror LV to be activated if any
+ # devices are missing, but the activated LV may not be
+ # usable, so it is not recommended. Also, other LV
+ # types without data redundancy will be activated
+ # when partial is set.
+ # RAID LVs and degraded_activation should be used instead.
do_activate_opt="${do_activate_opt} --partial"
fi

@@ -661,11 +702,16 @@ clvmd_activate() {
}

systemid_activate() {
+ majority_opt=""
set_autoactivation=0
cur_systemid=$(vgs --foreign --noheadings -o systemid ${VG} | tr -d '[:blank:]')

+ if ocf_is_true "$OCF_RESKEY_majority_pvs" ; then
+ vgchange --help | grep '\--majoritypvs' >/dev/null 2>&1 && majority_opt="--majoritypvs"
+ fi
+
# Put our system ID on the VG
- vgchange -y --config "local/extra_system_ids=[\"${cur_systemid}\"]" \
+ vgchange -y $majority_opt --config "local/extra_system_ids=[\"${cur_systemid}\"]" \
--systemid ${SYSTEM_ID} ${VG}
vgchange --help | grep '\--setautoactivation' >/dev/null 2>&1 && set_autoactivation=1
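
For orientation, here is a minimal sketch of what the two new options translate to at the LVM level. The vgchange forms are taken from the hunks above; the VG name, resource name, and the pcs invocation are hypothetical:

    # degraded_activation=true makes do_activate() run, in effect:
    vgchange -ay --activationmode degraded my_vg

    # majority_pvs=true adds --majoritypvs (when the installed lvm2
    # advertises it) to the system-ID takeover in systemid_activate():
    vgchange -y --majoritypvs --systemid "$SYSTEM_ID" my_vg

    # hypothetical Pacemaker resource enabling both options together,
    # as the parameter descriptions recommend for RAID LVs:
    pcs resource create my_vg_res ocf:heartbeat:LVM-activate vgname=my_vg \
        vg_access_mode=system_id degraded_activation=true majority_pvs=true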
@ -1,29 +0,0 @@
From 78622f1d3e46d58b78efe33643d05bea4d6948a2 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 17 May 2023 12:29:38 +0200
Subject: [PATCH] Filesystem: create systemd drop-in for network filesystems

---
heartbeat/Filesystem | 2 ++
1 file changed, 2 insertions(+)

diff --git a/heartbeat/Filesystem b/heartbeat/Filesystem
index 50c68f115..65a9dffb5 100755
--- a/heartbeat/Filesystem
+++ b/heartbeat/Filesystem
@@ -1021,6 +1021,7 @@ is_option "ro" &&
case "$FSTYPE" in
nfs4|nfs|efs|smbfs|cifs|none|gfs2|glusterfs|ceph|ocfs2|overlay|overlayfs|tmpfs|cvfs|lustre)
CLUSTERSAFE=1 # this is kind of safe too
+ systemd_drop_in "99-Filesystem-remote" "After" "remote-fs.target"
;;
# add here CLUSTERSAFE=0 for all filesystems which are not
# cluster aware and which, even if when mounted read-only,
@@ -1028,6 +1029,7 @@ nfs4|nfs|efs|smbfs|cifs|none|gfs2|glusterfs|ceph|ocfs2|overlay|overlayfs|tmpfs|c
ext4|ext4dev|ext3|reiserfs|reiser4|xfs|jfs)
if ocf_is_true "$OCF_RESKEY_force_clones"; then
CLUSTERSAFE=2
+ systemd_drop_in "99-Filesystem-remote" "After" "remote-fs.target"
else
CLUSTERSAFE=0 # these are not allowed
fi
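
As a rough sketch of the effect of the systemd_drop_in calls added above: the helper writes a drop-in that orders the resource-agents dependency target after remote-fs.target, so network filesystems are still reachable while the cluster side shuts down. The directory and unit names below are assumptions based on how the helper is used elsewhere in resource-agents, not something this patch spells out:

    # hypothetical equivalent of:
    #   systemd_drop_in "99-Filesystem-remote" "After" "remote-fs.target"
    mkdir -p /run/systemd/system/resource-agents-deps.target.d
    cat > /run/systemd/system/resource-agents-deps.target.d/99-Filesystem-remote.conf <<'EOF'
    [Unit]
    After=remote-fs.target
    EOF
    systemctl daemon-reload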
@ -1,27 +0,0 @@
From a913eb6a9a8732db7c56d2e0be937dbd0db9dc38 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Fri, 26 May 2023 12:45:13 +0200
Subject: [PATCH] Delay: increase stop, status and monitor timeouts to 40s to
avoid failing with default values

---
heartbeat/Delay | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/heartbeat/Delay b/heartbeat/Delay
index 7ba6623f24..bc6c13559b 100755
--- a/heartbeat/Delay
+++ b/heartbeat/Delay
@@ -89,9 +89,9 @@ Defaults to "startdelay" if unspecified.

<actions>
<action name="start" timeout="30s" />
-<action name="stop" timeout="30s" />
-<action name="status" depth="0" timeout="30s" interval="10s" />
-<action name="monitor" depth="0" timeout="30s" interval="10s" />
+<action name="stop" timeout="40s" />
+<action name="status" depth="0" timeout="40s" interval="10s" />
+<action name="monitor" depth="0" timeout="40s" interval="10s" />
<action name="meta-data" timeout="5s" />
<action name="validate-all" timeout="5s" />
</actions>
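
The Delay agent's stop and monitor actions sleep for the configured delay before returning, which is presumably why the old 30s advisory timeouts could expire and the patch raises them. A hedged example of a resource definition matching the new 40s defaults (resource name and delay value invented):

    pcs resource create slowstart ocf:heartbeat:Delay startdelay=20 \
        op stop timeout=40s \
        op monitor timeout=40s interval=10s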
@ -1,30 +0,0 @@
From fe8a807dae0398b811d1ee63ebd7202280b2b678 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 18 Jul 2023 14:51:00 +0200
Subject: [PATCH] Delay: remove statement about defaulting to "startdelay"
value if not specified

---
heartbeat/Delay | 2 --
1 file changed, 2 deletions(-)

diff --git a/heartbeat/Delay b/heartbeat/Delay
index bc6c13559..5aa8f4608 100755
--- a/heartbeat/Delay
+++ b/heartbeat/Delay
@@ -71,7 +71,6 @@ How long in seconds to delay on start operation.
<parameter name="stopdelay" unique="0" required="0">
<longdesc lang="en">
How long in seconds to delay on stop operation.
-Defaults to "startdelay" if unspecified.
</longdesc>
<shortdesc lang="en">Stop delay</shortdesc>
<content type="integer" default="${OCF_RESKEY_stopdelay_default}" />
@@ -80,7 +79,6 @@ Defaults to "startdelay" if unspecified.
<parameter name="mondelay" unique="0" required="0">
<longdesc lang="en">
How long in seconds to delay on monitor operation.
-Defaults to "startdelay" if unspecified.
</longdesc>
<shortdesc lang="en">Monitor delay</shortdesc>
<content type="integer" default="${OCF_RESKEY_mondelay_default}" />
28
SOURCES/gcp-configure-skip-bundled-lib-checks.patch
Normal file
@ -0,0 +1,28 @@
--- ClusterLabs-resource-agents-55a4e2c9/configure.ac 2021-08-19 09:37:57.000000000 +0200
+++ ClusterLabs-resource-agents-55a4e2c9/configure.ac.modif 2021-09-02 13:12:26.336044699 +0200
@@ -522,25 +522,12 @@
AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1)

BUILD_GCP_PD_MOVE=1
-if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then
- BUILD_GCP_PD_MOVE=0
- AC_MSG_WARN("Not building gcp-pd-move")
-fi
AM_CONDITIONAL(BUILD_GCP_PD_MOVE, test $BUILD_GCP_PD_MOVE -eq 1)

BUILD_GCP_VPC_MOVE_ROUTE=1
-if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || \
- test "x${HAVE_PYMOD_PYROUTE2}" != xyes || test $BUILD_OCF_PY -eq 0; then
- BUILD_GCP_VPC_MOVE_ROUTE=0
- AC_MSG_WARN("Not building gcp-vpc-move-route")
-fi
AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_ROUTE, test $BUILD_GCP_VPC_MOVE_ROUTE -eq 1)

BUILD_GCP_VPC_MOVE_VIP=1
-if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then
- BUILD_GCP_VPC_MOVE_VIP=0
- AC_MSG_WARN("Not building gcp-vpc-move-vip")
-fi
AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_VIP, test $BUILD_GCP_VPC_MOVE_VIP -eq 1)

AC_PATH_PROGS(ROUTE, route)
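
Each deleted guard was the same configure-time feature test; in shell terms it amounted to roughly the following (module name taken from the hunk), and removing it makes the three gcp-* agents build unconditionally against the bundled libraries instead of system Python modules:

    # sketch of the check being dropped for each BUILD_GCP_* flag
    if [ -z "$PYTHON" ] || ! "$PYTHON" -c 'import googleapiclient' 2>/dev/null; then
        echo 'warning: Not building gcp-pd-move'
    fi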
@ -1,12 +0,0 @@
diff --color -uNr a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip
--- a/heartbeat/aliyun-vpc-move-ip 2021-08-19 09:37:57.000000000 +0200
+++ b/heartbeat/aliyun-vpc-move-ip 2021-08-25 13:38:26.786626079 +0200
@@ -17,7 +17,7 @@
OCF_RESKEY_interface_default="eth0"
OCF_RESKEY_profile_default="default"
OCF_RESKEY_endpoint_default="vpc.aliyuncs.com"
-OCF_RESKEY_aliyuncli_default="detect"
+OCF_RESKEY_aliyuncli_default="/usr/lib/fence-agents/support/aliyun/bin/aliyuncli"


: ${OCF_RESKEY_address=${OCF_RESKEY_address_default}}
@ -1,49 +0,0 @@
diff --color -uNr a/heartbeat/awseip b/heartbeat/awseip
--- a/heartbeat/awseip 2020-12-03 14:31:17.000000000 +0100
+++ b/heartbeat/awseip 2021-02-15 16:47:36.624610378 +0100
@@ -43,7 +43,7 @@
#
# Defaults
#
-OCF_RESKEY_awscli_default="/usr/bin/aws"
+OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws"
OCF_RESKEY_auth_type_default="key"
OCF_RESKEY_profile_default="default"
OCF_RESKEY_region_default=""
OCF_RESKEY_api_delay_default="3"
diff --color -uNr a/heartbeat/awsvip b/heartbeat/awsvip
--- a/heartbeat/awsvip 2020-12-03 14:31:17.000000000 +0100
+++ b/heartbeat/awsvip 2021-02-15 16:47:48.960632484 +0100
@@ -42,7 +42,7 @@
#
# Defaults
#
-OCF_RESKEY_awscli_default="/usr/bin/aws"
+OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws"
OCF_RESKEY_auth_type_default="key"
OCF_RESKEY_profile_default="default"
OCF_RESKEY_region_default=""
diff --color -uNr a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
--- a/heartbeat/aws-vpc-move-ip 2020-12-03 14:31:17.000000000 +0100
+++ b/heartbeat/aws-vpc-move-ip 2021-02-15 16:47:55.484644118 +0100
@@ -35,7 +35,7 @@
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

# Defaults
-OCF_RESKEY_awscli_default="/usr/bin/aws"
+OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws"
OCF_RESKEY_auth_type_default="key"
OCF_RESKEY_profile_default="default"
OCF_RESKEY_region_default=""
diff --color -uNr a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
--- a/heartbeat/aws-vpc-route53.in 2020-12-03 14:31:17.000000000 +0100
+++ b/heartbeat/aws-vpc-route53.in 2021-02-15 16:47:59.808651828 +0100
@@ -45,7 +45,7 @@
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

# Defaults
-OCF_RESKEY_awscli_default="/usr/bin/aws"
+OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws"
OCF_RESKEY_auth_type_default="key"
OCF_RESKEY_profile_default="default"
OCF_RESKEY_region_default=""
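
All four hunks repoint OCF_RESKEY_awscli_default from the system CLI to the copy bundled in the fence-agents support tree. A quick sanity check of that path on an installed host might look like this (the path comes from the patch itself):

    /usr/lib/fence-agents/support/awscli/bin/aws --version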
@ -1,33 +0,0 @@
diff --color -uNr a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
--- a/heartbeat/gcp-pd-move.in 2021-08-19 09:37:57.000000000 +0200
+++ b/heartbeat/gcp-pd-move.in 2021-08-25 13:50:54.461732967 +0200
@@ -32,6 +32,7 @@
from ocf import logger

try:
+ sys.path.insert(0, '/usr/lib/fence-agents/support/google')
import googleapiclient.discovery
except ImportError:
pass
diff --color -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
--- a/heartbeat/gcp-vpc-move-route.in 2021-08-19 09:37:57.000000000 +0200
+++ b/heartbeat/gcp-vpc-move-route.in 2021-08-25 13:51:17.489797999 +0200
@@ -45,6 +45,7 @@
from ocf import *

try:
+ sys.path.insert(0, '/usr/lib/fence-agents/support/google')
import googleapiclient.discovery
import pyroute2
try:
diff --color -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
--- a/heartbeat/gcp-vpc-move-vip.in 2021-08-19 09:37:57.000000000 +0200
+++ b/heartbeat/gcp-vpc-move-vip.in 2021-08-25 13:51:35.012847487 +0200
@@ -29,6 +29,7 @@
from ocf import *

try:
+ sys.path.insert(0, '/usr/lib/fence-agents/support/google')
import googleapiclient.discovery
try:
from google.oauth2.service_account import Credentials as ServiceAccountCredentials
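
The recurring sys.path.insert hunk simply puts the bundled Google API client ahead of the default module search path. The same wiring can be verified from the shell before debugging one of these agents (path and module name are the ones the patch uses):

    PYTHONPATH=/usr/lib/fence-agents/support/google \
        python3 -c 'import googleapiclient.discovery; print("bundled googleapiclient OK")'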
@ -1,6 +1,6 @@
diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
--- a/doc/man/Makefile.am 2021-08-25 09:51:53.037906134 +0200
+++ b/doc/man/Makefile.am 2021-08-25 09:48:44.578408475 +0200
--- a/doc/man/Makefile.am 2021-08-25 09:31:14.033615965 +0200
+++ b/doc/man/Makefile.am 2021-08-24 17:59:40.679372762 +0200
@@ -97,6 +97,8 @@
ocf_heartbeat_ManageRAID.7 \
ocf_heartbeat_ManageVE.7 \
@ -11,8 +11,8 @@ diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
ocf_heartbeat_Raid1.7 \
ocf_heartbeat_Route.7 \
diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
--- a/heartbeat/Makefile.am 2021-08-25 09:51:53.038906137 +0200
+++ b/heartbeat/Makefile.am 2021-08-25 09:48:44.588408501 +0200
--- a/heartbeat/Makefile.am 2021-08-25 09:31:14.034615967 +0200
+++ b/heartbeat/Makefile.am 2021-08-24 17:59:40.679372762 +0200
@@ -29,6 +29,8 @@

ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat
@ -34,39 +34,23 @@ diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
ClusterMon \
diff --color -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait
--- a/heartbeat/nova-compute-wait 1970-01-01 01:00:00.000000000 +0100
+++ b/heartbeat/nova-compute-wait 2021-08-25 09:50:14.626646141 +0200
@@ -0,0 +1,345 @@
+++ b/heartbeat/nova-compute-wait 2021-08-24 17:59:40.678372759 +0200
@@ -0,0 +1,317 @@
+#!/bin/sh
+# Copyright 2015 Red Hat, Inc.
+#
+# Description: Manages compute daemons
+#
+# nova-compute-wait agent manages compute daemons.
+# Authors: Andrew Beekhof
+#
+# Copyright (c) 2015
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+# Support: openstack@lists.openstack.org
+# License: Apache Software License (ASL) 2.0
+#
+
+
+#######################################################################
+# Initialization:
+
+
+###
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
@ -91,33 +75,25 @@ diff --color -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait
+<parameters>
+
+<parameter name="auth_url" unique="0" required="1">
+<longdesc lang="en">
+Deprecated option not in use
+</longdesc>
+<shortdesc lang="en">Deprecated</shortdesc>
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="username" unique="0" required="1">
+<longdesc lang="en">
+Deprecated option not in use
+</longdesc>
+<shortdesc lang="en">Deprecated</shortdesc>
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+</parameter>
+
+<parameter name="password" unique="0" required="1">
+<longdesc lang="en">
+Deprecated option not in use
+</longdesc>
+<shortdesc lang="en">Deprecated</shortdesc>
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="tenant_name" unique="0" required="1">
+<longdesc lang="en">
+Deprecated option not in use
+</longdesc>
+<shortdesc lang="en">Deprecated</shortdesc>
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+<content type="string" default="" />
+</parameter>
+
@ -130,18 +106,14 @@ diff --color -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait
+</parameter>
+
+<parameter name="endpoint_type" unique="0" required="0">
+<longdesc lang="en">
+Deprecated option not in use
+</longdesc>
+<shortdesc lang="en">Deprecated</shortdesc>
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="no_shared_storage" unique="0" required="0">
+<longdesc lang="en">
+Deprecated option not in use
+</longdesc>
+<shortdesc lang="en">Deprecated</shortdesc>
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+<content type="boolean" default="0" />
+</parameter>
+
@ -383,8 +355,8 @@ diff --color -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait
+
diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
--- a/heartbeat/NovaEvacuate 1970-01-01 01:00:00.000000000 +0100
+++ b/heartbeat/NovaEvacuate 2021-08-25 09:50:23.780670326 +0200
@@ -0,0 +1,400 @@
+++ b/heartbeat/NovaEvacuate 2021-08-24 17:59:40.682372770 +0200
@@ -0,0 +1,407 @@
+#!/bin/bash
+#
+# Copyright 2015 Red Hat, Inc.
@ -411,7 +383,7 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+#######################################################################
+
+meta_data() {
+ cat <<END
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="NovaEvacuate" version="1.0">
@ -437,7 +409,6 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+Username for connecting to keystone in admin context
+</longdesc>
+<shortdesc lang="en">Username</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="password" unique="0" required="1">
@ -453,15 +424,23 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+Tenant name for connecting to keystone in admin context.
+Note that with Keystone V3 tenant names are only unique within a domain.
+</longdesc>
+<shortdesc lang="en">Tenant name</shortdesc>
+<shortdesc lang="en">Keystone v2 Tenant or v3 Project Name</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="domain" unique="0" required="0">
+<parameter name="user_domain" unique="0" required="1">
+<longdesc lang="en">
+DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN
+User's domain name. Used when authenticating to Keystone.
+</longdesc>
+<shortdesc lang="en">DNS domain</shortdesc>
+<shortdesc lang="en">Keystone v3 User Domain</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="project_domain" unique="0" required="1">
+<longdesc lang="en">
+Domain name containing project. Used when authenticating to Keystone.
+</longdesc>
+<shortdesc lang="en">Keystone v3 Project Domain</shortdesc>
+<content type="string" default="" />
+</parameter>
+
@ -493,12 +472,7 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+
+<parameter name="no_shared_storage" unique="0" required="0">
+<longdesc lang="en">
+Indicate that nova storage for instances is not shared across compute
+nodes. This must match the reality of how nova storage is configured!
+Otherwise VMs could end up in error state upon evacuation. When
+storage is non-shared, instances on dead hypervisors will be rebuilt
+from their original image or volume, so anything on ephemeral storage
+will be lost.
+Disable shared storage recovery for instances. Use at your own risk!
+</longdesc>
+<shortdesc lang="en">Disable shared storage recovery for instances</shortdesc>
+<content type="boolean" default="0" />
@ -539,12 +513,12 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+# don't exit on TERM, to test that lrmd makes sure that we do exit
+trap sigterm_handler TERM
+sigterm_handler() {
+ ocf_log info "They use TERM to bring us down. No such luck."
+ return
+ ocf_log info "They use TERM to bring us down. No such luck."
+ return
+}
+
+evacuate_usage() {
+ cat <<END
+ cat <<END
+usage: $0 {start|stop|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
@ -563,84 +537,82 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+}
+
+update_evacuation() {
+ attrd_updater -p -n evacuate -Q -N ${1} -U ${2}
+ attrd_updater -p -n evacuate -Q -N ${1} -v ${2}
+ arc=$?
+ if [ ${arc} != 0 ]; then
+ ocf_log warn "Can not set evacuation state of ${1} to ${2}: ${arc}"
+ ocf_log warn "Can not set evacuation state of ${1} to ${2}: ${arc}"
+ fi
+ return ${arc}
+}
+
+handle_evacuations() {
+ while [ $# -gt 0 ]; do
+ node=$1
+ state=$2
+ shift; shift;
+ need_evacuate=0
+ node=$1
+ state=$2
+ shift; shift;
+ need_evacuate=0
+
+ case $state in
+ "")
+ ;;
+ no)
+ ocf_log debug "$node is either fine or already handled"
+ ;;
+ yes) need_evacuate=1
+ ;;
+ *@*)
+ where=$(echo $state | awk -F@ '{print $1}')
+ when=$(echo $state | awk -F@ '{print $2}')
+ now=$(date +%s)
+ case $state in
+ "")
+ ;;
+ no)
+ ocf_log debug "$node is either fine or already handled"
+ ;;
+ yes) need_evacuate=1
+ ;;
+ *@*)
+ where=$(echo $state | awk -F@ '{print $1}')
+ when=$(echo $state | awk -F@ '{print $2}')
+ now=$(date +%s)
+
+ if [ $(($now - $when)) -gt 60 ]; then
+ ocf_log info "Processing partial evacuation of $node by" \
+ "$where at $when"
+ need_evacuate=1
+ else
+ # Give some time for any in-flight evacuations to either
+ # complete or fail Nova won't react well if there are two
+ # overlapping requests
+ ocf_log info "Deferring processing partial evacuation of" \
+ "$node by $where at $when"
+ fi
+ ;;
+ esac
+ if [ $(($now - $when)) -gt 60 ]; then
+ ocf_log info "Processing partial evacuation of $node by $where at $when"
+ need_evacuate=1
+ else
+ # Give some time for any in-flight evacuations to either complete or fail
+ # Nova won't react well if there are two overlapping requests
+ ocf_log info "Deferring processing partial evacuation of $node by $where at $when"
+ fi
+ ;;
+ esac
+
+ if [ $need_evacuate = 1 ]; then
+ fence_agent="fence_compute"
+ if [ $need_evacuate = 1 ]; then
+ fence_agent="fence_compute"
+
+ if have_binary fence_evacuate; then
+ fence_agent="fence_evacuate"
+ fi
+ if have_binary fence_evacuate
+ then
+ fence_agent="fence_evacuate"
+ fi
+
+ if [ ${OCF_RESKEY_evacuate_delay} != 0 ]; then
+ ocf_log info "Delaying nova evacuate by $OCF_RESKEY_evacuate_delay seconds"
+ sleep ${OCF_RESKEY_evacuate_delay}
+ fi
+
+ ocf_log notice "Initiating evacuation of $node with $fence_agent"
+ $fence_agent ${fence_options} -o status -n ${node}
+ if [ $? = 1 ]; then
+ ocf_log info "Nova does not know about ${node}"
+ # Dont mark as no because perhaps nova is unavailable right now
+ continue
+ fi
+ ocf_log notice "Initiating evacuation of $node with $fence_agent"
+ $fence_agent ${fence_options} -o status -n ${node}
+ if [ $? = 1 ]; then
+ ocf_log info "Nova does not know about ${node}"
+ # Dont mark as no because perhaps nova is unavailable right now
+ continue
+ fi
+
+ update_evacuation ${node} "$(uname -n)@$(date +%s)"
+ if [ $? != 0 ]; then
+ return $OCF_SUCCESS
+ fi
+ update_evacuation ${node} "$(uname -n)@$(date +%s)"
+ if [ $? != 0 ]; then
+ return $OCF_SUCCESS
+ fi
+
+ $fence_agent ${fence_options} -o off -n $node
+ rc=$?
+ $fence_agent ${fence_options} -o off -n $node
+ rc=$?
+
+ if [ $rc = 0 ]; then
+ update_evacuation ${node} no
+ ocf_log notice "Completed evacuation of $node"
+ else
+ ocf_log warn "Evacuation of $node failed: $rc"
+ update_evacuation ${node} yes
+ fi
+ fi
+ if [ $rc = 0 ]; then
+ update_evacuation ${node} no
+ ocf_log notice "Completed evacuation of $node"
+ else
+ ocf_log warn "Evacuation of $node failed: $rc"
+ update_evacuation ${node} yes
+ fi
+ fi
+ done
+
+ return $OCF_SUCCESS
@ -648,7 +620,7 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+
+evacuate_monitor() {
+ if [ ! -f "$statefile" ]; then
+ return $OCF_NOT_RUNNING
+ return $OCF_NOT_RUNNING
+ fi
+
+ handle_evacuations $(
@ -665,49 +637,54 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+ rc=$OCF_SUCCESS
+ fence_options=""
+
+
+ if ! have_binary fence_evacuate; then
+ check_binary fence_compute
+ fi
+
+ # Is the state directory writable?
+ # Is the state directory writable?
+ state_dir=$(dirname $statefile)
+ touch "$state_dir/$$"
+ if [ $? != 0 ]; then
+ ocf_exit_reason "Invalid state directory: $state_dir"
+ return $OCF_ERR_ARGS
+ ocf_exit_reason "Invalid state directory: $state_dir"
+ return $OCF_ERR_ARGS
+ fi
+ rm -f "$state_dir/$$"
+
+ if [ -z "${OCF_RESKEY_auth_url}" ]; then
+ ocf_exit_reason "auth_url not configured"
+ exit $OCF_ERR_CONFIGURED
+ ocf_exit_reason "auth_url not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}"
+
+ if [ -z "${OCF_RESKEY_username}" ]; then
+ ocf_exit_reason "username not configured"
+ exit $OCF_ERR_CONFIGURED
+ ocf_exit_reason "username not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -l ${OCF_RESKEY_username}"
+
+ if [ -z "${OCF_RESKEY_password}" ]; then
+ ocf_exit_reason "password not configured"
+ exit $OCF_ERR_CONFIGURED
+ ocf_exit_reason "password not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -p ${OCF_RESKEY_password}"
+
+ if [ -z "${OCF_RESKEY_tenant_name}" ]; then
+ ocf_exit_reason "tenant_name not configured"
+ exit $OCF_ERR_CONFIGURED
+ ocf_exit_reason "tenant_name not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
+
+ if [ -n "${OCF_RESKEY_domain}" ]; then
+ fence_options="${fence_options} -d ${OCF_RESKEY_domain}"
+ if [ -n "${OCF_RESKEY_user_domain}" ]; then
+ fence_options="${fence_options} -u ${OCF_RESKEY_user_domain}"
+ fi
+
+ if [ -n "${OCF_RESKEY_project_domain}" ]; then
+ fence_options="${fence_options} -P ${OCF_RESKEY_project_domain}"
+ fi
+
+ if [ -n "${OCF_RESKEY_region_name}" ]; then
@ -722,9 +699,9 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+ fi
+
+ if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
+ if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
+ fence_options="${fence_options} --no-shared-storage"
+ fi
+ if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
+ fence_options="${fence_options} --no-shared-storage"
+ fi
+ fi
+
+ if [ -n "${OCF_RESKEY_verbose}" ]; then
@ -734,20 +711,22 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+ fi
+
+ if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
+ case ${OCF_RESKEY_endpoint_type} in
+ adminURL|publicURL|internalURL)
+ ;;
+ *)
+ ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type}" \
+ "not valid. Use adminURL or publicURL or internalURL"
+ exit $OCF_ERR_CONFIGURED
+ ;;
+ esac
+ fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
+ case ${OCF_RESKEY_endpoint_type} in
+ adminURL|publicURL|internalURL) ;;
+ *)
+ ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type} not valid. Use adminURL or publicURL or internalURL"
+ exit $OCF_ERR_CONFIGURED
+ ;;
+ esac
+ fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
+ fi
+
+ if [ -z "${OCF_RESKEY_evacuate_delay}" ]; then
+ OCF_RESKEY_evacuate_delay=0
+ fi
+
+ if [ $rc != $OCF_SUCCESS ]; then
+ exit $rc
+ exit $rc
+ fi
+ return $rc
+}
@ -756,31 +735,31 @@ diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+
+case $__OCF_ACTION in
+ start)
+ evacuate_validate
+ evacuate_start
+ ;;
+ evacuate_validate
+ evacuate_start
+ ;;
+ stop)
+ evacuate_stop
+ ;;
+ evacuate_stop
+ ;;
+ monitor)
+ evacuate_validate
+ evacuate_monitor
+ ;;
+ evacuate_validate
+ evacuate_monitor
+ ;;
+ meta-data)
+ meta_data
+ exit $OCF_SUCCESS
+ ;;
+ meta_data
+ exit $OCF_SUCCESS
+ ;;
+ usage|help)
+ evacuate_usage
+ exit $OCF_SUCCESS
+ ;;
+ evacuate_usage
+ exit $OCF_SUCCESS
+ ;;
+ validate-all)
+ exit $OCF_SUCCESS
+ ;;
+ exit $OCF_SUCCESS
+ ;;
+ *)
+ evacuate_usage
+ exit $OCF_ERR_UNIMPLEMENTED
+ ;;
+ evacuate_usage
+ exit $OCF_ERR_UNIMPLEMENTED
+ ;;
+esac
+rc=$?
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
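
A note for reading handle_evacuations() above: the per-node evacuate attribute moves from yes, to host@timestamp while an evacuation is in flight, to no on completion (or back to yes on failure). The same probes the agent performs can be run by hand, using only flags that appear in the script; the node name and variables here are invented:

    # ask nova whether it knows the failed host, as the agent does
    fence_evacuate -k "$auth_url" -l "$user" -p "$pass" -t "$tenant" \
        -o status -n compute-0

    # inspect the evacuation attribute for that node
    attrd_updater -p -n evacuate -Q -N compute-0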
592
SOURCES/python3-syntax-fixes.patch
Normal file
@ -0,0 +1,592 @@
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2018-10-08 12:36:31.868765636 +0200
|
||||
@@ -52,8 +52,8 @@
|
||||
if not filename == None:
|
||||
self.exportInstanceToFile(result,filename)
|
||||
else:
|
||||
- print 'Filename is needed'
|
||||
- except Exception,e:
|
||||
+ print('Filename is needed')
|
||||
+ except Exception as e:
|
||||
print(e)
|
||||
def _optimizeResult(self,result):
|
||||
keys = result.keys()
|
||||
@@ -81,9 +81,9 @@
|
||||
fp = open(fileName,'w')
|
||||
try :
|
||||
fp.write(json.dumps(result,indent=4))
|
||||
- print "success"
|
||||
+ print("success")
|
||||
except IOError:
|
||||
- print "Error: can\'t find file or read data"
|
||||
+ print("Error: can\'t find file or read data")
|
||||
finally:
|
||||
fp.close()
|
||||
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2018-10-08 12:36:53.882358851 +0200
|
||||
@@ -16,7 +16,7 @@
|
||||
if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
|
||||
filename = keyValues['--filename'][0]
|
||||
else:
|
||||
- print "A profile is needed! please use \'--filename\' and add the profile name."
|
||||
+ print("A profile is needed! please use \'--filename\' and add the profile name.")
|
||||
return filename
|
||||
|
||||
def getInstanceCount(self,keyValues):
|
||||
@@ -25,7 +25,7 @@
|
||||
if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0:
|
||||
count = keyValues['--instancecount'][0]
|
||||
else:
|
||||
- print "InstanceCount should be a positive number! The default value(1) will be used!"
|
||||
+ print("InstanceCount should be a positive number! The default value(1) will be used!")
|
||||
return int(count)
|
||||
|
||||
def getSubOperations(self,cmd,operation):
|
||||
@@ -65,8 +65,8 @@
|
||||
_newkeyValues["RegionId"] = newkeyValues["RegionId"]
|
||||
self._handExtraOperation(cmd,extraOperation,_newkeyValues,version,secureRequest)
|
||||
else:
|
||||
- print "InstanceId is need!"
|
||||
- except Exception,e:
|
||||
+ print("InstanceId is need!")
|
||||
+ except Exception as e:
|
||||
print(e)
|
||||
|
||||
def _handExtraOperation(self,cmd,extraOperation,keyValues,version , secureRequest = False):
|
||||
@@ -81,7 +81,7 @@
|
||||
response.display_response("error", result, "json")
|
||||
else:
|
||||
response.display_response(extraOperation, result, "json")
|
||||
- except Exception,e:
|
||||
+ except Exception as e:
|
||||
print(e)
|
||||
|
||||
|
||||
@@ -127,7 +127,7 @@
|
||||
'''
|
||||
if data.has_key('InstanceId') and len(data['InstanceId']) > 0:
|
||||
instanceId = data['InstanceId']
|
||||
- except Exception,e:
|
||||
+ except Exception as e:
|
||||
pass
|
||||
finally:
|
||||
return instanceId
|
||||
@@ -156,5 +156,5 @@
|
||||
if __name__ == "__main__":
|
||||
handler = EcsImportHandler()
|
||||
handler.getKVFromJson('ttt')
|
||||
- print handler.getKVFromJson('ttt')
|
||||
+ print(handler.getKVFromJson('ttt'))
|
||||
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-10-08 12:37:08.373091088 +0200
|
||||
@@ -77,8 +77,8 @@
|
||||
if not filename == None:
|
||||
self.exportInstanceToFile(result,filename)
|
||||
else:
|
||||
- print 'Filename is needed'
|
||||
- except Exception,e:
|
||||
+ print('Filename is needed')
|
||||
+ except Exception as e:
|
||||
print(e)
|
||||
|
||||
def exportInstanceToFile(self, result, filename):
|
||||
@@ -96,9 +96,9 @@
|
||||
fp = open(fileName,'w')
|
||||
try :
|
||||
fp.write(json.dumps(result,indent=4))
|
||||
- print "success"
|
||||
+ print("success")
|
||||
except IOError:
|
||||
- print "Error: can\'t find file or read data"
|
||||
+ print("Error: can\'t find file or read data")
|
||||
finally:
|
||||
fp.close()
|
||||
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-10-08 12:36:20.997966509 +0200
|
||||
@@ -26,7 +26,7 @@
|
||||
count = keyValues[import_count][0]
|
||||
else:
|
||||
pass
|
||||
- # print "InstanceCount should be a positive number! The default value(1) will be used!"
|
||||
+ # print("InstanceCount should be a positive number! The default value(1) will be used!")
|
||||
return int(count), "InstanceCount is "+str(count)+" created."
|
||||
|
||||
def getSubOperations(self,cmd,operation):
|
||||
@@ -46,7 +46,7 @@
|
||||
if self.apiHandler.needSetDefaultRegion(cmdInstance, newkeyValues):
|
||||
newkeyValues["RegionId"] = [self.extensionHandler.getUserRegion()]
|
||||
newkeyValues["ClientToken"] = [self.random_str()]
|
||||
- # print newkeyValues.keys()
|
||||
+ # print(newkeyValues.keys())
|
||||
# return
|
||||
# self._setAttr(cmdInstance, newkeyValues) # set all key values in instance
|
||||
# self.apiHandler.changeEndPoint(cmdInstance, newkeyValues)
|
||||
@@ -58,7 +58,7 @@
|
||||
response.display_response("error", result, "json")
|
||||
else:
|
||||
response.display_response(item, result, "json")
|
||||
- except Exception,e:
|
||||
+ except Exception as e:
|
||||
print(e)
|
||||
|
||||
def getKVFromJson(self,filename):
|
||||
@@ -77,7 +77,7 @@
|
||||
fp = open(fileName,'r')
|
||||
data=json.loads(fp.read())
|
||||
keys = data.keys()
|
||||
- # print keys, type(data['Items']['DBInstanceAttribute'][0])
|
||||
+ # print(keys, type(data['Items']['DBInstanceAttribute'][0]))
|
||||
# instanceAttribute = data['Items']['DBInstanceAttribute'][0]
|
||||
items = data['Items']['DBInstanceAttribute'][0]
|
||||
keys = items.keys()
|
||||
@@ -130,7 +130,7 @@
|
||||
if __name__ == "__main__":
|
||||
handler = RdsImportDBInstanceHandler()
|
||||
# handler.getKVFromJson('ttt')
|
||||
- # print handler.getKVFromJson('ttt')
|
||||
- print handler.random_str()
|
||||
+ # print(handler.getKVFromJson('ttt'))
|
||||
+ print(handler.random_str())
|
||||
|
||||
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-10-08 12:11:19.743703469 +0200
|
||||
@@ -24,9 +24,9 @@
|
||||
_value = keyValues[ProfileCmd.name][0] # use the first value
|
||||
self.extensionCliHandler.setUserProfile(_value)
|
||||
else:
|
||||
- print "Do your forget profile name? please use \'--name\' and add the profile name."
|
||||
+ print("Do your forget profile name? please use \'--name\' and add the profile name.")
|
||||
else:
|
||||
- print "[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?"
|
||||
+ print("[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?")
|
||||
|
||||
def addProfileCmd(self, cmd, keyValues):
|
||||
userKey = ''
|
||||
@@ -52,12 +52,12 @@
|
||||
finally:
|
||||
f.close()
|
||||
else:
|
||||
- print "[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?"
|
||||
+ print("[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
handler = ProfileHandler()
|
||||
handler.handleProfileCmd("useprofile", {'--name':["profile444"]})
|
||||
- print handler.extensionCliHandler.getUserProfile()
|
||||
+ print(handler.extensionCliHandler.getUserProfile())
|
||||
handler.addProfileCmd("addProfile", {})
|
||||
- handler.addProfileCmd("addProfile", {'--name':["profile2222"]})
|
||||
\ No newline at end of file
|
||||
+ handler.addProfileCmd("addProfile", {'--name':["profile2222"]})
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-10-08 12:12:25.602486634 +0200
|
||||
@@ -24,14 +24,14 @@
|
||||
self.openApiDataHandler = aliyunOpenApiData.aliyunOpenApiDataHandler()
|
||||
|
||||
def showUsage(self):
|
||||
- print "usage: aliyuncli <command> <operation> [options and parameters]"
|
||||
+ print("usage: aliyuncli <command> <operation> [options and parameters]")
|
||||
|
||||
def showExample(self):
|
||||
- print "show example"
|
||||
+ print("show example")
|
||||
|
||||
def showCmdError(self, cmd):
|
||||
self.showUsage()
|
||||
- print "<aliyuncli> the valid command as follows:\n"
|
||||
+ print("<aliyuncli> the valid command as follows:\n")
|
||||
cmds = self.openApiDataHandler.getApiCmds()
|
||||
self.printAsFormat(cmds)
|
||||
|
||||
@@ -44,7 +44,7 @@
|
||||
error.printInFormat("Wrong version", "The sdk version is not exit.")
|
||||
return None
|
||||
self.showUsage()
|
||||
- print "["+cmd+"]","valid operations as follows:\n"
|
||||
+ print("["+cmd+"]","valid operations as follows:\n")
|
||||
operations = self.openApiDataHandler.getApiOperations(cmd, version)
|
||||
extensions = self.openApiDataHandler.getExtensionOperationsFromCmd(cmd)
|
||||
operations.update(extensions)
|
||||
@@ -56,8 +56,8 @@
|
||||
self.printAsFormat(operations)
|
||||
|
||||
def showParameterError(self, cmd, operation, parameterlist):
|
||||
- print 'usage: aliyuncli <command> <operation> [options and parameters]'
|
||||
- print '['+cmd+"."+operation+']: current operation can uses parameters as follow :\n'
|
||||
+ print('usage: aliyuncli <command> <operation> [options and parameters]')
|
||||
+ print('['+cmd+"."+operation+']: current operation can uses parameters as follow :\n')
|
||||
self.printAsFormat(parameterlist)
|
||||
pass
|
||||
|
||||
@@ -72,7 +72,7 @@
|
||||
tmpList.append(item)
|
||||
count = count+1
|
||||
if len(tmpList) == 2:
|
||||
- print '{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10')
|
||||
+ print('{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10'))
|
||||
tmpList = list()
|
||||
if len(tmpList) == 1 and count == len(mlist):
|
||||
- print tmpList[0]
|
||||
\ No newline at end of file
|
||||
+ print(tmpList[0])
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-10-08 12:12:42.799168903 +0200
|
||||
@@ -91,7 +91,7 @@
|
||||
keyValues["RegionId"] = [self.extensionHandler.getUserRegion()]
|
||||
#check necessaryArgs as:accesskeyid accesskeysecret regionId
|
||||
if not self.handler.hasNecessaryArgs(keyValues):
|
||||
- print 'accesskeyid/accesskeysecret/regionId is absence'
|
||||
+ print('accesskeyid/accesskeysecret/regionId is absence')
|
||||
return
|
||||
result = self.handler.getResponse(cmd,operation,className,cmdInstance,keyValues,secureRequest)
|
||||
if result is None:
|
||||
@@ -102,7 +102,7 @@
|
||||
else:
|
||||
response.display_response(operation, result, outPutFormat,keyValues)
|
||||
else:
|
||||
- print 'aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com'
|
||||
+ print('aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com')
|
||||
elif self.handler.isAvailableExtensionOperation(cmd, operation):
|
||||
if self.args.__len__() >= 3 and self.args[2] == 'help':
|
||||
import commandConfigure
|
||||
@@ -125,7 +125,7 @@
|
||||
def showInstanceAttribute(self, cmd, operation, classname):
|
||||
if self.args.__len__() >= 3 and self.args[2] == "help":
|
||||
self.helper.showParameterError(cmd, operation, self.completer._help_to_show_instance_attribute(classname))
|
||||
- #print self.completer._help_to_show_instance_attribute(cmdInstance)
|
||||
+ #print(self.completer._help_to_show_instance_attribute(cmdInstance))
|
||||
return True
|
||||
return False
|
||||
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-10-08 12:12:54.764947819 +0200
|
||||
@@ -141,7 +141,7 @@
|
||||
_key = keyValues[keystr][0]
|
||||
if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0:
|
||||
_secret = keyValues[secretstr][0]
|
||||
- #print "accesskeyid: ", _key , "accesskeysecret: ",_secret
|
||||
+ #print("accesskeyid: ", _key , "accesskeysecret: ",_secret)
|
||||
return _key, _secret
|
||||
|
||||
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-10-08 12:13:23.672413710 +0200
|
||||
@@ -161,12 +161,12 @@
|
||||
|
||||
if __name__ == "__main__":
|
||||
upgradeHandler = aliyunCliUpgradeHandler()
|
||||
- # print upgradeHandler.getLatestTimeFromServer()
|
||||
+ # print(upgradeHandler.getLatestTimeFromServer())
|
||||
# flag, url = upgradeHandler.isNewVersionReady()
|
||||
# if flag:
|
||||
- # print url
|
||||
+ # print(url)
|
||||
# else:
|
||||
- # print "current version is latest one"
|
||||
- # print "final test:"
|
||||
- print upgradeHandler.checkForUpgrade()
|
||||
- print upgradeHandler.handleUserChoice("N")
|
||||
+ # print("current version is latest one")
|
||||
+ # print("final test:")
|
||||
+ print(upgradeHandler.checkForUpgrade())
|
||||
+ print(upgradeHandler.handleUserChoice("N"))
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-10-08 12:14:46.830877248 +0200
|
||||
@@ -127,35 +127,35 @@
|
||||
|
||||
# this api will show help page when user input aliyuncli help(-h or --help)
|
||||
def showAliyunCliHelp(self):
|
||||
- print color.bold+"ALIYUNCLI()"+color.end
|
||||
- print color.bold+"\nNAME"+color.end
|
||||
- print "\taliyuncli -"
|
||||
- print color.bold+"\nDESCRIPTION"+color.end
|
||||
- print "\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. "
|
||||
- print color.bold+"\nSYNOPSIS"+color.end
|
||||
- print "\taliyuncli <command> <operation> [options and parameters]"
|
||||
- print "\n\taliyuncli has supported command completion now. The detail you can check our site."
|
||||
- print color.bold+"OPTIONS"+color.end
|
||||
- print color.bold+"\tconfigure"+color.end
|
||||
- print "\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)"
|
||||
- print color.bold+"\n\t--output"+color.end+" (string)"
|
||||
- print "\n\tThe formatting style for command output."
|
||||
- print "\n\to json"
|
||||
- print "\n\to text"
|
||||
- print "\n\to table"
|
||||
+ print(color.bold+"ALIYUNCLI()"+color.end)
|
||||
+ print(color.bold+"\nNAME"+color.end)
|
||||
+ print("\taliyuncli -")
|
||||
+ print(color.bold+"\nDESCRIPTION"+color.end)
|
||||
+ print("\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. ")
|
||||
+ print(color.bold+"\nSYNOPSIS"+color.end)
|
||||
+ print("\taliyuncli <command> <operation> [options and parameters]")
|
||||
+ print("\n\taliyuncli has supported command completion now. The detail you can check our site.")
|
||||
+ print(color.bold+"OPTIONS"+color.end)
|
||||
+ print(color.bold+"\tconfigure"+color.end)
|
||||
+ print("\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)")
|
||||
+ print(color.bold+"\n\t--output"+color.end+" (string)")
|
||||
+ print("\n\tThe formatting style for command output.")
|
||||
+ print("\n\to json")
|
||||
+ print("\n\to text")
|
||||
+ print("\n\to table")
|
||||
|
||||
- print color.bold+"\n\t--secure"+color.end
|
||||
- print "\n\tMaking secure requests(HTTPS) to service"
|
||||
+ print(color.bold+"\n\t--secure"+color.end)
|
||||
+ print("\n\tMaking secure requests(HTTPS) to service")
|
||||
|
||||
- print color.bold+"\nAVAILABLE SERVICES"+color.end
|
||||
- print "\n\to ecs"
|
||||
- print "\n\to ess"
|
||||
- print "\n\to mts"
|
||||
- print "\n\to rds"
|
||||
- print "\n\to slb"
|
||||
+ print(color.bold+"\nAVAILABLE SERVICES"+color.end)
|
||||
+ print("\n\to ecs")
|
||||
+ print("\n\to ess")
|
||||
+ print("\n\to mts")
|
||||
+ print("\n\to rds")
|
||||
+ print("\n\to slb")
|
||||
|
||||
def showCurrentVersion(self):
|
||||
- print self._version
|
||||
+ print(self._version)
|
||||
|
||||
def findConfigureFilePath(self):
|
||||
homePath = ""
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2018-10-08 12:16:00.008525187 +0200
|
||||
@@ -39,9 +39,9 @@
|
||||
|
||||
|
||||
def oss_notice():
|
||||
- print "OSS operation in aliyuncli is not supported."
|
||||
- print "Please use 'ossutil' command line tool for Alibaba Cloud OSS operation."
|
||||
- print "You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n"
|
||||
+ print("OSS operation in aliyuncli is not supported.")
|
||||
+ print("Please use 'ossutil' command line tool for Alibaba Cloud OSS operation.")
|
||||
+ print("You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n")
|
||||
|
||||
|
||||
try:
|
||||
@@ -391,22 +391,22 @@
|
||||
return jsonobj
|
||||
|
||||
except ImportError as e:
|
||||
- print module, 'is not exist!'
|
||||
+ print(module, 'is not exist!')
|
||||
sys.exit(1)
|
||||
|
||||
except ServerException as e:
|
||||
error = cliError.error()
|
||||
error.printInFormat(e.get_error_code(), e.get_error_msg())
|
||||
- print "Detail of Server Exception:\n"
|
||||
- print str(e)
|
||||
+ print("Detail of Server Exception:\n")
|
||||
+ print(str(e))
|
||||
sys.exit(1)
|
||||
|
||||
except ClientException as e:
|
||||
- # print e.get_error_msg()
|
||||
+ # print(e.get_error_msg())
|
||||
error = cliError.error()
|
||||
error.printInFormat(e.get_error_code(), e.get_error_msg())
|
||||
- print "Detail of Client Exception:\n"
|
||||
- print str(e)
|
||||
+ print("Detail of Client Exception:\n")
|
||||
+ print(str(e))
|
||||
sys.exit(1)
|
||||
|
||||
def getSetFuncs(self,classname):
|
||||
@@ -549,6 +549,6 @@
|
||||
|
||||
if __name__ == '__main__':
|
||||
handler = aliyunOpenApiDataHandler()
|
||||
- print "###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance')
|
||||
- print "###############",handler.isAvailableOperation('ecs', 'DescribeInstances')
|
||||
- print "###############",handler.getExtensionOperationsFromCmd('ecs')
|
||||
+ print("###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance'))
|
||||
+ print("###############",handler.isAvailableOperation('ecs', 'DescribeInstances'))
|
||||
+ print("###############",handler.getExtensionOperationsFromCmd('ecs'))
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-10-08 12:16:14.865250686 +0200
|
||||
@@ -44,7 +44,7 @@
|
||||
filename=self.fileName
|
||||
self.writeCmdVersionToFile(cmd,version,filename)
|
||||
else:
|
||||
- print "A argument is needed! please use \'--version\' and add the sdk version."
|
||||
+ print("A argument is needed! please use \'--version\' and add the sdk version.")
|
||||
return
|
||||
def showVersions(self,cmd,operation,stream=None):
|
||||
configureVersion='(not configure)'
|
||||
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py
|
||||
--- a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-01-24 04:08:33.000000000 +0100
|
||||
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-10-08 12:17:34.763774477 +0200
|
||||
@@ -55,7 +55,7 @@
             # _mlist = self.rds.extensionOptions[self.rds.exportDBInstance]
             self.appendList(parameterList, self.rds.extensionOptions[self.rds.exportDBInstance])
         if operation.lower() == self.rds.importDBInstance.lower():
-            # print "haha", (self.rds.extensionOptions[self.rds.importDBInstance])
+            # print("haha", (self.rds.extensionOptions[self.rds.importDBInstance]))
             # parameterList.append(self.rds.extensionOptions[self.rds.importDBInstance])
             self.appendList(parameterList, self.rds.extensionOptions[self.rds.importDBInstance])

@@ -89,8 +89,8 @@
                         importInstance:['count','filename']}

 if __name__ == '__main__':
-    # print type(rds.extensionOperations)
-    # print type(rds.extensionOptions)
-    # print rds.extensionOptions['ll']
+    # print(type(rds.extensionOperations))
+    # print(type(rds.extensionOptions))
+    # print(rds.extensionOptions['ll'])
     configure = commandConfigure()
-    print configure.showExtensionOperationHelp("ecs", "ExportInstance")
+    print(configure.showExtensionOperationHelp("ecs", "ExportInstance"))
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py	2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py	2018-10-08 12:17:59.282322043 +0200
@@ -577,7 +577,7 @@
                 operation = operations[i].strip()
                 self._getKeyFromSection(profilename,operation)
         else:
-            print 'The correct usage:aliyuncli configure get key --profile profilename'
+            print('The correct usage:aliyuncli configure get key --profile profilename')
             return

     def _getKeyFromSection(self,profilename,key):
@@ -591,7 +591,7 @@
         elif key in _WRITE_TO_CONFIG_FILE :
             self._getKeyFromFile(config_filename,sectionName,key)
         else:
-            print key,'=','None'
+            print(key,'=','None')
     def _getKeyFromFile(self,filename,section,key):
         if os.path.isfile(filename):
             with open(filename, 'r') as f:
@@ -600,9 +600,9 @@
                 start = self._configWriter.hasSectionName(section,contents)[1]
                 end = self._configWriter._getSectionEnd(start,contents)
                 value = self._configWriter._getValueInSlice(start,end,key,contents)
-                print key,'=',value
+                print(key,'=',value)
         else:
-            print key,'=None'
+            print(key,'=None')



diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py	2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py	2018-10-08 12:18:25.178844179 +0200
@@ -2,7 +2,7 @@

 def handleEndPoint(cmd,operation,keyValues):
     if not hasNecessaryArgs(keyValues):
-        print 'RegionId/EndPoint is absence'
+        print('RegionId/EndPoint is absence')
         return
     if cmd is not None:
         cmd = cmd.capitalize()
@@ -25,7 +25,7 @@
         from aliyunsdkcore.profile.region_provider import modify_point
         modify_point(cmd,regionId,endPoint)
     except Exception as e:
-        print e
+        print(e)
     pass


diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py	2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py	2018-10-08 12:18:45.458469966 +0200
@@ -111,14 +111,14 @@
     if os.path.isfile(cfgfile):
         ans = raw_input('File existed. Do you wish to overwrite it?(y/n)')
         if ans.lower() != 'y':
-            print 'Answer is No. Quit now'
+            print('Answer is No. Quit now')
             return
     with open(cfgfile, 'w+') as f:
         config.write(f)
-    print 'Your configuration is saved to %s.' % cfgfile
+    print('Your configuration is saved to %s.' % cfgfile)

 def cmd_help(args):
-    print HELP
+    print(HELP)

 def add_config(parser):
     parser.add_argument('--host', type=str, help='service host')
@@ -161,7 +161,7 @@
     return CMD_LIST.keys()
 def handleOas(pars=None):
     if pars is None:
-        print HELP
+        print(HELP)
         sys.exit(0)
     parser = ArgumentParser(prog="aliyuncli oas",formatter_class=ArgumentDefaultsHelpFormatter)

diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py	2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py	2018-10-08 12:18:59.713206928 +0200
@@ -61,7 +61,7 @@
             data = f.read()
             return data
     except (OSError, IOError) as e:
-        print e
+        print(e)
 def _getParamFromUrl(prefix,value,mode):

     req = urllib2.Request(value)
@@ -74,7 +74,7 @@
         errorMsg='Get the wrong content'
         errorClass.printInFormat(response.getcode(), errorMsg)
     except Exception as e:
-        print e
+        print(e)

 PrefixMap = {'file://': _getParamFromFile,
             'fileb://': _getParamFromFile
@@ -86,4 +86,4 @@
             'fileb://': {'mode': 'rb'},
             #'http://': {},
             #'https://': {}
-            }
\ No newline at end of file
+            }
diff -uNr a/bundled/aliyun/colorama/demos/demo07.py b/bundled/aliyun/colorama/demos/demo07.py
--- a/bundled/aliyun/colorama/demos/demo07.py	2015-01-06 11:41:47.000000000 +0100
+++ b/bundled/aliyun/colorama/demos/demo07.py	2018-10-08 12:20:25.598622106 +0200
@@ -16,10 +16,10 @@
     3a4
     """
     colorama.init()
-    print "aaa"
-    print "aaa"
-    print "aaa"
-    print forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4"
+    print("aaa")
+    print("aaa")
+    print("aaa")
+    print(forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4")


 if __name__ == '__main__':
File diff suppressed because it is too large
@@ -1,36 +0,0 @@
From c3c1f1a3005f6a6d3d03d6bf0f0ac7605838146f Mon Sep 17 00:00:00 2001
From: Hideo Yamauchi <renayama19661014@ybb.ne.jp>
Date: Tue, 26 Sep 2023 14:02:39 +0900
Subject: [PATCH] Low: storage-mon: Remove unnecessary code and fix typos.

---
 tools/storage_mon.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/tools/storage_mon.c b/tools/storage_mon.c
index 1231570c8..1aae29e58 100644
--- a/tools/storage_mon.c
+++ b/tools/storage_mon.c
@@ -318,12 +318,7 @@ static int32_t sigchld_handler(int32_t sig, void *data)
 					daemon_check_first_all_devices = TRUE;
 				}
 			}
-#if 0
-			if (shutting_down == FALSE) {
-				finished_count++;
-				test_forks[index] = 0;
-			}
-#endif
+
 			finished_count++;
 			test_forks[index] = 0;

@@ -521,7 +516,7 @@ static int test_device_main(gpointer data)
 			}
 		}
 	} else {
-		/* Rrun the child process timeout watch timer. */
+		/* Run the child process timeout watch timer. */
 		qb_loop_timer_add(storage_mon_poll_handle, QB_LOOP_MED, timeout * QB_TIME_NS_IN_SEC, NULL, child_timeout_handler, &expire_handle);
 	}
 }
Some files were not shown because too many files have changed in this diff