Auto sync2gitlab import of resource-agents-4.9.0-18.el8.src.rpm

Author: James Antill 2022-05-26 14:09:42 -04:00
parent f82d1864be
commit 9ba88241b8
34 changed files with 11353 additions and 1 deletion

.gitignore

@@ -0,0 +1,9 @@
/ClusterLabs-resource-agents-55a4e2c9.tar.gz
/aliyun-cli-2.1.10.tar.gz
/aliyun-python-sdk-core-2.13.1.tar.gz
/aliyun-python-sdk-ecs-4.9.3.tar.gz
/aliyun-python-sdk-vpc-3.0.2.tar.gz
/colorama-0.3.3.tar.gz
/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz
/pycryptodome-3.6.4.tar.gz
/pyroute2-0.4.13.tar.gz


@@ -0,0 +1,25 @@
diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py
--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py 2019-04-04 11:59:47.592768577 +0200
@@ -900,6 +900,9 @@
return """\
For detailed information on this command and its flags, run:
{command_path} --help
+
+WARNING: {command_path} is only supported for "{command_path} init" and for use
+with the agents in resource-agents.
""".format(command_path=' '.join(command.GetPath()))
diff -uNr a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py
--- a/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/googlecloudsdk/gcloud_main.py 2019-04-04 12:00:23.991142694 +0200
@@ -84,7 +84,7 @@
pkg_root = os.path.dirname(os.path.dirname(surface.__file__))
loader = cli.CLILoader(
- name='gcloud',
+ name='gcloud-ra',
command_root_directory=os.path.join(pkg_root, 'surface'),
allow_non_existing_modules=True,
version_func=VersionFunc,

7-gcp-bundled.patch

@@ -0,0 +1,23 @@
diff -uNr a/heartbeat/gcp-vpc-move-ip.in b/heartbeat/gcp-vpc-move-ip.in
--- a/heartbeat/gcp-vpc-move-ip.in 2019-04-05 09:20:26.164739897 +0200
+++ b/heartbeat/gcp-vpc-move-ip.in 2019-04-05 09:21:01.331139742 +0200
@@ -36,7 +36,7 @@
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
# Defaults
-OCF_RESKEY_gcloud_default="/usr/bin/gcloud"
+OCF_RESKEY_gcloud_default="/usr/bin/gcloud-ra"
OCF_RESKEY_configuration_default="default"
OCF_RESKEY_vpc_network_default="default"
OCF_RESKEY_interface_default="eth0"
diff -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
--- a/heartbeat/gcp-vpc-move-route.in 2019-04-05 09:20:26.180739624 +0200
+++ b/heartbeat/gcp-vpc-move-route.in 2019-04-05 09:22:28.648649593 +0200
@@ -45,6 +45,7 @@
from ocf import *
try:
+ sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')
import googleapiclient.discovery
import pyroute2
except ImportError:

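The sys.path.insert() line added above is the heart of the bundling approach: the copy of the Google client libraries shipped under /usr/lib/resource-agents/bundled/gcp is put ahead of any system-wide installation before the imports are attempted. A minimal standalone sketch of that pattern (the fallback print is illustrative only; the real agent exits with an OCF error code when the import fails):

import sys

# Prepend the directory that ships the vendored libraries so they take
# precedence over any system-wide, possibly incompatible, copies.
sys.path.insert(0, '/usr/lib/resource-agents/bundled/gcp')

try:
    import googleapiclient.discovery
    import pyroute2
except ImportError as err:
    # The real agent reports this through OCF and exits; the sketch just prints.
    print("bundled GCP libraries not available: %s" % err)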

@@ -0,0 +1,129 @@
diff -uNr a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py
--- a/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/lib/third_party/oauth2client/_pure_python_crypt.py 2019-04-04 11:56:00.292677044 +0200
@@ -19,8 +19,14 @@
certificates.
"""
+from pyasn1.codec.der import decoder
from pyasn1_modules import pem
-import rsa
+from pyasn1_modules.rfc2459 import Certificate
+from pyasn1_modules.rfc5208 import PrivateKeyInfo
+from cryptography.hazmat.primitives import serialization, hashes
+from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography import x509
+from cryptography.hazmat.backends import default_backend
import six
from oauth2client import _helpers
@@ -40,7 +46,7 @@
'-----END RSA PRIVATE KEY-----')
_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
'-----END PRIVATE KEY-----')
-_PKCS8_SPEC = None
+_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
@@ -67,7 +73,8 @@
"""
def __init__(self, pubkey):
- self._pubkey = pubkey
+ self._pubkey = serialization.load_pem_public_key(pubkey,
+ backend=default_backend())
def verify(self, message, signature):
"""Verifies a message against a signature.
@@ -84,8 +91,9 @@
"""
message = _helpers._to_bytes(message, encoding='utf-8')
try:
- return rsa.pkcs1.verify(message, signature, self._pubkey)
- except (ValueError, rsa.pkcs1.VerificationError):
+ return self._pubkey.verify(signature, message, padding.PKCS1v15(),
+ hashes.SHA256())
+ except (ValueError, TypeError, InvalidSignature):
return False
@classmethod
@@ -109,19 +117,18 @@
"""
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
- from pyasn1.codec.der import decoder
- from pyasn1_modules import rfc2459
-
- der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
- asn1_cert, remaining = decoder.decode(der, asn1Spec=rfc2459.Certificate())
+ der = x509.load_pem_x509_certificate(pem_data, default_backend())
+ asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b'':
raise ValueError('Unused bytes', remaining)
cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
- pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
+ pubkey = serialization.load_der_public_key(decoded_key,
+ backend=default_backend())
else:
- pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
+ pubkey = serialization.load_pem_public_key(decoded_key,
+ backend=default_backend())
return cls(pubkey)
@@ -134,6 +141,8 @@
def __init__(self, pkey):
self._key = pkey
+ self._pubkey = serialization.load_pem_private_key(pkey,
+ backend=default_backend())
def sign(self, message):
"""Signs a message.
@@ -145,7 +154,7 @@
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
- return rsa.pkcs1.sign(message, self._key, 'SHA-256')
+ return self._key.sign(message, padding.PKCS1v15(), hashes.SHA256())
@classmethod
def from_string(cls, key, password='notasecret'):
@@ -163,27 +172,24 @@
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
"""
- global _PKCS8_SPEC
key = _helpers._from_bytes(key) # pem expects str in Py3
marker_id, key_bytes = pem.readPemBlocksFromFile(
six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
if marker_id == 0:
- pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
- format='DER')
- elif marker_id == 1:
- from pyasn1.codec.der import decoder
- from pyasn1_modules import rfc5208
+ pkey = serialization.load_der_private_key(
+ key_bytes, password=None,
+ backend=default_backend())
- if _PKCS8_SPEC is None:
- _PKCS8_SPEC = rfc5208.PrivateKeyInfo()
+ elif marker_id == 1:
key_info, remaining = decoder.decode(
key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b'':
raise ValueError('Unused bytes', remaining)
pkey_info = key_info.getComponentByName('privateKey')
- pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
- format='DER')
+ pkey = serialization.load_der_private_key(
+ pkey_info.asOctets(), password=None,
+ backend=default_backend())
else:
raise ValueError('No key could be detected.')

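The hunks above replace the python-rsa dependency in the bundled oauth2client with the cryptography package while keeping the PKCS#1 v1.5 / SHA-256 scheme. A self-contained sketch of the target API, assuming only the cryptography package (the key pair is generated on the fly purely for illustration; the patched module loads keys from PEM or DER data instead):

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

# Throwaway key pair for the demo; oauth2client loads service-account keys.
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
public_key = private_key.public_key()

message = b"service account assertion"
signature = private_key.sign(message, padding.PKCS1v15(), hashes.SHA256())

try:
    # verify() raises InvalidSignature instead of returning False.
    public_key.verify(signature, message, padding.PKCS1v15(), hashes.SHA256())
    print("signature ok")
except InvalidSignature:
    print("signature mismatch")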


@@ -0,0 +1,15 @@
--- a/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:45:38.432860930 +0200
+++ b/heartbeat/aliyun-vpc-move-ip 2020-06-09 13:51:06.341211557 +0200
@@ -35,10 +35,10 @@
USAGE="usage: $0 {start|stop|status|meta-data}";
if [ "${OCF_RESKEY_aliyuncli}" = "detect" ]; then
- OCF_RESKEY_aliyuncli="$(which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)"
+ OCF_RESKEY_aliyuncli="$(which aliyuncli-ra 2> /dev/null || which aliyuncli 2> /dev/null || which aliyun 2> /dev/null)"
fi
-if [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli' ]; then
+if [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli-ra' ] || [ "${OCF_RESKEY_aliyuncli##*/}" = 'aliyuncli' ]; then
OUTPUT="text"
EXECUTING='{ print $3 }'
IFS_=" "


@@ -0,0 +1,398 @@
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2019-02-19 14:40:39.656330971 +0100
@@ -13,7 +13,7 @@
def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
return filename, "A file name is needed! please use \'--filename\' and add the file name."
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2019-02-19 14:41:48.927128430 +0100
@@ -13,7 +13,7 @@
def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
print("A profile is needed! please use \'--filename\' and add the profile name.")
@@ -21,7 +21,7 @@
def getInstanceCount(self,keyValues):
count = 1
- if keyValues.has_key('--instancecount') and len(keyValues['--instancecount']) > 0:
+ if '--instancecount' in keyValues and len(keyValues['--instancecount']) > 0:
if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0:
count = keyValues['--instancecount'][0]
else:
@@ -113,7 +113,7 @@
def isAllocatePublicIpAddress(self,keyValues):
_publicIp = False
- if keyValues.has_key('--allocatepublicip') and len(keyValues['--allocatepublicip']) > 0:
+ if '--allocatepublicip' in keyValues and len(keyValues['--allocatepublicip']) > 0:
if keyValues['--allocatepublicip'][0] == "yes":
_publicIp = True
return _publicIp
@@ -125,7 +125,7 @@
'''
data = json.loads(jsonbody)
'''
- if data.has_key('InstanceId') and len(data['InstanceId']) > 0:
+ if 'InstanceId' in data and len(data['InstanceId']) > 0:
instanceId = data['InstanceId']
except Exception as e:
pass
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2019-02-19 14:42:11.772731833 +0100
@@ -38,7 +38,7 @@
def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
return filename, "A file name is needed! please use \'--filename\' and add the file name."
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 12:08:17.331785393 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2019-02-19 14:39:09.247900469 +0100
@@ -13,7 +13,7 @@
def getFileName(self,keyValues):
filename = None
- if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
+ if '--filename' in keyValues and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
return filename, "A filename is needed! please use \'--filename\' and add the file name."
@@ -21,7 +21,7 @@
def getInstanceCount(self,keyValues):
count = 1
import_count = "--count"
- if keyValues.has_key(import_count) and len(keyValues[import_count]) > 0:
+ if import_count in keyValues and len(keyValues[import_count]) > 0:
if keyValues[import_count][0].isdigit() and int(keyValues[import_count][0]) >= 0:
count = keyValues[import_count][0]
else:
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userConfigHandler.py 2019-02-19 11:01:46.116653274 +0100
@@ -17,37 +17,37 @@
def getConfigHandlerOptions(self):
return [ConfigCmd.name]
-
+
def showConfig(self):
_credentialsPath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.credentials)
_configurePath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.configure)
config = dict()
configContent = dict()
- credentialsContent = dict ()
- if os.path.exists(_configurePath):
+ credentialsContent = dict ()
+ if os.path.exists(_configurePath):
for line in open(_configurePath):
line = line.strip('\n')
if line.find('=') > 0:
list = line.split("=",1)
- configContent[list[0]] = list[1]
- else:
- pass
- config['configure'] = configContent
- if os.path.exists(_credentialsPath):
- for line in open(_credentialsPath):
+ configContent[list[0]] = list[1]
+ else:
+ pass
+ config['configure'] = configContent
+ if os.path.exists(_credentialsPath):
+ for line in open(_credentialsPath):
line = line.strip('\n')
if line.find('=') > 0:
list = line.split("=",1)
- credentialsContent[list[0]] = list[1]
- else:
- pass
- config ['credentials'] = credentialsContent
- response.display_response("showConfigure",config,'table')
+ credentialsContent[list[0]] = list[1]
+ else:
+ pass
+ config ['credentials'] = credentialsContent
+ response.display_response("showConfigure",config,'table')
def importConfig():
pass
def exportConfig():
pass
-
+
if __name__ == "__main__":
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2019-02-19 14:40:12.267806439 +0100
@@ -20,7 +20,7 @@
def handleProfileCmd(self, cmd, keyValues):
if cmd.lower() == ProfileCmd.useProfile.lower(): # confirm command is right
#check --name is valid
- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0:
+ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0:
_value = keyValues[ProfileCmd.name][0] # use the first value
self.extensionCliHandler.setUserProfile(_value)
else:
@@ -34,7 +34,7 @@
newProfileName = ''
if cmd.lower() == ProfileCmd.addProfile.lower(): # confirm command is right
#check --name is valid
- if keyValues.has_key(ProfileCmd.name) and len(keyValues[ProfileCmd.name]) > 0:
+ if ProfileCmd.name in keyValues and len(keyValues[ProfileCmd.name]) > 0:
_value = keyValues[ProfileCmd.name][0] # check the first value
# only input key and secret
newProfileName = _value
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2019-02-19 14:35:32.009660989 +0100
@@ -137,9 +137,9 @@
values.append(self.args[index])
index = index + 1
keyValues[currentValue] = values
- if keyValues.has_key(keystr) and keyValues[keystr].__len__() > 0:
+ if keystr in keyValues and keyValues[keystr].__len__() > 0:
_key = keyValues[keystr][0]
- if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0:
+ if secretstr in keyValues and keyValues[secretstr].__len__() > 0:
_secret = keyValues[secretstr][0]
#print("accesskeyid: ", _key , "accesskeysecret: ",_secret)
return _key, _secret
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyuncli.py 2019-02-19 13:35:35.738680413 +0100
@@ -19,8 +19,9 @@
'''
import sys
-reload(sys)
-sys.setdefaultencoding('utf-8')
+if sys.version_info[0] < 3:
+ reload(sys)
+ sys.setdefaultencoding('utf-8')
__author__ = 'xixi.xxx'
import aliyunCliMain
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2019-02-19 11:15:19.920089641 +0100
@@ -18,7 +18,7 @@
'''
import aliyunCliConfiugre
-import urllib2
+import urllib3
import re
import os
import platform
@@ -151,7 +151,7 @@
# this functino will get the latest version
def _getLatestTimeFromServer(self):
try:
- f = urllib2.urlopen(self.configure.server_url,data=None,timeout=5)
+ f = urllib3.urlopen(self.configure.server_url,data=None,timeout=5)
s = f.read()
return s
except Exception as e:
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 12:08:17.332785376 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2019-02-19 14:37:28.221649497 +0100
@@ -26,7 +26,7 @@
import aliyunSdkConfigure
import json
import cliError
-import urllib2
+import urllib3
import handleEndPoint
from __init__ import __version__
@@ -259,7 +259,7 @@
def changeEndPoint(self, classname, keyValues):
endpoint = "Endpoint"
try:
- if keyValues.has_key(endpoint) and keyValues[endpoint].__len__() > 0:
+ if endpoint in keyValues and keyValues[endpoint].__len__() > 0:
classname._RestApi__domain = keyValues[endpoint][0]
except Exception as e:
pass
@@ -444,10 +444,10 @@
def getTempVersion(self,keyValues):
key='--version'
- if keyValues is not None and keyValues.has_key(key):
+ if keyValues is not None and key in keyValues:
return keyValues.get(key)
key = 'version'
- if keyValues is not None and keyValues.has_key(key):
+ if keyValues is not None and key in keyValues:
return keyValues.get(key)
def getVersionFromFile(self,cmd):
@@ -513,7 +513,7 @@
self.checkForServer(response,cmd,operation)
def getRequestId(self,response):
try:
- if response.has_key('RequestId') and len(response['RequestId']) > 0:
+ if 'RequestId' in response and len(response['RequestId']) > 0:
requestId = response['RequestId']
return requestId
except Exception:
@@ -532,7 +532,7 @@
ua = ""
url = configure.server_url + "?requesId=" + requestId + "&ak=" + ak +"&ua="+ua+"&cmd="+cmd+"&operation="+operation
try:
- f = urllib2.urlopen(url,data=None,timeout=5)
+ f = urllib3.urlopen(url,data=None,timeout=5)
s = f.read()
return s
except Exception :
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 12:08:17.333785359 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2019-02-19 14:38:04.032029661 +0100
@@ -39,7 +39,7 @@
def sdkConfigure(self,cmd,operation):
keyValues = self.parser._getKeyValues()
- if keyValues.has_key('--version') and len(keyValues['--version']) > 0:
+ if '--version' in keyValues and len(keyValues['--version']) > 0:
version=keyValues['--version'][0]
filename=self.fileName
self.writeCmdVersionToFile(cmd,version,filename)
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 12:08:17.333785359 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2019-02-19 11:12:58.670708353 +0100
@@ -23,6 +23,8 @@
import aliyunCliParser
import platform
+if sys.version_info[0] > 2:
+ raw_input = input
OSS_CREDS_FILENAME = "%s/.aliyuncli/osscredentials" % os.path.expanduser('~')
OSS_CONFIG_SECTION = 'OSSCredentials'
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 12:08:17.333785359 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2019-02-19 11:14:58.926181598 +0100
@@ -19,7 +19,7 @@
#/usr/bin/env python
#!-*- coding:utf-8 -*-
import os
-import urllib2
+import urllib3
import cliError
@@ -64,9 +64,9 @@
print(e)
def _getParamFromUrl(prefix,value,mode):
- req = urllib2.Request(value)
+ req = urllib3.Request(value)
try:
- response=urllib2.urlopen(req)
+ response=urllib3.urlopen(req)
if response.getcode() == 200:
return response.read()
else:
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/six.py b/bundled/aliyun/aliyun-cli/aliyuncli/six.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/six.py 2019-02-19 11:14:40.505262286 +0100
@@ -340,8 +340,8 @@
_urllib_error_moved_attributes = [
- MovedAttribute("URLError", "urllib2", "urllib.error"),
- MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("URLError", "urllib3", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib3", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
@@ -359,34 +359,34 @@
_urllib_request_moved_attributes = [
- MovedAttribute("urlopen", "urllib2", "urllib.request"),
- MovedAttribute("install_opener", "urllib2", "urllib.request"),
- MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("urlopen", "urllib3", "urllib.request"),
+ MovedAttribute("install_opener", "urllib3", "urllib.request"),
+ MovedAttribute("build_opener", "urllib3", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
- MovedAttribute("Request", "urllib2", "urllib.request"),
- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
- MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
- MovedAttribute("FileHandler", "urllib2", "urllib.request"),
- MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("Request", "urllib3", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib3", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib3", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib3", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib3", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib3", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib3", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib3", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib3", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib3", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
diff -uNr a/bundled/aliyun/aliyun-cli/setup.py b/bundled/aliyun/aliyun-cli/setup.py
--- a/bundled/aliyun/aliyun-cli/setup.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/setup.py 2019-02-19 13:33:29.069848394 +0100
@@ -24,7 +24,7 @@
install_requires = [
'colorama>=0.2.5,<=0.3.3',
- 'jmespath>=0.7.0,<=0.7.1',
+ 'jmespath>=0.7.0',
]
def main():
setup(

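Most of the hunks above are mechanical Python 3 fixes for the bundled aliyun CLI: dict.has_key() no longer exists in Python 3, so every membership test becomes an "in" check, which behaves identically on Python 2. A two-line illustration with a hypothetical keyValues dict of parsed CLI arguments:

keyValues = {'--filename': ['instances.json']}  # hypothetical parsed arguments

# Python 2 only:   keyValues.has_key('--filename')
# Python 2 and 3:
if '--filename' in keyValues and len(keyValues['--filename']) > 0:
    print(keyValues['--filename'][0])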

@@ -0,0 +1,14 @@
--- a/bundled/gcp/google-cloud-sdk/bin/gcloud 1980-01-01 09:00:00.000000000 +0100
+++ b/bundled/gcp/google-cloud-sdk/bin/gcloud 2021-10-14 11:30:17.726138166 +0200
@@ -128,6 +128,11 @@
fi
}
+if [ -z "$CLOUDSDK_PYTHON" ]; then
+ CLOUDSDK_PYTHON="/usr/libexec/platform-python"
+ CLOUDSDK_PYTHON_SITEPACKAGES=1
+fi
+
setup_cloudsdk_python
# $PYTHONHOME can interfere with gcloud. Users should use

File diff suppressed because it is too large.


@@ -0,0 +1,770 @@
diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
--- a/heartbeat/Makefile.am 2022-03-15 16:14:29.355209012 +0100
+++ b/heartbeat/Makefile.am 2022-03-15 16:18:35.917048467 +0100
@@ -217,6 +217,7 @@
lvm-clvm.sh \
lvm-plain.sh \
lvm-tag.sh \
+ openstack-common.sh \
ora-common.sh \
mysql-common.sh \
nfsserver-redhat.sh \
diff --color -uNr a/heartbeat/openstack-cinder-volume b/heartbeat/openstack-cinder-volume
--- a/heartbeat/openstack-cinder-volume 2022-03-15 16:14:29.370209063 +0100
+++ b/heartbeat/openstack-cinder-volume 2022-03-15 16:17:36.231840008 +0100
@@ -34,11 +34,11 @@
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh
+
# Defaults
-OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
OCF_RESKEY_volume_local_check_default="true"
-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
: ${OCF_RESKEY_volume_local_check=${OCF_RESKEY_volume_local_check_default}}
#######################################################################
@@ -68,14 +68,11 @@
<shortdesc lang="en">Attach a cinder volume</shortdesc>
<parameters>
-<parameter name="openstackcli">
-<longdesc lang="en">
-Path to command line tools for openstack.
-</longdesc>
-<shortdesc lang="en">Path to Openstack CLI tool</shortdesc>
-<content type="string" default="${OCF_RESKEY_openstackcli_default}" />
-</parameter>
+END
+common_meta_data
+
+cat <<END
<parameter name="volume_local_check">
<longdesc lang="en">
This option allows the cluster to monitor the cinder volume presence without
@@ -85,28 +82,19 @@
<content type="boolean" default="${OCF_RESKEY_volume_local_check_default}" />
</parameter>
-<parameter name="openrc" required="1">
-<longdesc lang="en">
-Valid Openstack credentials as openrc file from api_access/openrc.
-</longdesc>
-<shortdesc lang="en">openrc file</shortdesc>
-<content type="string" />
-</parameter>
-
<parameter name="volume_id" required="1">
<longdesc lang="en">
-Cinder volume identifier to use to attach the bloc storage.
+Cinder volume identifier to use to attach the block storage.
</longdesc>
<shortdesc lang="en">Volume ID</shortdesc>
<content type="string" />
</parameter>
-
</parameters>
<actions>
<action name="start" timeout="180s" />
<action name="stop" timeout="180s" />
-<action name="monitor" depth="0" timeout="30s" interval="60s" />
+<action name="monitor" depth="0" timeout="180s" interval="60s" />
<action name="validate-all" timeout="5s" />
<action name="meta-data" timeout="5s" />
</actions>
@@ -127,17 +115,7 @@
osvol_validate() {
check_binary "$OCF_RESKEY_openstackcli"
- if [ -z "$OCF_RESKEY_openrc" ]; then
- ocf_exit_reason "openrc parameter not set"
- return $OCF_ERR_CONFIGURED
- fi
-
- if [ ! -f "$OCF_RESKEY_openrc" ] ; then
- ocf_exit_reason "openrc file not found"
- return $OCF_ERR_CONFIGURED
- fi
-
- . $OCF_RESKEY_openrc
+ get_config
if ! $OCF_RESKEY_openstackcli volume list|grep -q $OCF_RESKEY_volume_id ; then
ocf_exit_reason "volume-id $OCF_RESKEY_volume_id not found"
diff --color -uNr a/heartbeat/openstack-common.sh b/heartbeat/openstack-common.sh
--- a/heartbeat/openstack-common.sh 1970-01-01 01:00:00.000000000 +0100
+++ b/heartbeat/openstack-common.sh 2022-03-15 16:17:36.232840011 +0100
@@ -0,0 +1,147 @@
+OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
+OCF_RESKEY_insecure_default="false"
+
+: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
+: ${OCF_RESKEY_insecure=${OCF_RESKEY_insecure_default}}
+
+if ocf_is_true "${OCF_RESKEY_insecure}"; then
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --insecure"
+fi
+
+common_meta_data() {
+ cat <<END
+
+<parameter name="cloud" required="0">
+<longdesc lang="en">
+Openstack cloud (from ~/.config/openstack/clouds.yaml or /etc/openstack/clouds.yaml).
+</longdesc>
+<shortdesc lang="en">Cloud from clouds.yaml</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="openrc" required="0">
+<longdesc lang="en">
+Openstack credentials as openrc file from api_access/openrc.
+</longdesc>
+<shortdesc lang="en">openrc file</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="auth_url" required="0">
+<longdesc lang="en">
+Keystone Auth URL
+</longdesc>
+<shortdesc lang="en">Keystone Auth URL</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="username" required="0">
+<longdesc lang="en">
+Username.
+</longdesc>
+<shortdesc lang="en">Username</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="password" required="0">
+<longdesc lang="en">
+Password.
+</longdesc>
+<shortdesc lang="en">Password</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="project_name" required="0">
+<longdesc lang="en">
+Keystone Project.
+</longdesc>
+<shortdesc lang="en">Keystone Project</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="user_domain_name" required="0">
+<longdesc lang="en">
+Keystone User Domain Name.
+</longdesc>
+<shortdesc lang="en">Keystone User Domain Name</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="project_domain_name" required="0">
+<longdesc lang="en">
+Keystone Project Domain Name.
+</longdesc>
+<shortdesc lang="en">Keystone Project Domain Name</shortdesc>
+<content type="string" />
+</parameter>
+
+<parameter name="openstackcli">
+<longdesc lang="en">
+Path to command line tools for openstack.
+</longdesc>
+<shortdesc lang="en">Path to Openstack CLI tool</shortdesc>
+<content type="string" default="${OCF_RESKEY_openstackcli_default}" />
+</parameter>
+
+<parameter name="insecure">
+<longdesc lang="en">
+Allow insecure connections
+</longdesc>
+<shortdesc lang="en">Allow insecure connections</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_insecure_default}" />
+</parameter>
+END
+}
+
+get_config() {
+ if [ -n "$OCF_RESKEY_cloud" ]; then
+ TILDE=$(echo ~)
+ clouds_yaml="$TILDE/.config/openstack/clouds.yaml"
+ if [ ! -f "$clouds_yaml" ]; then
+ clouds_yaml="/etc/openstack/clouds.yaml"
+ fi
+ if [ ! -f "$clouds_yaml" ]; then
+ ocf_exit_reason "~/.config/openstack/clouds.yaml and /etc/openstack/clouds.yaml does not exist"
+ exit $OCF_ERR_CONFIGURED
+ fi
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-cloud $OCF_RESKEY_cloud"
+ elif [ -n "$OCF_RESKEY_openrc" ]; then
+ if [ ! -f "$OCF_RESKEY_openrc" ]; then
+ ocf_exit_reason "$OCF_RESKEY_openrc does not exist"
+ exit $OCF_ERR_CONFIGURED
+ fi
+ . $OCF_RESKEY_openrc
+ else
+ if [ -z "$OCF_RESKEY_auth_url" ]; then
+ ocf_exit_reason "auth_url not set"
+ exit $OCF_ERR_CONFIGURED
+ fi
+ if [ -z "$OCF_RESKEY_username" ]; then
+ ocf_exit_reason "username not set"
+ exit $OCF_ERR_CONFIGURED
+ fi
+ if [ -z "$OCF_RESKEY_password" ]; then
+ ocf_exit_reason "password not set"
+ exit $OCF_ERR_CONFIGURED
+ fi
+ if [ -z "$OCF_RESKEY_project_name" ]; then
+ ocf_exit_reason "project_name not set"
+ exit $OCF_ERR_CONFIGURED
+ fi
+ if [ -z "$OCF_RESKEY_user_domain_name" ]; then
+ ocf_exit_reason "user_domain_name not set"
+ exit $OCF_ERR_CONFIGURED
+ fi
+ if [ -z "$OCF_RESKEY_project_domain_name" ]; then
+ ocf_exit_reason "project_domain_name not set"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-auth-url $OCF_RESKEY_auth_url"
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-username $OCF_RESKEY_username"
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-password $OCF_RESKEY_password"
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-name $OCF_RESKEY_project_name"
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-user-domain-name $OCF_RESKEY_user_domain_name"
+ OCF_RESKEY_openstackcli="${OCF_RESKEY_openstackcli} --os-project-domain-name $OCF_RESKEY_project_domain_name"
+ fi
+}
diff --color -uNr a/heartbeat/openstack-floating-ip b/heartbeat/openstack-floating-ip
--- a/heartbeat/openstack-floating-ip 2022-03-15 16:14:29.370209063 +0100
+++ b/heartbeat/openstack-floating-ip 2022-03-15 16:17:36.233840014 +0100
@@ -34,10 +34,9 @@
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
-# Defaults
-OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh
-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
+# Defaults
#######################################################################
@@ -67,22 +66,11 @@
<shortdesc lang="en">Move a floating IP</shortdesc>
<parameters>
-<parameter name="openstackcli">
-<longdesc lang="en">
-Path to command line tools for openstack.
-</longdesc>
-<shortdesc lang="en">Path to Openstack CLI tool</shortdesc>
-<content type="string" default="${OCF_RESKEY_openstackcli_default}" />
-</parameter>
+END
-<parameter name="openrc" required="1">
-<longdesc lang="en">
-Valid Openstack credentials as openrc file from api_access/openrc.
-</longdesc>
-<shortdesc lang="en">openrc file</shortdesc>
-<content type="string" />
-</parameter>
+common_meta_data
+cat <<END
<parameter name="ip_id" required="1">
<longdesc lang="en">
Floating IP Identifier.
@@ -104,7 +92,7 @@
<actions>
<action name="start" timeout="180s" />
<action name="stop" timeout="180s" />
-<action name="monitor" depth="0" timeout="30s" interval="60s" />
+<action name="monitor" depth="0" timeout="180s" interval="60s" />
<action name="validate-all" timeout="5s" />
<action name="meta-data" timeout="5s" />
</actions>
@@ -115,17 +103,7 @@
osflip_validate() {
check_binary "$OCF_RESKEY_openstackcli"
- if [ -z "$OCF_RESKEY_openrc" ]; then
- ocf_exit_reason "openrc parameter not set"
- return $OCF_ERR_CONFIGURED
- fi
-
- if [ ! -f "$OCF_RESKEY_openrc" ] ; then
- ocf_exit_reason "openrc file not found"
- return $OCF_ERR_CONFIGURED
- fi
-
- . $OCF_RESKEY_openrc
+ get_config
if ! $OCF_RESKEY_openstackcli floating ip list|grep -q $OCF_RESKEY_ip_id ; then
ocf_exit_reason "ip-id $OCF_RESKEY_ip_id not found"
diff --color -uNr a/heartbeat/openstack-info b/heartbeat/openstack-info
--- a/heartbeat/openstack-info 1970-01-01 01:00:00.000000000 +0100
+++ b/heartbeat/openstack-info 2022-03-15 16:17:36.234840018 +0100
@@ -0,0 +1,270 @@
+#!/bin/sh
+#
+#
+# OCF resource agent to set attributes from Openstack instance details.
+# It records (in the CIB) various attributes of a node
+#
+# Copyright (c) 2018 Mathieu Grzybek
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh
+
+# Defaults
+OCF_RESKEY_pidfile_default="$HA_RSCTMP/OSInfo-${OCF_RESOURCE_HOSTNAME}"
+OCF_RESKEY_delay_default="0"
+OCF_RESKEY_clone_default="0"
+OCF_RESKEY_curlcli_default="/usr/bin/curl"
+OCF_RESKEY_pythoncli_default="/usr/bin/python"
+
+: ${OCF_RESKEY_curlcli=${OCF_RESKEY_curlcli_default}}
+: ${OCF_RESKEY_pythoncli=${OCF_RESKEY_pythoncli_default}}
+: ${OCF_RESKEY_pidfile=${OCF_RESKEY_pidfile_default}}
+: ${OCF_RESKEY_delay=${OCF_RESKEY_delay_default}}
+: ${OCF_RESKEY_clone=${OCF_RESKEY_clone_default}}
+
+#######################################################################
+
+meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="openstack-info" version="1.0">
+<version>1.0</version>
+
+<longdesc lang="en">
+OCF resource agent to set attributes from Openstack instance details.
+It records (in the CIB) various attributes of a node.
+Sample output:
+ openstack_az : nova
+ openstack_flavor : c1.small
+ openstack_id : 60ac4343-5828-49b1-8aac-7c69b1417f31
+ openstack_ports : 7960d889-9750-4160-bf41-c69a41ad72d9:96530d18-57a3-4718-af32-30f2a74c22a2,b0e55a06-bd75-468d-8baa-22cfeb65799f:a55ae917-8016-4b1e-8ffa-04311b9dc7d6
+
+The layout of openstack_ports is a comma-separated list of tuples "subnet_id:port_id".
+</longdesc>
+<shortdesc lang="en">Records various node attributes in the CIB</shortdesc>
+
+<parameters>
+END
+
+common_meta_data
+
+ cat <<END
+<parameter name="pidfile" unique="0">
+<longdesc lang="en">PID file</longdesc>
+<shortdesc lang="en">PID file</shortdesc>
+<content type="string" default="${OCF_RESKEY_pidfile_default}" />
+</parameter>
+
+<parameter name="delay" unique="0">
+<longdesc lang="en">Interval to allow values to stabilize</longdesc>
+<shortdesc lang="en">Dampening Delay</shortdesc>
+<content type="string" default="${OCF_RESKEY_delay_default}" />
+</parameter>
+
+<parameter name="curlcli">
+<longdesc lang="en">
+Path to command line cURL binary.
+</longdesc>
+<shortdesc lang="en">Path to cURL binary</shortdesc>
+<content type="string" default="${OCF_RESKEY_curlcli_default}" />
+</parameter>
+
+<parameter name="pythoncli">
+<longdesc lang="en">
+Path to command line Python interpreter.
+</longdesc>
+<shortdesc lang="en">Path to Python interpreter</shortdesc>
+<content type="string" default="${OCF_RESKEY_pythoncli_default}" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="180s" />
+<action name="stop" timeout="180s" />
+<action name="monitor" timeout="30s" interval="60s"/>
+<action name="meta-data" timeout="5s" />
+<action name="validate-all" timeout="20s" />
+</actions>
+</resource-agent>
+END
+}
+
+#######################################################################
+
+OSInfoStats() {
+ local result
+ local value
+ local node
+ local node_id
+
+ get_config
+
+ # Nova data: server ID
+ node_id=$($OCF_RESKEY_curlcli \
+ -s http://169.254.169.254/openstack/latest/meta_data.json |
+ $OCF_RESKEY_pythoncli -m json.tool |
+ grep -P '\"uuid\": \".*\",$' |
+ grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}')
+
+ if [ $? -ne 0 ] ; then
+ ocf_exit_reason "Cannot find server ID"
+ exit $OCF_ERR_GENERIC
+ fi
+
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_id -v "$node_id"
+
+ # Nova data: flavor
+ value=$($OCF_RESKEY_openstackcli server show \
+ --format value \
+ --column flavor \
+ $node_id)
+
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_flavor -v "$value"
+
+ # Nova data: availability zone
+ value=$($OCF_RESKEY_openstackcli server show \
+ --format value \
+ --column OS-EXT-AZ:availability_zone \
+ $node_id)
+
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_az -v "$value"
+
+ # Network data: ports
+ value=""
+ for port_id in $($OCF_RESKEY_openstackcli port list \
+ --format value \
+ --column id \
+ --server $node_id); do
+ subnet_id=$($OCF_RESKEY_openstackcli port show \
+ --format json \
+ --column fixed_ips \
+ ${port_id} | grep -P '\"subnet_id\": \".*\",$' |
+ grep -P -o '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}')
+ value+="${subnet_id}:${port_id},"
+ done
+ value=$(echo ${value} | sed -e 's/,$//g')
+
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_ports -v "$value"
+
+ if [ ! -z "$OS_REGION_NAME" ] ; then
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_region -v "$OS_REGION_NAME"
+ fi
+
+ if [ ! -z "$OS_TENANT_ID" ] ; then
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_tenant_id -v "$OS_TENANT_ID"
+
+ if [ ! -z "$OS_TENANT_NAME" ] ; then
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_tenant_name -v "$OS_TENANT_NAME"
+ fi
+ else
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_project_id -v "$OS_PROJECT_ID"
+
+ if [ ! -z "$OS_PROJECT_NAME" ] ; then
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -n openstack_project_name -v "$OS_PROJECT_NAME"
+ fi
+ fi
+
+}
+
+OSInfo_usage() {
+ cat <<END
+usage: $0 {start|stop|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+}
+
+OSInfo_start() {
+ echo $OCF_RESKEY_clone > $OCF_RESKEY_pidfile
+ OSInfoStats
+ exit $OCF_SUCCESS
+}
+
+OSInfo_stop() {
+ rm -f $OCF_RESKEY_pidfile
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_id
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_flavor
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_az
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_ports
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_region
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_tenant_id
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_tenant_name
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_project_id
+ ${HA_SBIN_DIR}/attrd_updater ${OCF_RESKEY_delay} -D -n openstack_project_name
+ exit $OCF_SUCCESS
+}
+
+OSInfo_monitor() {
+ if [ -f "$OCF_RESKEY_pidfile" ] ; then
+ OSInfoStats
+ exit $OCF_RUNNING
+ fi
+ exit $OCF_NOT_RUNNING
+}
+
+OSInfo_validate() {
+ check_binary "$OCF_RESKEY_curlcli"
+ check_binary "$OCF_RESKEY_openstackcli"
+ check_binary "$OCF_RESKEY_pythoncli"
+
+ return $OCF_SUCCESS
+}
+
+if [ $# -ne 1 ]; then
+ OSInfo_usage
+ exit $OCF_ERR_ARGS
+fi
+
+if [ x != x${OCF_RESKEY_delay} ]; then
+ OCF_RESKEY_delay="-d ${OCF_RESKEY_delay}"
+fi
+
+case $__OCF_ACTION in
+meta-data) meta_data
+ exit $OCF_SUCCESS
+ ;;
+start) OSInfo_validate || exit $?
+ OSInfo_start
+ ;;
+stop) OSInfo_stop
+ ;;
+monitor) OSInfo_monitor
+ ;;
+validate-all) OSInfo_validate
+ ;;
+usage|help) OSInfo_usage
+ exit $OCF_SUCCESS
+ ;;
+*) OSInfo_usage
+ exit $OCF_ERR_UNIMPLEMENTED
+ ;;
+esac
+
+exit $?
diff --color -uNr a/heartbeat/openstack-info.in b/heartbeat/openstack-info.in
--- a/heartbeat/openstack-info.in 2022-03-15 16:14:29.370209063 +0100
+++ b/heartbeat/openstack-info.in 2022-03-15 16:17:36.234840018 +0100
@@ -32,16 +32,16 @@
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh
+
# Defaults
OCF_RESKEY_pidfile_default="$HA_RSCTMP/OSInfo-${OCF_RESOURCE_HOSTNAME}"
OCF_RESKEY_delay_default="0"
OCF_RESKEY_clone_default="0"
OCF_RESKEY_curlcli_default="/usr/bin/curl"
-OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
OCF_RESKEY_pythoncli_default="@PYTHON@"
: ${OCF_RESKEY_curlcli=${OCF_RESKEY_curlcli_default}}
-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
: ${OCF_RESKEY_pythoncli=${OCF_RESKEY_pythoncli_default}}
: ${OCF_RESKEY_pidfile=${OCF_RESKEY_pidfile_default}}
: ${OCF_RESKEY_delay=${OCF_RESKEY_delay_default}}
@@ -70,25 +70,23 @@
<shortdesc lang="en">Records various node attributes in the CIB</shortdesc>
<parameters>
+END
+
+common_meta_data
+
+ cat <<END
<parameter name="pidfile" unique="0">
<longdesc lang="en">PID file</longdesc>
<shortdesc lang="en">PID file</shortdesc>
<content type="string" default="${OCF_RESKEY_pidfile_default}" />
</parameter>
+
<parameter name="delay" unique="0">
<longdesc lang="en">Interval to allow values to stabilize</longdesc>
<shortdesc lang="en">Dampening Delay</shortdesc>
<content type="string" default="${OCF_RESKEY_delay_default}" />
</parameter>
-<parameter name="openrc" required="1">
-<longdesc lang="en">
-Valid Openstack credentials as openrc file from api_access/openrc.
-</longdesc>
-<shortdesc lang="en">openrc file</shortdesc>
-<content type="string" />
-</parameter>
-
<parameter name="curlcli">
<longdesc lang="en">
Path to command line cURL binary.
@@ -97,14 +95,6 @@
<content type="string" default="${OCF_RESKEY_curlcli_default}" />
</parameter>
-<parameter name="openstackcli">
-<longdesc lang="en">
-Path to command line tools for openstack.
-</longdesc>
-<shortdesc lang="en">Path to Openstack CLI tool</shortdesc>
-<content type="string" default="${OCF_RESKEY_openstackcli_default}" />
-</parameter>
-
<parameter name="pythoncli">
<longdesc lang="en">
Path to command line Python interpreter.
@@ -116,9 +106,9 @@
</parameters>
<actions>
-<action name="start" timeout="20s" />
-<action name="stop" timeout="20s" />
-<action name="monitor" timeout="20s" interval="60s"/>
+<action name="start" timeout="180s" />
+<action name="stop" timeout="180s" />
+<action name="monitor" timeout="180s" interval="60s"/>
<action name="meta-data" timeout="5s" />
<action name="validate-all" timeout="20s" />
</actions>
@@ -134,7 +124,7 @@
local node
local node_id
- . $OCF_RESKEY_openrc
+ get_config
# Nova data: server ID
node_id=$($OCF_RESKEY_curlcli \
@@ -244,16 +234,6 @@
check_binary "$OCF_RESKEY_openstackcli"
check_binary "$OCF_RESKEY_pythoncli"
- if [ -z "$OCF_RESKEY_openrc" ]; then
- ocf_exit_reason "openrc parameter not set"
- return $OCF_ERR_CONFIGURED
- fi
-
- if [ ! -f "$OCF_RESKEY_openrc" ] ; then
- ocf_exit_reason "openrc file not found"
- return $OCF_ERR_CONFIGURED
- fi
-
return $OCF_SUCCESS
}
diff --color -uNr a/heartbeat/openstack-virtual-ip b/heartbeat/openstack-virtual-ip
--- a/heartbeat/openstack-virtual-ip 2022-03-15 16:14:29.370209063 +0100
+++ b/heartbeat/openstack-virtual-ip 2022-03-15 16:17:36.235840021 +0100
@@ -34,10 +34,9 @@
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
-# Defaults
-OCF_RESKEY_openstackcli_default="/usr/bin/openstack"
+. ${OCF_FUNCTIONS_DIR}/openstack-common.sh
-: ${OCF_RESKEY_openstackcli=${OCF_RESKEY_openstackcli_default}}
+# Defaults
#######################################################################
@@ -68,22 +67,11 @@
<shortdesc lang="en">Move a virtual IP</shortdesc>
<parameters>
-<parameter name="openstackcli">
-<longdesc lang="en">
-Path to command line tools for openstack.
-</longdesc>
-<shortdesc lang="en">Path to Openstack CLI tool</shortdesc>
-<content type="string" default="${OCF_RESKEY_openstackcli_default}" />
-</parameter>
+END
-<parameter name="openrc" required="1">
-<longdesc lang="en">
-Valid Openstack credentials as openrc file from api_access/openrc.
-</longdesc>
-<shortdesc lang="en">openrc file</shortdesc>
-<content type="string" />
-</parameter>
+common_meta_data
+cat <<END
<parameter name="ip" required="1">
<longdesc lang="en">
Virtual IP Address.
@@ -105,7 +93,7 @@
<actions>
<action name="start" timeout="180s" />
<action name="stop" timeout="180s" />
-<action name="monitor" depth="0" timeout="30s" interval="60s" />
+<action name="monitor" depth="0" timeout="180s" interval="60s" />
<action name="validate-all" timeout="5s" />
<action name="meta-data" timeout="5s" />
</actions>
@@ -128,17 +116,7 @@
osvip_validate() {
check_binary "$OCF_RESKEY_openstackcli"
- if [ -z "$OCF_RESKEY_openrc" ]; then
- ocf_exit_reason "openrc parameter not set"
- return $OCF_ERR_CONFIGURED
- fi
-
- if [ ! -f "$OCF_RESKEY_openrc" ] ; then
- ocf_exit_reason "openrc file not found"
- return $OCF_ERR_CONFIGURED
- fi
-
- . $OCF_RESKEY_openrc
+ get_config
${HA_SBIN_DIR}/attrd_updater --query -n openstack_ports -N $(crm_node -n) > /dev/null 2>&1
if [ $? -ne 0 ] ; then

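The new openstack-common.sh factors the OpenStack plumbing out of the individual agents: each agent sources it, splices common_meta_data into its own meta-data output, and calls get_config in its validate function so the CLI command line is built from clouds.yaml, an openrc file, or explicit auth parameters, exactly as the hunks above show for openstack-cinder-volume, openstack-floating-ip, openstack-info and openstack-virtual-ip. A hypothetical minimal agent fragment sketching that wiring (the agent name and the "server list" probe are illustrative, not part of the patch):

#!/bin/sh
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
. ${OCF_FUNCTIONS_DIR}/openstack-common.sh

example_meta_data() {
	cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="openstack-example" version="1.0">
<version>1.0</version>
<shortdesc lang="en">Illustrative skeleton only</shortdesc>
<parameters>
END
	common_meta_data	# emits the shared cloud/openrc/auth_url/... parameters
	cat <<END
</parameters>
</resource-agent>
END
}

example_validate() {
	check_binary "$OCF_RESKEY_openstackcli"
	get_config	# resolves clouds.yaml, an openrc file or explicit credentials
	$OCF_RESKEY_openstackcli server list >/dev/null || return $OCF_ERR_GENERIC
	return $OCF_SUCCESS
}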

@@ -0,0 +1,52 @@
From f91804ff4772e3ab41f46e28d370f57898700333 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Thu, 10 Dec 2020 08:19:21 +0100
Subject: [PATCH] fixes #1625: infinite loop in SML lexer
Reason was a lookahead-only pattern which was included in the state
where the lookahead was transitioning to.
---
pygments/lexers/ml.py | 12 ++++++------
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py
index 8ca8ce3eb..f2ac367c5 100644
--- a/pygments/lexers/ml.py
+++ b/pygments/lexers/ml.py
@@ -142,7 +142,7 @@ def id_callback(self, match):
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
- (r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
+ (r'\b(exception)\b(?!\')', Keyword.Reserved, 'ename'),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
@@ -315,15 +315,14 @@ def id_callback(self, match):
'ename': [
include('whitespace'),
- (r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
+ (r'(and\b)(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
- (r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
+ (r'(and\b)(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
+ (r'(%s)|(%s)' % (alphanumid_re, symbolicid_re), Name.Class),
- include('breakout'),
- include('core'),
- (r'\S+', Error),
+ default('#pop'),
],
'datcon': [
@@ -445,6 +444,7 @@ class OcamlLexer(RegexLexer):
],
}
+
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).

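The fix above replaces a lookahead-only rule, (r'(?=\b(exception)\b(?!\'))', Text, ('ename')), with one that actually consumes the "exception" keyword. A zero-width match never advances the lexer position, so a state that keeps matching it spins forever, which is the infinite loop reported in #1625. A tiny standalone illustration of the failure mode (the iteration cap exists only so the demo terminates):

import re

lookahead_only = re.compile(r"(?=\bexception\b)")  # matches without consuming
text = "exception Foo of string"

pos = 0
steps = 0
while pos < len(text) and steps < 5:  # cap added so the demo terminates
    m = lookahead_only.match(text, pos)
    if m is None:
        break
    pos += len(m.group(0))  # zero-width match: pos never moves past 0
    steps += 1
print(steps, pos)  # prints "5 0"; without the cap this loop never ends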

@@ -0,0 +1,138 @@
From 2e7e8c4a7b318f4032493773732754e418279a14 Mon Sep 17 00:00:00 2001
From: Georg Brandl <georg@python.org>
Date: Mon, 11 Jan 2021 09:46:34 +0100
Subject: [PATCH] Fix several exponential/cubic complexity regexes found by Ben
Caller/Doyensec
---
pygments/lexers/archetype.py | 2 +-
pygments/lexers/factor.py | 4 ++--
pygments/lexers/jvm.py | 1 -
pygments/lexers/matlab.py | 6 +++---
pygments/lexers/objective.py | 4 ++--
pygments/lexers/templates.py | 2 +-
pygments/lexers/varnish.py | 2 +-
8 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py
index 65046613d..26f5ea8c9 100644
--- a/pygments/lexers/archetype.py
+++ b/pygments/lexers/archetype.py
@@ -58,7 +58,7 @@ class AtomsLexer(RegexLexer):
(r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|'
r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date),
(r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
- (r'[+-]?(\d+)*\.\d+%?', Number.Float),
+ (r'[+-]?\d*\.\d+%?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[+-]?\d+%?', Number.Integer),
],
diff --git a/pygments/lexers/factor.py b/pygments/lexers/factor.py
index be7b30dff..9200547f9 100644
--- a/pygments/lexers/factor.py
+++ b/pygments/lexers/factor.py
@@ -265,7 +265,7 @@ class FactorLexer(RegexLexer):
(r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),
# strings
- (r'"""\s+(?:.|\n)*?\s+"""', String),
+ (r'"""\s(?:.|\n)*?\s"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),
@@ -322,7 +322,7 @@ class FactorLexer(RegexLexer):
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
- (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
+ (r'(\{\s+)(\S+)(\s[^}]+\s\}\s)',
bygroups(Text, Name.Variable, Text)),
(r'\S+', Name.Variable),
],
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 62dfd45e5..9a9397c2d 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -981,7 +981,6 @@ class CeylonLexer(RegexLexer):
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
- (r'".*``.*``.*"', String.Interpol),
(r'(\.)([a-z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py
index 4823c6a7e..578848623 100644
--- a/pygments/lexers/matlab.py
+++ b/pygments/lexers/matlab.py
@@ -137,7 +137,7 @@ class MatlabLexer(RegexLexer):
(r'.', Comment.Multiline),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
@@ -638,7 +638,7 @@ class OctaveLexer(RegexLexer):
(r"[^']*'", String, '#pop'),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
@@ -710,7 +710,7 @@ class ScilabLexer(RegexLexer):
(r'.', String, '#pop'),
],
'deffunc': [
- (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py
index 34e4062f6..38ac9bb05 100644
--- a/pygments/lexers/objective.py
+++ b/pygments/lexers/objective.py
@@ -261,11 +261,11 @@ class LogosLexer(ObjectiveCppLexer):
'logos_classname'),
(r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
bygroups(Keyword, Text, Name.Class)),
- (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)',
+ (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)',
bygroups(Keyword, Text, Name.Variable, Text, String, Text)),
(r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation),
'function'),
- (r'(%new)(\s*)(\()(\s*.*?\s*)(\))',
+ (r'(%new)(\s*)(\()(.*?)(\))',
bygroups(Keyword, Text, Keyword, String, Keyword)),
(r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)),
inherit,
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
index 33c06c4c4..5c3346b4c 100644
--- a/pygments/lexers/templates.py
+++ b/pygments/lexers/templates.py
@@ -1405,7 +1405,7 @@ class EvoqueLexer(RegexLexer):
# see doc for handling first name arg: /directives/evoque/
# + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
# should be using(PythonLexer), not passed out as String
- (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
+ (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?'
r'(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, using(PythonLexer), Punctuation)),
diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py
index 23653f7a1..9d358bd7c 100644
--- a/pygments/lexers/varnish.py
+++ b/pygments/lexers/varnish.py
@@ -61,7 +61,7 @@ def analyse_text(text):
bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)),
(r'(\.probe)(\s*=\s*)(\{)',
bygroups(Name.Attribute, Operator, Punctuation), 'probe'),
- (r'(\.\w+\b)(\s*=\s*)([^;]*)(\s*;)',
+ (r'(\.\w+\b)(\s*=\s*)([^;\s]*)(\s*;)',
bygroups(Name.Attribute, Operator, using(this), Punctuation)),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),

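Each hunk above removes a nested or ambiguous quantifier that makes the regex engine backtrack super-linearly on non-matching input. The archetype.py change is typical: in [+-]?(\d+)*\.\d+%? the group (\d+)* can split a digit run in exponentially many ways before the mandatory '.' fails, while the replacement [+-]?\d*\.\d+%? backtracks linearly. A small sketch of the difference (24 is just a digit count large enough to make the slowdown obvious; each extra digit roughly doubles the time for the vulnerable pattern):

import re
import time

vulnerable = re.compile(r'[+-]?(\d+)*\.\d+%?')  # pre-patch archetype.py pattern
fixed = re.compile(r'[+-]?\d*\.\d+%?')          # patched pattern

subject = "1" * 24 + "x"  # digits with no ".<digits>" suffix: neither matches

for name, pattern in (("vulnerable", vulnerable), ("fixed", fixed)):
    start = time.time()
    pattern.match(subject)
    print(name, round(time.time() - start, 3), "seconds")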

@@ -0,0 +1,24 @@
From ed5bc606a4db5108995df9297698cf9dc14cccb2 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 18 Jan 2022 11:32:05 +0100
Subject: [PATCH] mysql-common: fix local SSL connection by using
--ssl-mode=REQUIRED which is available on 5.7+ (--ssl is not available in
8.0)
---
heartbeat/mysql-common.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/mysql-common.sh b/heartbeat/mysql-common.sh
index 459948b10..de8763544 100755
--- a/heartbeat/mysql-common.sh
+++ b/heartbeat/mysql-common.sh
@@ -97,7 +97,7 @@ MYSQL_BINDIR=`dirname ${OCF_RESKEY_binary}`
MYSQL=$OCF_RESKEY_client_binary
if ocf_is_true "$OCF_RESKEY_replication_require_ssl"; then
- MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="--ssl"
+ MYSQL_OPTIONS_LOCAL_SSL_OPTIONS="--ssl-mode=REQUIRED"
else
MYSQL_OPTIONS_LOCAL_SSL_OPTIONS=""
fi
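
The fix above can be sanity-checked directly with the client. This is an illustration only, not part of the patch, and the user name is a made-up example:

# The old --ssl client flag is gone in MySQL 8.0; --ssl-mode=REQUIRED works on
# 5.7+ and refuses to fall back to an unencrypted connection.
mysql --ssl-mode=REQUIRED -u repl -p \
      -e "SHOW SESSION STATUS LIKE 'Ssl_cipher'"
# On a server without SSL configured the connect fails outright; on success,
# a non-empty Ssl_cipher confirms the session is encrypted.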


@ -0,0 +1,23 @@
From 09cde6531a87fd6a04568eaae94d5c489f36a8b6 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 6 Sep 2021 15:07:41 +0200
Subject: [PATCH] storage-mon: update metadata to suggest usage in combination
with HealthSMART agent
---
heartbeat/storage-mon.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/storage-mon.in b/heartbeat/storage-mon.in
index 5b289fe55..875095670 100644
--- a/heartbeat/storage-mon.in
+++ b/heartbeat/storage-mon.in
@@ -75,7 +75,7 @@ meta_data() {
<longdesc lang="en">
System health agent that checks the storage I/O status of the given drives and
updates the #health-storage attribute. Usage is highly recommended in combination
-with storage-mon monitoring agent. The agent currently support a maximum of 25
+with the HealthSMART monitoring agent. The agent currently support a maximum of 25
devices per instance.
</longdesc>
<shortdesc lang="en">storage I/O health status</shortdesc>

File diff suppressed because it is too large


@ -0,0 +1,64 @@
From fcd2565602146c0b9317d159cecb8935e304c7ce Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 30 Sep 2021 10:23:17 +0200
Subject: [PATCH] gcp-pd-move/gcp-vpc-move-route: dont fail failed resources
instantly (caused by OCF_ERR_CONFIGURED)
---
heartbeat/gcp-pd-move.in | 4 ++--
heartbeat/gcp-vpc-move-route.in | 6 +++---
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
index e99cc71f88..cbe703c3c5 100644
--- a/heartbeat/gcp-pd-move.in
+++ b/heartbeat/gcp-pd-move.in
@@ -157,7 +157,7 @@ def populate_vars():
CONN = googleapiclient.discovery.build('compute', 'v1')
except Exception as e:
logger.error('Couldn\'t connect with google api: ' + str(e))
- sys.exit(ocf.OCF_ERR_CONFIGURED)
+ sys.exit(ocf.OCF_ERR_GENERIC)
for param in PARAMETERS:
value = os.environ.get('OCF_RESKEY_%s' % param, PARAMETERS[param])
@@ -172,7 +172,7 @@ def populate_vars():
except Exception as e:
logger.error(
'Couldn\'t get instance name, is this running inside GCE?: ' + str(e))
- sys.exit(ocf.OCF_ERR_CONFIGURED)
+ sys.exit(ocf.OCF_ERR_GENERIC)
PROJECT = get_metadata('project/project-id')
if PARAMETERS['disk_scope'] in ['detect', 'regional']:
diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
index dac6e4ea8c..6b240c04d0 100644
--- a/heartbeat/gcp-vpc-move-route.in
+++ b/heartbeat/gcp-vpc-move-route.in
@@ -243,7 +243,7 @@ def validate(ctx):
ctx.conn = googleapiclient.discovery.build('compute', 'v1', credentials=credentials, cache_discovery=False)
except Exception as e:
logger.error('Couldn\'t connect with google api: ' + str(e))
- sys.exit(OCF_ERR_CONFIGURED)
+ sys.exit(OCF_ERR_GENERIC)
ctx.ip = os.environ.get('OCF_RESKEY_ip')
if not ctx.ip:
@@ -258,7 +258,7 @@ def validate(ctx):
except Exception as e:
logger.error(
'Instance information not found. Is this a GCE instance ?: %s', str(e))
- sys.exit(OCF_ERR_CONFIGURED)
+ sys.exit(OCF_ERR_GENERIC)
ctx.instance_url = '%s/projects/%s/zones/%s/instances/%s' % (
GCP_API_URL_PREFIX, ctx.project, ctx.zone, ctx.instance)
@@ -273,7 +273,7 @@ def validate(ctx):
idxs = ctx.iproute.link_lookup(ifname=ctx.interface)
if not idxs:
logger.error('Network interface not found')
- sys.exit(OCF_ERR_CONFIGURED)
+ sys.exit(OCF_ERR_GENERIC)
ctx.iface_idx = idxs[0]
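
Background for the exit-code change above: Pacemaker treats OCF_ERR_CONFIGURED as a fatal misconfiguration and stops trying to run the resource anywhere, while OCF_ERR_GENERIC is a soft error that triggers normal recovery (restart or failover). A failed Google API call or an unreachable metadata server can be transient rather than a broken configuration, so the generic code is the safer mapping. A minimal shell sketch of the same convention; the parameter name and the check itself are illustrative assumptions, not taken from these agents:

# Sketch only: map permanent vs. possibly transient problems to OCF codes.
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

gcp_sketch_validate() {
    if [ -z "$OCF_RESKEY_route_name" ]; then
        # Genuinely a configuration error: retrying elsewhere cannot help.
        ocf_exit_reason "route_name parameter not set"
        exit $OCF_ERR_CONFIGURED
    fi
    if ! curl -sf -H "Metadata-Flavor: Google" \
        http://metadata.google.internal/computeMetadata/v1/instance/name >/dev/null; then
        # Possibly transient (metadata/API hiccup): let the cluster recover.
        ocf_log err "cannot reach the GCE metadata server"
        exit $OCF_ERR_GENERIC
    fi
    return $OCF_SUCCESS
}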


@ -0,0 +1,43 @@
From 7c54e4ecda33c90a1046c0688774f5b847ab10fe Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 7 Dec 2021 10:37:24 +0100
Subject: [PATCH] Route: return OCF_NOT_RUNNING for probe action when interface
or route doesnt exist
---
heartbeat/Route | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/heartbeat/Route b/heartbeat/Route
index 8b390615a..7db41d0ae 100755
--- a/heartbeat/Route
+++ b/heartbeat/Route
@@ -227,15 +227,6 @@ route_stop() {
}
route_status() {
- if [ -n "${OCF_RESKEY_device}" ]; then
- # Must check if device exists or is gone.
- # If device is gone, route is also unconfigured.
- ip link show dev ${OCF_RESKEY_device} >/dev/null 2>&1
- if [ $? -ne 0 ]; then
- # Assume device does not exist, and short-circuit here.
- return $OCF_NOT_RUNNING
- fi
- fi
show_output="$(ip $addr_family route show $(create_route_spec) 2>/dev/null)"
if [ $? -eq 0 ]; then
if [ -n "$show_output" ]; then
@@ -251,7 +242,11 @@ route_status() {
else
# "ip route show" returned an error code. Assume something
# went wrong.
- return $OCF_ERR_GENERIC
+ if ocf_is_probe; then
+ return $OCF_NOT_RUNNING
+ else
+ return $OCF_ERR_GENERIC
+ fi
fi
}
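
The reasoning behind the change above: a probe is the monitor Pacemaker runs just to discover whether the resource is active, so "the route or its device cannot be found" simply means "not running here" and should be reported as OCF_NOT_RUNNING, whereas during a regular monitor the same condition is unexpected and must stay an error. Reduced to a sketch (using the agent's destination parameter, everything else simplified):

# Probe-aware status check, sketch of the pattern used above.
route_status_sketch() {
    out="$(ip route show "$OCF_RESKEY_destination" 2>/dev/null)"
    rc=$?
    if [ $rc -eq 0 ]; then
        [ -n "$out" ] && return $OCF_SUCCESS || return $OCF_NOT_RUNNING
    elif ocf_is_probe; then
        return $OCF_NOT_RUNNING   # discovery: absence is a valid answer
    else
        return $OCF_ERR_GENERIC   # running monitor: treat as a failure
    fi
}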


@ -0,0 +1,366 @@
From 764dacb6195f8940f13b9c322b1bc8189c5619fc Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Mon, 6 Sep 2021 12:13:42 +0200
Subject: [PATCH 1/6] Fix NFSv4 lock failover: set NFS Server Scope
Problem: https://github.com/ClusterLabs/resource-agents/issues/1644
RFC8881, 8.4.2.1 State Reclaim:
| If the server scope is different, the client should not attempt to
| reclaim locks. In this situation, no lock reclaim is possible.
| Any attempt to re-obtain the locks with non-reclaim operations is
| problematic since there is no guarantee that the existing
| filehandles will be recognized by the new server, or that if
| recognized, they denote the same objects. It is best to treat the
| locks as having been revoked by the reconfiguration event.
That's why for lock reclaim to even be attempted, we have to define and set
the same server scope for NFSD on all cluster nodes in the NFS failover
cluster. And in linux, that is done by setting the uts nodename for the
command that starts the nfsd kernel threads.
For "init scripts", just set it directly using unshare --uts.
For systemd units, add NFS_SERVER_SCOPE to some environment files
and inject the "unshare --uts" into the ExecStart command lines
using override drop-in files.
---
heartbeat/nfsserver | 120 +++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 119 insertions(+), 1 deletion(-)
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 96b19abe36..0888378645 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -5,6 +5,18 @@
# by hxinwei@gmail.com
# License: GNU General Public License v2 (GPLv2) and later
+
+# I don't know for certain whether all services actuall _need_ this,
+# I know that at least nfs-server needs it.
+# The rgmanager resource agent in rgmanager/src/resources/nfsserver.sh.in
+# did the unshare for gssd and idmapd as well, even though it seems unclear why.
+# Let's start with just the nfs-server, and add others if/when we have clear
+# indication they need it.
+#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpcbind.service"
+NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-server.service"
+SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE=/run/sysconfig/nfs-server-scope
+SYSTEMD_UNSHARE_UTS_DROPIN=51-resource-agents-unshare-uts.conf
+
if [ -n "$OCF_DEBUG_LIBRARY" ]; then
. $OCF_DEBUG_LIBRARY
else
@@ -99,6 +111,31 @@ Specifies the length of sm-notify retry time (minutes).
<content type="integer" default="" />
</parameter>
+<parameter name="nfs_server_scope" unique="0" required="0">
+<longdesc lang="en">
+RFC8881, 8.4.2.1 State Reclaim:
+
+If the server scope is different, the client should not attempt to
+reclaim locks. In this situation, no lock reclaim is possible.
+Any attempt to re-obtain the locks with non-reclaim operations is
+problematic since there is no guarantee that the existing
+filehandles will be recognized by the new server, or that if
+recognized, they denote the same objects. It is best to treat the
+locks as having been revoked by the reconfiguration event.
+
+For lock reclaim to even be attempted, we have to define and set the same
+server scope for NFSD on all cluster nodes in the NFS failover cluster.
+
+This agent won't "guess" a suitable server scope name for you, you need to
+explicitly specify this. But without it, NFSv4 lock reclaim after failover
+won't work properly. Suggested value: the failover "service IP".
+</longdesc>
+<shortdesc lang="en">
+RFC8881 NFS server scope for (lock) state reclaim after failover.
+</shortdesc>
+<content type="string"/>
+</parameter>
+
<parameter name="nfs_ip" unique="0" required="0">
<longdesc lang="en">
Comma separated list of floating IP addresses used to access the nfs service
@@ -269,7 +306,11 @@ nfs_exec()
set_exec_mode
case $EXEC_MODE in
- 1) ${OCF_RESKEY_nfs_init_script} $cmd;;
+ 1) if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
+ ${OCF_RESKEY_nfs_init_script} $cmd
+ else
+ unshare -u /bin/sh -c "hostname ${OCF_RESKEY_nfs_server_scope}; exec ${OCF_RESKEY_nfs_init_script} $cmd"
+ fi ;;
2) if ! echo $svc | grep -q "\."; then
svc="${svc}.service"
fi
@@ -623,6 +664,74 @@ notify_locks()
fi
}
+# Problem: https://github.com/ClusterLabs/resource-agents/issues/1644
+# RFC8881, 8.4.2.1 State Reclaim:
+#
+# | If the server scope is different, the client should not attempt to
+# | reclaim locks. In this situation, no lock reclaim is possible.
+# | Any attempt to re-obtain the locks with non-reclaim operations is
+# | problematic since there is no guarantee that the existing
+# | filehandles will be recognized by the new server, or that if
+# | recognized, they denote the same objects. It is best to treat the
+# | locks as having been revoked by the reconfiguration event.
+#
+# That's why for lock reclaim to even be attempted, we have to define and set
+# the same server scope for NFSD on all cluster nodes in the NFS failover
+# cluster. And in linux, that is done by setting the uts nodename for the
+# command that starts the nfsd kernel threads.
+#
+inject_unshare_uts_name_into_systemd_units ()
+{
+ local END_TAG="# END OF DROP-IN FOR NFS SERVER SCOPE"
+ local services
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)
+
+ local svc dir dropin edited_exec_start do_reload=false
+ for svc in $services ; do
+ dir=/run/systemd/system/$svc.d
+ dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
+ grep -sqF "$END_TAG" "$dropin" && continue
+
+ test -d "$dir" || mkdir -p "$dir"
+ test -e "$dropin" && rm -f "$dropin"
+
+ edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\(.*\\)#ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\1#p")
+ cat > "$dropin" <<___
+[Service]
+EnvironmentFile=$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE
+# reset list of exec start, then re-populate with unshared uts namespace
+ExecStart=
+$edited_exec_start
+$END_TAG
+___
+ do_reload=true
+ ocf_log debug "injected unshare --uts into $dropin"
+ done
+
+ mkdir -p "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE%/*}"
+ echo "NFS_SERVER_SCOPE=$OCF_RESKEY_nfs_server_scope" > "$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE"
+
+ $do_reload && systemctl daemon-reload
+}
+
+remove_unshare_uts_dropins ()
+{
+ local services
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE)
+
+ local svc dir dropin do_reload=false
+ for svc in $services ; do
+ dir=/run/systemd/system/$svc.d
+ dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
+ test -e "$dropin" || continue
+ rm -f "$dropin"
+ do_reload=true
+ ocf_log debug "removed unshare --uts from $svc"
+ done
+ rm -f "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE}"
+ $do_reload && systemctl daemon-reload
+}
+
nfsserver_start ()
{
local rc;
@@ -636,6 +745,13 @@ nfsserver_start ()
is_redhat_based && set_env_args
bind_tree
prepare_directory
+ case $EXEC_MODE in [23])
+ if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
+ remove_unshare_uts_dropins
+ else
+ inject_unshare_uts_name_into_systemd_units
+ fi ;;
+ esac
if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
@@ -854,6 +970,8 @@ nfsserver_stop ()
ocf_log info "NFS server stopped"
fi
+ case $EXEC_MODE in [23]) remove_unshare_uts_dropins;; esac
+
return $rc
}
From 515697b53c1614d05d39491c9af83e8d8b844b17 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 12:01:41 +0200
Subject: [PATCH 2/6] Fix NFSv4 lock failover: set NFS Server Scope, regardless
of EXEC_MODE
Debian (and other systems) may provide "init scripts",
which will only redirect back to systemd.
If we just unshare --uts the init script invocation,
the uts namespace is useless in that case.
If systemd is running, mangle the nfs-server.service unit,
independent of the "EXEC_MODE".
---
heartbeat/nfsserver | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 0888378645..054aabbaf6 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -745,13 +745,20 @@ nfsserver_start ()
is_redhat_based && set_env_args
bind_tree
prepare_directory
- case $EXEC_MODE in [23])
+
+ # Debian (and other systems) may provide "init scripts",
+ # which will only redirect back to systemd.
+ # If we just unshare --uts the init script invocation,
+ # the uts namespace is useless in that case.
+ # If systemd is running, mangle the nfs-server.service unit,
+ # independent of the "EXEC_MODE" we detected.
+ if $systemd_is_running ; then
if [ -z "$OCF_RESKEY_nfs_server_scope" ] ; then
remove_unshare_uts_dropins
else
inject_unshare_uts_name_into_systemd_units
- fi ;;
- esac
+ fi
+ fi
if ! `mount | grep -q " on $OCF_RESKEY_rpcpipefs_dir "`; then
mount -t rpc_pipefs sunrpc $OCF_RESKEY_rpcpipefs_dir
@@ -970,7 +977,9 @@ nfsserver_stop ()
ocf_log info "NFS server stopped"
fi
- case $EXEC_MODE in [23]) remove_unshare_uts_dropins;; esac
+ if $systemd_is_running; then
+ remove_unshare_uts_dropins
+ fi
return $rc
}
@@ -1008,6 +1017,7 @@ nfsserver_validate ()
}
nfsserver_validate
+systemd_is_running && systemd_is_running=true || systemd_is_running=false
case $__OCF_ACTION in
start) nfsserver_start
From e83c20d88f404f9f9d829c654883d60eb6cc9ff3 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:06:18 +0200
Subject: [PATCH 3/6] Fix NFSv4 lock failover: add missing "|cut -f1" in
remove_unshare_uts_dropins
---
heartbeat/nfsserver | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 054aabbaf6..d3db89a537 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -717,7 +717,7 @@ ___
remove_unshare_uts_dropins ()
{
local services
- services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE)
+ services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)
local svc dir dropin do_reload=false
for svc in $services ; do
From b5b0e4a0b60d285af576b2d8ecfbe95e5a177a87 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:07:13 +0200
Subject: [PATCH 4/6] Fix NFSv4 lock failover: get rid of "world-inaccessible"
warning
by temporarily changing the umask before generating the dropins
---
heartbeat/nfsserver | 3 +++
1 file changed, 3 insertions(+)
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index d3db89a537..447e0302b2 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -687,6 +687,8 @@ inject_unshare_uts_name_into_systemd_units ()
services=$(systemctl list-unit-files --no-legend $NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE | cut -d ' ' -f1)
local svc dir dropin edited_exec_start do_reload=false
+ local old_umask=$(umask)
+ umask 0022
for svc in $services ; do
dir=/run/systemd/system/$svc.d
dropin=$dir/$SYSTEMD_UNSHARE_UTS_DROPIN
@@ -710,6 +712,7 @@ ___
mkdir -p "${SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE%/*}"
echo "NFS_SERVER_SCOPE=$OCF_RESKEY_nfs_server_scope" > "$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE"
+ umask $old_umask
$do_reload && systemctl daemon-reload
}
From 3c6c91ce5a00eeef9cd766389d73a0b42580a1e6 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:08:09 +0200
Subject: [PATCH 5/6] Fix NFSv4 lock failover: deal with "special executable
prefix" chars in ExecStart
---
heartbeat/nfsserver | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 447e0302b2..5326bd2c6e 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -697,7 +697,7 @@ inject_unshare_uts_name_into_systemd_units ()
test -d "$dir" || mkdir -p "$dir"
test -e "$dropin" && rm -f "$dropin"
- edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\(.*\\)#ExecStart=/usr/bin/unshare --uts /bin/sh -ec 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\1#p")
+ edited_exec_start=$(systemctl cat $svc | sed -ne "s#^ExecStart=\\([-+:!@]*\\)\\(.*\\)#ExecStart=\\1/usr/bin/unshare --uts /bin/sh -c 'hostname \${NFS_SERVER_SCOPE}; exec \"\$@\"' -- \\2#p")
cat > "$dropin" <<___
[Service]
EnvironmentFile=$SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE
From 512fbaf61e6d24a1236ef50e323ea17a62485c36 Mon Sep 17 00:00:00 2001
From: Lars Ellenberg <lars.ellenberg@linbit.com>
Date: Fri, 8 Oct 2021 17:08:59 +0200
Subject: [PATCH 6/6] Fix NFSv4 lock failover: add rpc-statd-notify to the
comment list of potentially interesting services
---
heartbeat/nfsserver | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index 5326bd2c6e..240dd1a76c 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -12,7 +12,7 @@
# did the unshare for gssd and idmapd as well, even though it seems unclear why.
# Let's start with just the nfs-server, and add others if/when we have clear
# indication they need it.
-#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpcbind.service"
+#NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-idmapd.service nfs-mountd.service nfs-server.service nfsdcld.service rpc-gssd.service rpc-statd.service rpc-statd-notify.service rpcbind.service"
NFSD_RELATED_SYSTEMD_SERVICE_FOR_UNSHARE_UTS_NAMESPACE="nfs-server.service"
SYSTEMD_ENVIRONMENT_FILE_NFS_SERVER_SCOPE=/run/sysconfig/nfs-server-scope
SYSTEMD_UNSHARE_UTS_DROPIN=51-resource-agents-unshare-uts.conf
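
To make the mechanism above more concrete, this is roughly what ends up on disk at runtime. The values are assumptions for illustration: nfs_server_scope set to 192.0.2.10 and a distribution nfs-server.service whose original line is ExecStart=/usr/sbin/rpc.nfsd.

# Illustration only: approximate content of
# /run/systemd/system/nfs-server.service.d/51-resource-agents-unshare-uts.conf
cat <<'___'
[Service]
EnvironmentFile=/run/sysconfig/nfs-server-scope
# reset list of exec start, then re-populate with unshared uts namespace
ExecStart=
ExecStart=/usr/bin/unshare --uts /bin/sh -c 'hostname ${NFS_SERVER_SCOPE}; exec "$@"' -- /usr/sbin/rpc.nfsd
# END OF DROP-IN FOR NFS SERVER SCOPE
___
# and /run/sysconfig/nfs-server-scope would contain one line:
#   NFS_SERVER_SCOPE=192.0.2.10
# so the nfsd kernel threads start under a UTS namespace whose nodename is the
# shared server scope instead of the local host name.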


@ -0,0 +1,29 @@
From 9a7b47f1838e9d6e3c807e9db5312097adb5c499 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Fri, 5 Nov 2021 10:30:49 +0100
Subject: [PATCH] gcp-ilb/Squid: fix issues detected by CI
---
heartbeat/Squid.in | 2 +-
heartbeat/gcp-ilb | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb
index 28484b241..48dc3ac4e 100755
--- a/heartbeat/gcp-ilb
+++ b/heartbeat/gcp-ilb
@@ -53,12 +53,12 @@ pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid"
#Validate command for logging
-if $OCF_RESKEY_log_enable = "true"; then
+if [ $OCF_RESKEY_log_enable = "true" ]; then
if type $OCF_RESKEY_log_cmd > /dev/null 2>&1; then
logging_cmd="$OCF_RESKEY_log_cmd $OCF_RESKEY_log_params"
ocf_log debug "Logging command is: \'$logging_cmd\' "
else
- $OCF_RESKEY_log_enable = "false"
+ OCF_RESKEY_log_enable="false"
ocf_log err "\'$logging_cmd\' is invalid. External logging disabled."
fi;
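
The bug fixed above is a classic shell pitfall: "if $OCF_RESKEY_log_enable = "true"; then" does not compare strings, it expands the variable and executes its value as a command (true or false, with two stray arguments), so the branch taken depends on that command's exit status; likewise the else branch's "$OCF_RESKEY_log_enable = "false"" tries to run a command instead of assigning. A short sketch of the broken and corrected forms:

# Broken: executes the value of the variable as a command.
#   if $OCF_RESKEY_log_enable = "true"; then ...
# Correct POSIX string comparison:
if [ "$OCF_RESKEY_log_enable" = "true" ]; then
    : # logging enabled
fi
# What the follow-up patch settles on, accepting yes/true/on/1 as well:
if ocf_is_true "$OCF_RESKEY_log_enable"; then
    : # logging enabled
fi
# Assignment takes no $ on the left-hand side and no spaces around =:
OCF_RESKEY_log_enable="false"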


@ -0,0 +1,51 @@
From 14576f7ca02fb0abff188238ac019e88ab06e878 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 9 Nov 2021 11:49:36 +0100
Subject: [PATCH] gcp-ilb: only check if log_cmd binary is available if
log_enable is true
---
heartbeat/gcp-ilb | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb
index 48dc3ac4e..f84f373b7 100755
--- a/heartbeat/gcp-ilb
+++ b/heartbeat/gcp-ilb
@@ -37,7 +37,7 @@ if type "socat" > /dev/null 2>&1; then
OCF_RESKEY_cat_default="socat"
else
OCF_RESKEY_cat_default="nc"
-fi;
+fi
: ${OCF_RESKEY_cat=${OCF_RESKEY_cat_default}}
@@ -53,7 +53,7 @@ pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid"
#Validate command for logging
-if [ $OCF_RESKEY_log_enable = "true" ]; then
+if ocf_is_true "$OCF_RESKEY_log_enable"; then
if type $OCF_RESKEY_log_cmd > /dev/null 2>&1; then
logging_cmd="$OCF_RESKEY_log_cmd $OCF_RESKEY_log_params"
ocf_log debug "Logging command is: \'$logging_cmd\' "
@@ -61,7 +61,7 @@ if [ $OCF_RESKEY_log_enable = "true" ]; then
OCF_RESKEY_log_enable="false"
ocf_log err "\'$logging_cmd\' is invalid. External logging disabled."
- fi;
+ fi
fi
@@ -285,7 +285,8 @@ ilb_stop() {
ilb_validate() {
check_binary "$OCF_RESKEY_cat"
- check_binary "$OCF_RESKEY_log_cmd"
+
+ ocf_is_true "$OCF_RESKEY_log_enable" && check_binary "$OCF_RESKEY_log_cmd"
if ! ocf_is_decimal "$OCF_RESKEY_port"; then
ocf_exit_reason "$OCF_RESKEY_port is not a valid port"


@ -0,0 +1,11 @@
--- a/heartbeat/gcp-ilb 2021-11-09 14:13:20.311243373 +0100
+++ b/heartbeat/gcp-ilb 2021-11-09 14:13:50.269329165 +0100
@@ -28,7 +28,7 @@
OCF_RESKEY_cat_default="socat"
OCF_RESKEY_port_default="60000"
OCF_RESKEY_log_enable_default="false"
-OCF_RESKEY_log_cmd_default="gcloud"
+OCF_RESKEY_log_cmd_default="gcloud-ra"
OCF_RESKEY_log_params_default="logging write GCPILB"
OCF_RESKEY_log_end_params_default=""


@ -0,0 +1,22 @@
From 1c037b3ac0288509fb2b74fb4a661a504155da15 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Thu, 26 Aug 2021 12:27:50 +0200
Subject: [PATCH] nfsnotify: fix default value for "notify_args"
---
heartbeat/nfsnotify.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/nfsnotify.in b/heartbeat/nfsnotify.in
index 851f6ad6b4..fe6d2793ba 100644
--- a/heartbeat/nfsnotify.in
+++ b/heartbeat/nfsnotify.in
@@ -33,7 +33,7 @@
# Parameter defaults
OCF_RESKEY_source_host_default=""
-OCF_RESKEY_notify_args_default="false"
+OCF_RESKEY_notify_args_default=""
: ${OCF_RESKEY_source_host=${OCF_RESKEY_source_host_default}}
: ${OCF_RESKEY_notify_args=${OCF_RESKEY_notify_args_default}}


@ -0,0 +1,32 @@
From 925180da2f41feddc5aac3c249563eb179b34029 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 22 Nov 2021 16:44:48 +0100
Subject: [PATCH] db2: use -l forever instead of -t nodes -l reboot, as they
conflict with eachother
---
heartbeat/db2 | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/db2 b/heartbeat/db2
index 03146a957..fa2a45a5d 100755
--- a/heartbeat/db2
+++ b/heartbeat/db2
@@ -274,7 +274,7 @@ db2_fal_attrib() {
while read id node member
do
[ "$member" = member -a "$node" != "$me" ] || continue
- crm_attribute -t nodes -l reboot --node=$node -n $attr -v "$3"
+ crm_attribute -l forever --node=$node -n $attr -v "$3"
rc=$?
ocf_log info "DB2 instance $instance($db2node/$db: setting attrib for FAL to $FIRST_ACTIVE_LOG @ $node"
[ $rc != 0 ] && break
@@ -282,7 +282,7 @@ db2_fal_attrib() {
;;
get)
- crm_attribute -t nodes -l reboot -n $attr -G --quiet 2>&1
+ crm_attribute -l forever -n $attr -G --quiet 2>&1
rc=$?
if [ $rc != 0 ]
then
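
Context for the change above: with crm_attribute, -t nodes addresses the permanent node-attribute section of the CIB while -l reboot asks for a transient attribute that lives in the status section and is cleared when the node leaves the cluster, so combining the two is contradictory; -l forever alone selects the permanent store the agent actually wants for the first-active-log value. Roughly, the calls before and after (node name, attribute name and value are placeholders):

# Old, self-contradictory form:
#   crm_attribute -t nodes -l reboot --node=node1 -n db2_fal_attr -v "S0000123.LOG"
# New form, one persistent per-node attribute:
crm_attribute -l forever --node=node1 -n db2_fal_attr -v "S0000123.LOG"
# Reading it back, as the agent does:
crm_attribute -l forever -n db2_fal_attr -G --quiet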


@ -0,0 +1,32 @@
From 75eaf06eea8957aa3941823955d1c8fa7933ab1d Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Wed, 23 Feb 2022 16:32:21 +0100
Subject: [PATCH] db2: only warn when notify isnt set, and use
ocf_local_nodename() to get node name
---
heartbeat/db2 | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/db2 b/heartbeat/db2
index fa2a45a5d..ea24d33fc 100755
--- a/heartbeat/db2
+++ b/heartbeat/db2
@@ -267,7 +267,7 @@ db2_fal_attrib() {
case "$2" in
set)
- me=$(uname -n)
+ me=$(ocf_local_nodename)
# loop over all member nodes and set attribute
crm_node -l |
@@ -284,7 +284,7 @@ db2_fal_attrib() {
get)
crm_attribute -l forever -n $attr -G --quiet 2>&1
rc=$?
- if [ $rc != 0 ]
+ if ! ocf_is_true "$OCF_RESKEY_CRM_meta_notify" && [ $rc != 0 ]
then
ocf_log warn "DB2 instance $instance($db2node/$db: can't retrieve attribute $attr, are you sure notifications are enabled ?"
fi


@ -0,0 +1,41 @@
From 6d2ed7615614ede093f097189876d0f08553a43e Mon Sep 17 00:00:00 2001
From: Reid Wahl <nrwahl@protonmail.com>
Date: Mon, 14 Feb 2022 22:23:39 -0800
Subject: [PATCH] IPsrcaddr: Add warning about DHCP
If DHCP is enabled for the interface that serves OCF_RESKEY_ipaddress,
then NetworkManager (and possibly dhclient in systems without NM;
unsure) may later re-add a route that the IPsrcaddr resource replaced.
This may cause the resource to fail or cause other unexpected behavior.
So far this has been observed with a default route, albeit with an edge
case of a configuration (OCF_RESKEY_ipaddress on a different subnet)
that may not be totally valid. There are likely to be other situations
as well where DHCP can cause conflicts with IPsrcaddr's manual updates
via iproute. The safest option is to use only static configuration for
the involved interface.
Resolves: RHBZ#1654862
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
---
heartbeat/IPsrcaddr | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
index ec868409f..fd7b6f68d 100755
--- a/heartbeat/IPsrcaddr
+++ b/heartbeat/IPsrcaddr
@@ -99,6 +99,12 @@ meta_data() {
<longdesc lang="en">
Resource script for IPsrcaddr. It manages the preferred source address
modification.
+
+Note: DHCP should not be enabled for the interface serving the preferred
+source address. Enabling DHCP may result in unexpected behavior, such as
+the automatic addition of duplicate or conflicting routes. This may
+cause the IPsrcaddr resource to fail, or it may produce undesired
+behavior while the resource continues to run.
</longdesc>
<shortdesc lang="en">Manages the preferred source address for outgoing IP packets</shortdesc>


@ -0,0 +1,49 @@
From 5a65f66ff803ad7ed15af958cc1efdde4d53dcb7 Mon Sep 17 00:00:00 2001
From: Reid Wahl <nrwahl@protonmail.com>
Date: Thu, 17 Feb 2022 03:53:21 -0800
Subject: [PATCH] IPsrcaddr: Better error message when no matching route found
If OCF_RESKEY_destination is not explicitly set and `ip route list`
can't find a route matching the specifications, the NETWORK variable
doesn't get set. This causes a certain failure of the start operation,
because there is no PREFIX argument to `ip route replace` (syntax
error). It may also cause unexpected behavior for stop operations (but
not in all cases). During a monitor, this event can only happen if
something has changed outside the cluster's control, and so is cause
for warning there.
Exit OCF_ERR_ARGS for start, log debug for probe, log warning for all
other ops.
Resolves: RHBZ#1654862
Signed-off-by: Reid Wahl <nrwahl@protonmail.com>
---
heartbeat/IPsrcaddr | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
index fd7b6f68d..f0216722d 100755
--- a/heartbeat/IPsrcaddr
+++ b/heartbeat/IPsrcaddr
@@ -549,6 +549,20 @@ rc=$?
INTERFACE=`echo $findif_out | awk '{print $1}'`
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'`
+
+ if [ -z "$NETWORK" ]; then
+ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
+ err_str="$err_str match $ipaddress' failed to find a matching route"
+
+ if [ "$__OCF_ACTION" = "start" ]; then
+ ocf_exit_reason "$err_str"
+ exit $OCF_ERR_ARGS
+ elif ! ocf_is_probe; then
+ ocf_log warn "$err_str"
+ else
+ ocf_log debug "$err_str"
+ fi
+ fi
else
NETWORK="$OCF_RESKEY_destination"
fi
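
The failure mode this guards against, spelled out: if no route matches, NETWORK stays empty and the later route update is issued without a prefix argument, which iproute2 rejects, so the start would previously die with an opaque syntax error instead of the explicit exit reason added here. Addresses below are examples:

# Sketch of what happened without the guard:
NETWORK=""                                   # no matching route was found
ip route replace $NETWORK dev eth0 src 192.0.2.10
# The prefix argument is missing, so the command is rejected and the start
# operation fails without saying why; the new check reports the real cause
# and, for probes, merely logs it at debug level.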


@ -0,0 +1,56 @@
From 0a197f1cd227e768837dff778a0c56fc1085d434 Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Mon, 21 Feb 2022 13:54:04 +0100
Subject: [PATCH] IPsrcaddr: fix indentation in better error message code
---
heartbeat/IPsrcaddr | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
index f0216722d..c82adc0e9 100755
--- a/heartbeat/IPsrcaddr
+++ b/heartbeat/IPsrcaddr
@@ -542,27 +542,27 @@ fi
findif_out=`$FINDIF -C`
rc=$?
[ $rc -ne 0 ] && {
- ocf_exit_reason "[$FINDIF -C] failed"
- exit $rc
+ ocf_exit_reason "[$FINDIF -C] failed"
+ exit $rc
}
INTERFACE=`echo $findif_out | awk '{print $1}'`
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'`
- if [ -z "$NETWORK" ]; then
- err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
- err_str="$err_str match $ipaddress' failed to find a matching route"
-
- if [ "$__OCF_ACTION" = "start" ]; then
- ocf_exit_reason "$err_str"
- exit $OCF_ERR_ARGS
- elif ! ocf_is_probe; then
- ocf_log warn "$err_str"
- else
- ocf_log debug "$err_str"
- fi
- fi
+ if [ -z "$NETWORK" ]; then
+ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
+ err_str="$err_str match $ipaddress' failed to find a matching route"
+
+ if [ "$__OCF_ACTION" = "start" ]; then
+ ocf_exit_reason "$err_str"
+ exit $OCF_ERR_ARGS
+ elif ! ocf_is_probe; then
+ ocf_log warn "$err_str"
+ else
+ ocf_log debug "$err_str"
+ fi
+ fi
else
NETWORK="$OCF_RESKEY_destination"
fi


@ -0,0 +1,117 @@
From 50a596bfb977b18902dc62b99145bbd1a087690a Mon Sep 17 00:00:00 2001
From: Oyvind Albrigtsen <oalbrigt@redhat.com>
Date: Tue, 1 Mar 2022 11:06:07 +0100
Subject: [PATCH] IPsrcaddr: fixes
- use findif.sh to detect secondary interfaces
- get metric and proto to update the correct route/update it correctly
- match route using interface to fail when trying to update secondary
interfaces without specifying destination (would update default route
before)
- also use PRIMARY_IP/OPTS during stop-action for default routes (to get
back to the exact routes we started with)
- dont fail during stop-action if route doesnt exist
- use [[:blank:]] for WS to follow POSIX standard (suggested by nrwahl)
---
heartbeat/IPsrcaddr | 35 +++++++++++++++++++----------------
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
index c82adc0e9..7dbf65ff5 100755
--- a/heartbeat/IPsrcaddr
+++ b/heartbeat/IPsrcaddr
@@ -52,6 +52,7 @@
# Initialization:
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+. ${OCF_FUNCTIONS_DIR}/findif.sh
# Defaults
OCF_RESKEY_ipaddress_default=""
@@ -181,19 +182,21 @@ errorexit() {
#
# where the src clause "src Y.Y.Y.Y" may or may not be present
-WS="[`echo -en ' \t'`]"
+WS="[[:blank:]]"
OCTET="[0-9]\{1,3\}"
IPADDR="\($OCTET\.\)\{3\}$OCTET"
SRCCLAUSE="src$WS$WS*\($IPADDR\)"
MATCHROUTE="\(.*${WS}\)\($SRCCLAUSE\)\($WS.*\|$\)"
-FINDIF=$HA_BIN/findif
+METRICCLAUSE=".*\(metric$WS[^ ]\+\)"
+PROTOCLAUSE=".*\(proto$WS[^ ]\+\)"
+FINDIF=findif
# findif needs that to be set
export OCF_RESKEY_ip=$OCF_RESKEY_ipaddress
srca_read() {
# Capture matching route - doublequotes prevent word splitting...
- ROUTE="`$CMDSHOW 2> /dev/null`" || errorexit "command '$CMDSHOW' failed"
+ ROUTE="`$CMDSHOW dev $INTERFACE 2> /dev/null`" || errorexit "command '$CMDSHOW' failed"
# ... so we can make sure there is only 1 matching route
[ 1 -eq `echo "$ROUTE" | wc -l` ] || \
@@ -201,7 +204,7 @@ srca_read() {
# But there might still be no matching route
[ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] && [ -z "$ROUTE" ] && \
- ! ocf_is_probe && errorexit "no matching route exists"
+ ! ocf_is_probe && [ "$__OCF_ACTION" != stop ] && errorexit "no matching route exists"
# Sed out the source ip address if it exists
SRCIP=`echo $ROUTE | sed -n "s/$MATCHROUTE/\3/p"`
@@ -232,8 +235,8 @@ srca_start() {
rc=$OCF_SUCCESS
ocf_log info "The ip route has been already set.($NETWORK, $INTERFACE, $ROUTE_WO_SRC)"
else
- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE src $1 || \
- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE src $1' failed"
+ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC || \
+ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC' failed"
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
$CMDCHANGE $ROUTE_WO_SRC src $1 || \
@@ -266,14 +269,11 @@ srca_stop() {
[ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address"
- OPTS=""
- if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] ;then
- PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')"
- OPTS="proto kernel scope host src $PRIMARY_IP"
- fi
+ PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')"
+ OPTS="proto kernel scope link src $PRIMARY_IP"
- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS || \
- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS' failed"
+ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC || \
+ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC' failed"
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
$CMDCHANGE $ROUTE_WO_SRC || \
@@ -539,16 +539,19 @@ if [ $rc -ne $OCF_SUCCESS ]; then
esac
fi
-findif_out=`$FINDIF -C`
+findif_out=`$FINDIF`
rc=$?
[ $rc -ne 0 ] && {
- ocf_exit_reason "[$FINDIF -C] failed"
+ ocf_exit_reason "[$FINDIF] failed"
exit $rc
}
INTERFACE=`echo $findif_out | awk '{print $1}'`
+LISTROUTE=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress`
+METRIC=`echo $LISTROUTE | sed -n "s/$METRICCLAUSE/\1/p"`
+[ -z "$PROTO" ] && PROTO=`echo $LISTROUTE | sed -n "s/$PROTOCLAUSE/\1/p"`
if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
- NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'`
+ NETWORK=`echo $LISTROUTE | grep -m 1 -o '^[^ ]*'`
if [ -z "$NETWORK" ]; then
err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
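
Why metric and proto matter in the change above: a kernel-created route typically looks like "10.0.0.0/24 dev eth0 proto kernel scope link src 10.0.0.5 metric 100", and routes that differ in metric are distinct entries, so an ip route replace that omits the metric can add a second route instead of updating the existing one, while omitting proto re-labels the route and makes it harder to restore exactly on stop. Hence the agent now pulls both out of ip route list and repeats them. A sketch with example addresses:

# Sketch: change the preferred source while keeping proto and metric intact.
dev=eth0 prefix=10.0.0.0/24 newsrc=10.0.0.99     # example values
line=$(ip route list dev "$dev" scope link match 10.0.0.5)
metric=$(echo "$line" | sed -n 's/.*\(metric[[:blank:]][^ ]*\).*/\1/p')
proto=$(echo "$line" | sed -n 's/.*\(proto[[:blank:]][^ ]*\).*/\1/p')
# $proto and $metric are intentionally unquoted: each expands to two words
# ("proto kernel", "metric 100") or to nothing if absent.
ip route replace "$prefix" dev "$dev" $proto src "$newsrc" $metric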


@ -0,0 +1,102 @@
From e651576c1b5c1ffbe0fd1b78f209be9a3f9764e7 Mon Sep 17 00:00:00 2001
From: XingWei-Liu <liuxingwei@uniontech.com>
Date: Thu, 10 Mar 2022 10:38:11 +0800
Subject: [PATCH 1/4] change lvm_status return value from ocf_not_running to
ocf_err_generic
---
heartbeat/LVM-activate | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
index aed672ea3..0aef76706 100755
--- a/heartbeat/LVM-activate
+++ b/heartbeat/LVM-activate
@@ -790,7 +790,7 @@ lvm_status() {
fi
if [ $dm_count -eq 0 ]; then
- return $OCF_NOT_RUNNING
+ return $OCF_ERR_GENERIC
fi
case "$OCF_CHECK_LEVEL" in
From 540ae56436a4f9547bb17aa206fe0e8c7a7fea87 Mon Sep 17 00:00:00 2001
From: XingWei-Liu <liuxingwei@uniontech.com>
Date: Thu, 10 Mar 2022 16:44:25 +0800
Subject: [PATCH 2/4] add if ocf_is_probe in monitor func
---
heartbeat/LVM-activate | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
index 0aef76706..c86606637 100755
--- a/heartbeat/LVM-activate
+++ b/heartbeat/LVM-activate
@@ -790,7 +790,11 @@ lvm_status() {
fi
if [ $dm_count -eq 0 ]; then
- return $OCF_ERR_GENERIC
+ if ocf_is_probe ;then
+ return $OCF_NOT_RUNNING
+ else
+ return $OCF_ERR_GENERIC
+ fi
fi
case "$OCF_CHECK_LEVEL" in
From ae3f35d4f671f3288034a257c6dd8eff9a83447a Mon Sep 17 00:00:00 2001
From: XingWei-Liu <liuxingwei@uniontech.com>
Date: Thu, 10 Mar 2022 16:50:04 +0800
Subject: [PATCH 3/4] add if ocf_is_probe in monitor func
---
heartbeat/LVM-activate | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
index c86606637..f345f73a9 100755
--- a/heartbeat/LVM-activate
+++ b/heartbeat/LVM-activate
@@ -791,9 +791,9 @@ lvm_status() {
if [ $dm_count -eq 0 ]; then
if ocf_is_probe ;then
- return $OCF_NOT_RUNNING
- else
return $OCF_ERR_GENERIC
+ else
+ return $OCF_NOT_RUNNING
fi
fi
From 1072c0490ef936a1a7dfd8411da434dce1569457 Mon Sep 17 00:00:00 2001
From: XingWei-Liu <liuxingwei@uniontech.com>
Date: Thu, 10 Mar 2022 18:10:21 +0800
Subject: [PATCH 4/4] reverse return value in monitor func
---
heartbeat/LVM-activate | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
index f345f73a9..c86606637 100755
--- a/heartbeat/LVM-activate
+++ b/heartbeat/LVM-activate
@@ -791,9 +791,9 @@ lvm_status() {
if [ $dm_count -eq 0 ]; then
if ocf_is_probe ;then
- return $OCF_ERR_GENERIC
- else
return $OCF_NOT_RUNNING
+ else
+ return $OCF_ERR_GENERIC
fi
fi


@ -0,0 +1,28 @@
--- ClusterLabs-resource-agents-55a4e2c9/configure.ac 2021-08-19 09:37:57.000000000 +0200
+++ ClusterLabs-resource-agents-55a4e2c9/configure.ac.modif 2021-09-02 13:12:26.336044699 +0200
@@ -522,25 +522,12 @@
AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1)
BUILD_GCP_PD_MOVE=1
-if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then
- BUILD_GCP_PD_MOVE=0
- AC_MSG_WARN("Not building gcp-pd-move")
-fi
AM_CONDITIONAL(BUILD_GCP_PD_MOVE, test $BUILD_GCP_PD_MOVE -eq 1)
BUILD_GCP_VPC_MOVE_ROUTE=1
-if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || \
- test "x${HAVE_PYMOD_PYROUTE2}" != xyes || test $BUILD_OCF_PY -eq 0; then
- BUILD_GCP_VPC_MOVE_ROUTE=0
- AC_MSG_WARN("Not building gcp-vpc-move-route")
-fi
AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_ROUTE, test $BUILD_GCP_VPC_MOVE_ROUTE -eq 1)
BUILD_GCP_VPC_MOVE_VIP=1
-if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then
- BUILD_GCP_VPC_MOVE_VIP=0
- AC_MSG_WARN("Not building gcp-vpc-move-vip")
-fi
AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_VIP, test $BUILD_GCP_VPC_MOVE_VIP -eq 1)
AC_PATH_PROGS(ROUTE, route)


@ -0,0 +1,766 @@
diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
--- a/doc/man/Makefile.am 2021-08-25 09:31:14.033615965 +0200
+++ b/doc/man/Makefile.am 2021-08-24 17:59:40.679372762 +0200
@@ -97,6 +97,8 @@
ocf_heartbeat_ManageRAID.7 \
ocf_heartbeat_ManageVE.7 \
ocf_heartbeat_NodeUtilization.7 \
+ ocf_heartbeat_nova-compute-wait.7 \
+ ocf_heartbeat_NovaEvacuate.7 \
ocf_heartbeat_Pure-FTPd.7 \
ocf_heartbeat_Raid1.7 \
ocf_heartbeat_Route.7 \
diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
--- a/heartbeat/Makefile.am 2021-08-25 09:31:14.034615967 +0200
+++ b/heartbeat/Makefile.am 2021-08-24 17:59:40.679372762 +0200
@@ -29,6 +29,8 @@
ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat
+ospdir = $(OCF_RA_DIR_PREFIX)/openstack
+
dtddir = $(datadir)/$(PACKAGE_NAME)
dtd_DATA = ra-api-1.dtd metadata.rng
@@ -50,6 +52,9 @@
send_ua_SOURCES = send_ua.c IPv6addr_utils.c
send_ua_LDADD = $(LIBNETLIBS)
+osp_SCRIPTS = nova-compute-wait \
+ NovaEvacuate
+
ocf_SCRIPTS = AoEtarget \
AudibleAlarm \
ClusterMon \
diff --color -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait
--- a/heartbeat/nova-compute-wait 1970-01-01 01:00:00.000000000 +0100
+++ b/heartbeat/nova-compute-wait 2021-08-24 17:59:40.678372759 +0200
@@ -0,0 +1,317 @@
+#!/bin/sh
+# Copyright 2015 Red Hat, Inc.
+#
+# Description: Manages compute daemons
+#
+# Authors: Andrew Beekhof
+#
+# Support: openstack@lists.openstack.org
+# License: Apache Software License (ASL) 2.0
+#
+
+
+#######################################################################
+# Initialization:
+
+###
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+###
+
+: ${__OCF_ACTION=$1}
+
+#######################################################################
+
+meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="nova-compute-wait" version="1.0">
+<version>1.0</version>
+
+<longdesc lang="en">
+OpenStack Nova Compute Server.
+</longdesc>
+<shortdesc lang="en">OpenStack Nova Compute Server</shortdesc>
+
+<parameters>
+
+<parameter name="auth_url" unique="0" required="1">
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="username" unique="0" required="1">
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+</parameter>
+
+<parameter name="password" unique="0" required="1">
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="tenant_name" unique="0" required="1">
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="domain" unique="0" required="0">
+<longdesc lang="en">
+DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN
+</longdesc>
+<shortdesc lang="en">DNS domain</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="endpoint_type" unique="0" required="0">
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="no_shared_storage" unique="0" required="0">
+<longdesc lang="en">Deprecated - do not use anymore.</longdesc>
+<shortdesc lang="en">Deprecated - do not use anymore</shortdesc>
+<content type="boolean" default="0" />
+</parameter>
+
+<parameter name="evacuation_delay" unique="0" required="0">
+<longdesc lang="en">
+How long to wait for nova to finish evacuating instances elsewhere
+before starting nova-compute. Only used when the agent detects
+evacuations might be in progress.
+
+You may need to increase the start timeout when increasing this value.
+</longdesc>
+<shortdesc lang="en">Delay to allow evacuations time to complete</shortdesc>
+<content type="integer" default="120" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="600" />
+<action name="stop" timeout="300" />
+<action name="monitor" timeout="20" interval="10" depth="0"/>
+<action name="validate-all" timeout="20" />
+<action name="meta-data" timeout="5" />
+</actions>
+</resource-agent>
+END
+}
+
+#######################################################################
+
+# don't exit on TERM, to test that lrmd makes sure that we do exit
+trap sigterm_handler TERM
+sigterm_handler() {
+ ocf_log info "They use TERM to bring us down. No such luck."
+ return
+}
+
+nova_usage() {
+ cat <<END
+usage: $0 {start|stop|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+}
+
+nova_start() {
+ build_unfence_overlay
+
+ state=$(attrd_updater -p -n evacuate -N ${NOVA_HOST} | sed -e 's/.*value=//' | tr -d '"' )
+ if [ "x$state" = x ]; then
+ : never been fenced
+
+ elif [ "x$state" = xno ]; then
+ : has been evacuated, however it could have been 1s ago
+ ocf_log info "Pausing to give evacuations from ${NOVA_HOST} time to complete"
+ sleep ${OCF_RESKEY_evacuation_delay}
+
+ else
+ while [ "x$state" != "xno" ]; do
+ ocf_log info "Waiting for pending evacuations from ${NOVA_HOST}"
+ state=$(attrd_updater -p -n evacuate -N ${NOVA_HOST} | sed -e 's/.*value=//' | tr -d '"' )
+ sleep 5
+ done
+
+ ocf_log info "Pausing to give evacuations from ${NOVA_HOST} time to complete"
+ sleep ${OCF_RESKEY_evacuation_delay}
+ fi
+
+ touch "$statefile"
+
+ return $OCF_SUCCESS
+}
+
+nova_stop() {
+ rm -f "$statefile"
+ return $OCF_SUCCESS
+}
+
+nova_monitor() {
+ if [ ! -f "$statefile" ]; then
+ return $OCF_NOT_RUNNING
+ fi
+
+ return $OCF_SUCCESS
+}
+
+nova_notify() {
+ return $OCF_SUCCESS
+}
+
+build_unfence_overlay() {
+ fence_options=""
+
+ if [ -z "${OCF_RESKEY_auth_url}" ]; then
+ candidates=$(/usr/sbin/stonith_admin -l ${NOVA_HOST})
+ for candidate in ${candidates}; do
+ pcs stonith show $d | grep -q fence_compute
+ if [ $? = 0 ]; then
+ ocf_log info "Unfencing nova based on: $candidate"
+ fence_auth=$(pcs stonith show $candidate | grep Attributes: | sed -e s/Attributes:// -e s/-/_/g -e 's/[^ ]\+=/OCF_RESKEY_\0/g' -e s/passwd/password/g)
+ eval "export $fence_auth"
+ break
+ fi
+ done
+ fi
+
+ # Copied from NovaEvacuate
+ if [ -z "${OCF_RESKEY_auth_url}" ]; then
+ ocf_exit_reason "auth_url not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}"
+
+ if [ -z "${OCF_RESKEY_username}" ]; then
+ ocf_exit_reason "username not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -l ${OCF_RESKEY_username}"
+
+ if [ -z "${OCF_RESKEY_password}" ]; then
+ ocf_exit_reason "password not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -p ${OCF_RESKEY_password}"
+
+ if [ -z "${OCF_RESKEY_tenant_name}" ]; then
+ ocf_exit_reason "tenant_name not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
+
+ if [ -n "${OCF_RESKEY_domain}" ]; then
+ fence_options="${fence_options} -d ${OCF_RESKEY_domain}"
+ fi
+
+ if [ -n "${OCF_RESKEY_region_name}" ]; then
+ fence_options="${fence_options} \
+ --region-name ${OCF_RESKEY_region_name}"
+ fi
+
+ if [ -n "${OCF_RESKEY_insecure}" ]; then
+ if ocf_is_true "${OCF_RESKEY_insecure}"; then
+ fence_options="${fence_options} --insecure"
+ fi
+ fi
+
+ if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
+ if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
+ fence_options="${fence_options} --no-shared-storage"
+ fi
+ fi
+
+ if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
+ case ${OCF_RESKEY_endpoint_type} in
+ adminURL|publicURL|internalURL)
+ ;;
+ *)
+ ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type}" \
+ "not valid. Use adminURL or publicURL or internalURL"
+ exit $OCF_ERR_CONFIGURED
+ ;;
+ esac
+ fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
+ fi
+
+ mkdir -p /run/systemd/system/openstack-nova-compute.service.d
+ cat<<EOF>/run/systemd/system/openstack-nova-compute.service.d/unfence-20.conf
+[Service]
+ExecStartPost=/sbin/fence_compute ${fence_options} -o on -n ${NOVA_HOST}
+EOF
+}
+
+nova_validate() {
+ rc=$OCF_SUCCESS
+
+ check_binary crudini
+ check_binary nova-compute
+ check_binary fence_compute
+
+ if [ ! -f /etc/nova/nova.conf ]; then
+ ocf_exit_reason "/etc/nova/nova.conf not found"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ # Is the state directory writable?
+ state_dir=$(dirname $statefile)
+ touch "$state_dir/$$"
+ if [ $? != 0 ]; then
+ ocf_exit_reason "Invalid state directory: $state_dir"
+ return $OCF_ERR_ARGS
+ fi
+ rm -f "$state_dir/$$"
+
+ NOVA_HOST=$(crudini --get /etc/nova/nova.conf DEFAULT host 2>/dev/null)
+ if [ $? = 1 ]; then
+ short_host=$(uname -n | awk -F. '{print $1}')
+ if [ "x${OCF_RESKEY_domain}" != x ]; then
+ NOVA_HOST=${short_host}.${OCF_RESKEY_domain}
+ else
+ NOVA_HOST=$(uname -n)
+ fi
+ fi
+
+ if [ $rc != $OCF_SUCCESS ]; then
+ exit $rc
+ fi
+ return $rc
+}
+
+statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
+
+: ${OCF_RESKEY_evacuation_delay=120}
+case $__OCF_ACTION in
+meta-data) meta_data
+ exit $OCF_SUCCESS
+ ;;
+usage|help) nova_usage
+ exit $OCF_SUCCESS
+ ;;
+esac
+
+case $__OCF_ACTION in
+start) nova_validate; nova_start;;
+stop) nova_stop;;
+monitor) nova_validate; nova_monitor;;
+notify) nova_notify;;
+validate-all) exit $OCF_SUCCESS;;
+*) nova_usage
+ exit $OCF_ERR_UNIMPLEMENTED
+ ;;
+esac
+rc=$?
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
+exit $rc
+
diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
--- a/heartbeat/NovaEvacuate 1970-01-01 01:00:00.000000000 +0100
+++ b/heartbeat/NovaEvacuate 2021-08-24 17:59:40.682372770 +0200
@@ -0,0 +1,407 @@
+#!/bin/bash
+#
+# Copyright 2015 Red Hat, Inc.
+#
+# Description: Manages evacuation of nodes running nova-compute
+#
+# Authors: Andrew Beekhof
+#
+# Support: openstack@lists.openstack.org
+# License: Apache Software License (ASL) 2.0
+#
+
+
+#######################################################################
+# Initialization:
+
+###
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+###
+
+: ${__OCF_ACTION=$1}
+
+#######################################################################
+
+meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="NovaEvacuate" version="1.0">
+<version>1.0</version>
+
+<longdesc lang="en">
+Facility for tacking a list of compute nodes and reliably evacuating the ones that fence_evacuate has flagged.
+</longdesc>
+<shortdesc lang="en">Evacuator for OpenStack Nova Compute Server</shortdesc>
+
+<parameters>
+
+<parameter name="auth_url" unique="0" required="1">
+<longdesc lang="en">
+Authorization URL for connecting to keystone in admin context
+</longdesc>
+<shortdesc lang="en">Authorization URL</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="username" unique="0" required="1">
+<longdesc lang="en">
+Username for connecting to keystone in admin context
+</longdesc>
+<shortdesc lang="en">Username</shortdesc>
+</parameter>
+
+<parameter name="password" unique="0" required="1">
+<longdesc lang="en">
+Password for connecting to keystone in admin context
+</longdesc>
+<shortdesc lang="en">Password</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="tenant_name" unique="0" required="1">
+<longdesc lang="en">
+Tenant name for connecting to keystone in admin context.
+Note that with Keystone V3 tenant names are only unique within a domain.
+</longdesc>
+<shortdesc lang="en">Keystone v2 Tenant or v3 Project Name</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="user_domain" unique="0" required="1">
+<longdesc lang="en">
+User's domain name. Used when authenticating to Keystone.
+</longdesc>
+<shortdesc lang="en">Keystone v3 User Domain</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="project_domain" unique="0" required="1">
+<longdesc lang="en">
+Domain name containing project. Used when authenticating to Keystone.
+</longdesc>
+<shortdesc lang="en">Keystone v3 Project Domain</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="endpoint_type" unique="0" required="0">
+<longdesc lang="en">
+Nova API location (internal, public or admin URL)
+</longdesc>
+<shortdesc lang="en">Nova API location (internal, public or admin URL)</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="region_name" unique="0" required="0">
+<longdesc lang="en">
+Region name for connecting to nova.
+</longdesc>
+<shortdesc lang="en">Region name</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="insecure" unique="0" required="0">
+<longdesc lang="en">
+Explicitly allow client to perform "insecure" TLS (https) requests.
+The server's certificate will not be verified against any certificate authorities.
+This option should be used with caution.
+</longdesc>
+<shortdesc lang="en">Allow insecure TLS requests</shortdesc>
+<content type="boolean" default="0" />
+</parameter>
+
+<parameter name="no_shared_storage" unique="0" required="0">
+<longdesc lang="en">
+Disable shared storage recovery for instances. Use at your own risk!
+</longdesc>
+<shortdesc lang="en">Disable shared storage recovery for instances</shortdesc>
+<content type="boolean" default="0" />
+</parameter>
+
+<parameter name="verbose" unique="0" required="0">
+<longdesc lang="en">
+Enable extra logging from the evacuation process
+</longdesc>
+<shortdesc lang="en">Enable debug logging</shortdesc>
+<content type="boolean" default="0" />
+</parameter>
+
+<parameter name="evacuate_delay" unique="0" required="0">
+<longdesc lang="en">
+Allows delaying the nova evacuate API call, e.g. to give a storage array time to clean
+up eventual locks/leases.
+</longdesc>
+<shortdesc lang="en">Nova evacuate delay</shortdesc>
+<content type="integer" default="0" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="20" />
+<action name="stop" timeout="20" />
+<action name="monitor" timeout="600" interval="10" depth="0"/>
+<action name="validate-all" timeout="20" />
+<action name="meta-data" timeout="5" />
+</actions>
+</resource-agent>
+END
+}
+
+#######################################################################
+
+# don't exit on TERM, to test that lrmd makes sure that we do exit
+trap sigterm_handler TERM
+sigterm_handler() {
+ ocf_log info "They use TERM to bring us down. No such luck."
+ return
+}
+
+evacuate_usage() {
+ cat <<END
+usage: $0 {start|stop|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+}
+
+evacuate_stop() {
+ rm -f "$statefile"
+ return $OCF_SUCCESS
+}
+
+evacuate_start() {
+ touch "$statefile"
+ # Do not invole monitor here so that the start timeout can be low
+ return $?
+}
+
+update_evacuation() {
+ attrd_updater -p -n evacuate -Q -N ${1} -v ${2}
+ arc=$?
+ if [ ${arc} != 0 ]; then
+ ocf_log warn "Can not set evacuation state of ${1} to ${2}: ${arc}"
+ fi
+ return ${arc}
+}
+
+handle_evacuations() {
+ while [ $# -gt 0 ]; do
+ node=$1
+ state=$2
+ shift; shift;
+ need_evacuate=0
+
+ case $state in
+ "")
+ ;;
+ no)
+ ocf_log debug "$node is either fine or already handled"
+ ;;
+ yes) need_evacuate=1
+ ;;
+ *@*)
+ where=$(echo $state | awk -F@ '{print $1}')
+ when=$(echo $state | awk -F@ '{print $2}')
+ now=$(date +%s)
+
+ if [ $(($now - $when)) -gt 60 ]; then
+ ocf_log info "Processing partial evacuation of $node by $where at $when"
+ need_evacuate=1
+ else
+ # Give some time for any in-flight evacuations to either complete or fail
+ # Nova won't react well if there are two overlapping requests
+ ocf_log info "Deferring processing partial evacuation of $node by $where at $when"
+ fi
+ ;;
+ esac
+
+ if [ $need_evacuate = 1 ]; then
+ fence_agent="fence_compute"
+
+ if have_binary fence_evacuate
+ then
+ fence_agent="fence_evacuate"
+ fi
+
+ if [ ${OCF_RESKEY_evacuate_delay} != 0 ]; then
+ ocf_log info "Delaying nova evacuate by $OCF_RESKEY_evacuate_delay seconds"
+ sleep ${OCF_RESKEY_evacuate_delay}
+ fi
+
+ ocf_log notice "Initiating evacuation of $node with $fence_agent"
+ $fence_agent ${fence_options} -o status -n ${node}
+ if [ $? = 1 ]; then
+ ocf_log info "Nova does not know about ${node}"
+ # Dont mark as no because perhaps nova is unavailable right now
+ continue
+ fi
+
+ update_evacuation ${node} "$(uname -n)@$(date +%s)"
+ if [ $? != 0 ]; then
+ return $OCF_SUCCESS
+ fi
+
+ $fence_agent ${fence_options} -o off -n $node
+ rc=$?
+
+ if [ $rc = 0 ]; then
+ update_evacuation ${node} no
+ ocf_log notice "Completed evacuation of $node"
+ else
+ ocf_log warn "Evacuation of $node failed: $rc"
+ update_evacuation ${node} yes
+ fi
+ fi
+ done
+
+ return $OCF_SUCCESS
+}
+
+evacuate_monitor() {
+ if [ ! -f "$statefile" ]; then
+ return $OCF_NOT_RUNNING
+ fi
+
+ handle_evacuations $(
+ attrd_updater -n evacuate -A \
+ 2> >(grep -v "attribute does not exist" 1>&2) |
+ sed 's/ value=""/ value="no"/' |
+ tr '="' ' ' |
+ awk '{print $4" "$6}'
+ )
+ return $OCF_SUCCESS
+}
+
+evacuate_validate() {
+ rc=$OCF_SUCCESS
+ fence_options=""
+
+
+ if ! have_binary fence_evacuate; then
+ check_binary fence_compute
+ fi
+
+ # Is the state directory writable?
+ state_dir=$(dirname $statefile)
+ touch "$state_dir/$$"
+ if [ $? != 0 ]; then
+ ocf_exit_reason "Invalid state directory: $state_dir"
+ return $OCF_ERR_ARGS
+ fi
+ rm -f "$state_dir/$$"
+
+ if [ -z "${OCF_RESKEY_auth_url}" ]; then
+ ocf_exit_reason "auth_url not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}"
+
+ if [ -z "${OCF_RESKEY_username}" ]; then
+ ocf_exit_reason "username not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -l ${OCF_RESKEY_username}"
+
+ if [ -z "${OCF_RESKEY_password}" ]; then
+ ocf_exit_reason "password not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -p ${OCF_RESKEY_password}"
+
+ if [ -z "${OCF_RESKEY_tenant_name}" ]; then
+ ocf_exit_reason "tenant_name not configured"
+ exit $OCF_ERR_CONFIGURED
+ fi
+
+ fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
+
+ if [ -n "${OCF_RESKEY_user_domain}" ]; then
+ fence_options="${fence_options} -u ${OCF_RESKEY_user_domain}"
+ fi
+
+ if [ -n "${OCF_RESKEY_project_domain}" ]; then
+ fence_options="${fence_options} -P ${OCF_RESKEY_project_domain}"
+ fi
+
+ if [ -n "${OCF_RESKEY_region_name}" ]; then
+ fence_options="${fence_options} \
+ --region-name ${OCF_RESKEY_region_name}"
+ fi
+
+ if [ -n "${OCF_RESKEY_insecure}" ]; then
+ if ocf_is_true "${OCF_RESKEY_insecure}"; then
+ fence_options="${fence_options} --insecure"
+ fi
+ fi
+
+ if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
+ if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
+ fence_options="${fence_options} --no-shared-storage"
+ fi
+ fi
+
+ if [ -n "${OCF_RESKEY_verbose}" ]; then
+ if ocf_is_true "${OCF_RESKEY_verbose}"; then
+ fence_options="${fence_options} --verbose"
+ fi
+ fi
+
+ if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
+ case ${OCF_RESKEY_endpoint_type} in
+ adminURL|publicURL|internalURL) ;;
+ *)
+ ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type} not valid. Use adminURL or publicURL or internalURL"
+ exit $OCF_ERR_CONFIGURED
+ ;;
+ esac
+ fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
+ fi
+
+ if [ -z "${OCF_RESKEY_evacuate_delay}" ]; then
+ OCF_RESKEY_evacuate_delay=0
+ fi
+
+ if [ $rc != $OCF_SUCCESS ]; then
+ exit $rc
+ fi
+ return $rc
+}
+
+statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
+
+case $__OCF_ACTION in
+ start)
+ evacuate_validate
+ evacuate_start
+ ;;
+ stop)
+ evacuate_stop
+ ;;
+ monitor)
+ evacuate_validate
+ evacuate_monitor
+ ;;
+ meta-data)
+ meta_data
+ exit $OCF_SUCCESS
+ ;;
+ usage|help)
+ evacuate_usage
+ exit $OCF_SUCCESS
+ ;;
+ validate-all)
+ exit $OCF_SUCCESS
+ ;;
+ *)
+ evacuate_usage
+ exit $OCF_ERR_UNIMPLEMENTED
+ ;;
+esac
+rc=$?
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
+exit $rc
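
For reference, a minimal sketch of how the "evacuate" node attribute polled by this agent is driven. The node name "compute-0" below is purely illustrative; in a real deployment the attribute is typically written by fence_compute/fence_evacuate and by update_evacuation() above:

    # Mark a (hypothetical) node for evacuation, reusing the attrd_updater
    # invocation that update_evacuation() issues, here with the value "yes":
    attrd_updater -p -n evacuate -Q -N compute-0 -v yes

    # evacuate_monitor() then lists every node's value:
    attrd_updater -n evacuate -A
    # and expects output along the lines of:
    #   name="evacuate" host="compute-0" value="yes"
    # "yes" triggers an evacuation; while it is in flight the agent rewrites the
    # value to "<handler-node>@<timestamp>" and, once finished, to "no".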

705
python3-syntax-fixes.patch Normal file
View File

@ -0,0 +1,705 @@
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsExportHandler.py 2018-10-08 12:36:31.868765636 +0200
@@ -52,8 +52,8 @@
if not filename == None:
self.exportInstanceToFile(result,filename)
else:
- print 'Filename is needed'
- except Exception,e:
+ print('Filename is needed')
+ except Exception as e:
print(e)
def _optimizeResult(self,result):
keys = result.keys()
@@ -81,9 +81,9 @@
fp = open(fileName,'w')
try :
fp.write(json.dumps(result,indent=4))
- print "success"
+ print("success")
except IOError:
- print "Error: can\'t find file or read data"
+ print("Error: can\'t find file or read data")
finally:
fp.close()
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/ecsImportHandler.py 2018-10-08 12:36:53.882358851 +0200
@@ -16,7 +16,7 @@
if keyValues.has_key('--filename') and len(keyValues['--filename']) > 0:
filename = keyValues['--filename'][0]
else:
- print "A profile is needed! please use \'--filename\' and add the profile name."
+ print("A profile is needed! please use \'--filename\' and add the profile name.")
return filename
def getInstanceCount(self,keyValues):
@@ -25,7 +25,7 @@
if keyValues['--instancecount'][0].isdigit() and int(keyValues['--instancecount'][0]) >= 0:
count = keyValues['--instancecount'][0]
else:
- print "InstanceCount should be a positive number! The default value(1) will be used!"
+ print("InstanceCount should be a positive number! The default value(1) will be used!")
return int(count)
def getSubOperations(self,cmd,operation):
@@ -65,8 +65,8 @@
_newkeyValues["RegionId"] = newkeyValues["RegionId"]
self._handExtraOperation(cmd,extraOperation,_newkeyValues,version,secureRequest)
else:
- print "InstanceId is need!"
- except Exception,e:
+ print("InstanceId is need!")
+ except Exception as e:
print(e)
def _handExtraOperation(self,cmd,extraOperation,keyValues,version , secureRequest = False):
@@ -81,7 +81,7 @@
response.display_response("error", result, "json")
else:
response.display_response(extraOperation, result, "json")
- except Exception,e:
+ except Exception as e:
print(e)
@@ -127,7 +127,7 @@
'''
if data.has_key('InstanceId') and len(data['InstanceId']) > 0:
instanceId = data['InstanceId']
- except Exception,e:
+ except Exception as e:
pass
finally:
return instanceId
@@ -156,5 +156,5 @@
if __name__ == "__main__":
handler = EcsImportHandler()
handler.getKVFromJson('ttt')
- print handler.getKVFromJson('ttt')
+ print(handler.getKVFromJson('ttt'))
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsExportHandler.py 2018-10-08 12:37:08.373091088 +0200
@@ -77,8 +77,8 @@
if not filename == None:
self.exportInstanceToFile(result,filename)
else:
- print 'Filename is needed'
- except Exception,e:
+ print('Filename is needed')
+ except Exception as e:
print(e)
def exportInstanceToFile(self, result, filename):
@@ -96,9 +96,9 @@
fp = open(fileName,'w')
try :
fp.write(json.dumps(result,indent=4))
- print "success"
+ print("success")
except IOError:
- print "Error: can\'t find file or read data"
+ print("Error: can\'t find file or read data")
finally:
fp.close()
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/rdsImportHandler.py 2018-10-08 12:36:20.997966509 +0200
@@ -26,7 +26,7 @@
count = keyValues[import_count][0]
else:
pass
- # print "InstanceCount should be a positive number! The default value(1) will be used!"
+ # print("InstanceCount should be a positive number! The default value(1) will be used!")
return int(count), "InstanceCount is "+str(count)+" created."
def getSubOperations(self,cmd,operation):
@@ -46,7 +46,7 @@
if self.apiHandler.needSetDefaultRegion(cmdInstance, newkeyValues):
newkeyValues["RegionId"] = [self.extensionHandler.getUserRegion()]
newkeyValues["ClientToken"] = [self.random_str()]
- # print newkeyValues.keys()
+ # print(newkeyValues.keys())
# return
# self._setAttr(cmdInstance, newkeyValues) # set all key values in instance
# self.apiHandler.changeEndPoint(cmdInstance, newkeyValues)
@@ -58,7 +58,7 @@
response.display_response("error", result, "json")
else:
response.display_response(item, result, "json")
- except Exception,e:
+ except Exception as e:
print(e)
def getKVFromJson(self,filename):
@@ -77,7 +77,7 @@
fp = open(fileName,'r')
data=json.loads(fp.read())
keys = data.keys()
- # print keys, type(data['Items']['DBInstanceAttribute'][0])
+ # print(keys, type(data['Items']['DBInstanceAttribute'][0]))
# instanceAttribute = data['Items']['DBInstanceAttribute'][0]
items = data['Items']['DBInstanceAttribute'][0]
keys = items.keys()
@@ -130,7 +130,7 @@
if __name__ == "__main__":
handler = RdsImportDBInstanceHandler()
# handler.getKVFromJson('ttt')
- # print handler.getKVFromJson('ttt')
- print handler.random_str()
+ # print(handler.getKVFromJson('ttt'))
+ print(handler.random_str())
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/advance/userProfileHandler.py 2018-10-08 12:11:19.743703469 +0200
@@ -24,9 +24,9 @@
_value = keyValues[ProfileCmd.name][0] # use the first value
self.extensionCliHandler.setUserProfile(_value)
else:
- print "Do your forget profile name? please use \'--name\' and add the profile name."
+ print("Do your forget profile name? please use \'--name\' and add the profile name.")
else:
- print "[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?"
+ print("[", cmd, "] is not right, do you mean "+ProfileCmd.useProfile+" ?")
def addProfileCmd(self, cmd, keyValues):
userKey = ''
@@ -52,12 +52,12 @@
finally:
f.close()
else:
- print "[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?"
+ print("[", cmd, "] is not right, do you mean "+ProfileCmd.addProfile+" ?")
if __name__ == "__main__":
handler = ProfileHandler()
handler.handleProfileCmd("useprofile", {'--name':["profile444"]})
- print handler.extensionCliHandler.getUserProfile()
+ print(handler.extensionCliHandler.getUserProfile())
handler.addProfileCmd("addProfile", {})
- handler.addProfileCmd("addProfile", {'--name':["profile2222"]})
\ No newline at end of file
+ handler.addProfileCmd("addProfile", {'--name':["profile2222"]})
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliHelp.py 2018-10-08 12:12:25.602486634 +0200
@@ -24,14 +24,14 @@
self.openApiDataHandler = aliyunOpenApiData.aliyunOpenApiDataHandler()
def showUsage(self):
- print "usage: aliyuncli <command> <operation> [options and parameters]"
+ print("usage: aliyuncli <command> <operation> [options and parameters]")
def showExample(self):
- print "show example"
+ print("show example")
def showCmdError(self, cmd):
self.showUsage()
- print "<aliyuncli> the valid command as follows:\n"
+ print("<aliyuncli> the valid command as follows:\n")
cmds = self.openApiDataHandler.getApiCmds()
self.printAsFormat(cmds)
@@ -44,7 +44,7 @@
error.printInFormat("Wrong version", "The sdk version is not exit.")
return None
self.showUsage()
- print "["+cmd+"]","valid operations as follows:\n"
+ print("["+cmd+"]","valid operations as follows:\n")
operations = self.openApiDataHandler.getApiOperations(cmd, version)
extensions = self.openApiDataHandler.getExtensionOperationsFromCmd(cmd)
operations.update(extensions)
@@ -56,8 +56,8 @@
self.printAsFormat(operations)
def showParameterError(self, cmd, operation, parameterlist):
- print 'usage: aliyuncli <command> <operation> [options and parameters]'
- print '['+cmd+"."+operation+']: current operation can uses parameters as follow :\n'
+ print('usage: aliyuncli <command> <operation> [options and parameters]')
+ print('['+cmd+"."+operation+']: current operation can uses parameters as follow :\n')
self.printAsFormat(parameterlist)
pass
@@ -72,7 +72,7 @@
tmpList.append(item)
count = count+1
if len(tmpList) == 2:
- print '{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10')
+ print('{0:40}'.format(tmpList[0]),'\t|',format(tmpList[1],'<10'))
tmpList = list()
if len(tmpList) == 1 and count == len(mlist):
- print tmpList[0]
\ No newline at end of file
+ print(tmpList[0])
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliMain.py 2018-10-08 12:12:42.799168903 +0200
@@ -91,7 +91,7 @@
keyValues["RegionId"] = [self.extensionHandler.getUserRegion()]
#check necessaryArgs as:accesskeyid accesskeysecret regionId
if not self.handler.hasNecessaryArgs(keyValues):
- print 'accesskeyid/accesskeysecret/regionId is absence'
+ print('accesskeyid/accesskeysecret/regionId is absence')
return
result = self.handler.getResponse(cmd,operation,className,cmdInstance,keyValues,secureRequest)
if result is None:
@@ -102,7 +102,7 @@
else:
response.display_response(operation, result, outPutFormat,keyValues)
else:
- print 'aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com'
+ print('aliyuncli internal error, please contact: haowei.yao@alibaba-inc.com')
elif self.handler.isAvailableExtensionOperation(cmd, operation):
if self.args.__len__() >= 3 and self.args[2] == 'help':
import commandConfigure
@@ -125,7 +125,7 @@
def showInstanceAttribute(self, cmd, operation, classname):
if self.args.__len__() >= 3 and self.args[2] == "help":
self.helper.showParameterError(cmd, operation, self.completer._help_to_show_instance_attribute(classname))
- #print self.completer._help_to_show_instance_attribute(cmdInstance)
+ #print(self.completer._help_to_show_instance_attribute(cmdInstance))
return True
return False
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliParser.py 2018-10-08 12:12:54.764947819 +0200
@@ -141,7 +141,7 @@
_key = keyValues[keystr][0]
if keyValues.has_key(secretstr) and keyValues[secretstr].__len__() > 0:
_secret = keyValues[secretstr][0]
- #print "accesskeyid: ", _key , "accesskeysecret: ",_secret
+ #print("accesskeyid: ", _key , "accesskeysecret: ",_secret)
return _key, _secret
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunCliUpgrade.py 2018-10-08 12:13:23.672413710 +0200
@@ -161,12 +161,12 @@
if __name__ == "__main__":
upgradeHandler = aliyunCliUpgradeHandler()
- # print upgradeHandler.getLatestTimeFromServer()
+ # print(upgradeHandler.getLatestTimeFromServer())
# flag, url = upgradeHandler.isNewVersionReady()
# if flag:
- # print url
+ # print(url)
# else:
- # print "current version is latest one"
- # print "final test:"
- print upgradeHandler.checkForUpgrade()
- print upgradeHandler.handleUserChoice("N")
+ # print("current version is latest one")
+ # print("final test:")
+ print(upgradeHandler.checkForUpgrade())
+ print(upgradeHandler.handleUserChoice("N"))
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunExtensionCliHandler.py 2018-10-08 12:14:46.830877248 +0200
@@ -127,35 +127,35 @@
# this api will show help page when user input aliyuncli help(-h or --help)
def showAliyunCliHelp(self):
- print color.bold+"ALIYUNCLI()"+color.end
- print color.bold+"\nNAME"+color.end
- print "\taliyuncli -"
- print color.bold+"\nDESCRIPTION"+color.end
- print "\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. "
- print color.bold+"\nSYNOPSIS"+color.end
- print "\taliyuncli <command> <operation> [options and parameters]"
- print "\n\taliyuncli has supported command completion now. The detail you can check our site."
- print color.bold+"OPTIONS"+color.end
- print color.bold+"\tconfigure"+color.end
- print "\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)"
- print color.bold+"\n\t--output"+color.end+" (string)"
- print "\n\tThe formatting style for command output."
- print "\n\to json"
- print "\n\to text"
- print "\n\to table"
+ print(color.bold+"ALIYUNCLI()"+color.end)
+ print(color.bold+"\nNAME"+color.end)
+ print("\taliyuncli -")
+ print(color.bold+"\nDESCRIPTION"+color.end)
+ print("\tThe Aliyun Command Line Interface is a unified tool to manage your aliyun services. ")
+ print(color.bold+"\nSYNOPSIS"+color.end)
+ print("\taliyuncli <command> <operation> [options and parameters]")
+ print("\n\taliyuncli has supported command completion now. The detail you can check our site.")
+ print(color.bold+"OPTIONS"+color.end)
+ print(color.bold+"\tconfigure"+color.end)
+ print("\n\tThis option will help you save the key and secret and your favorite output format (text, json or table)")
+ print(color.bold+"\n\t--output"+color.end+" (string)")
+ print("\n\tThe formatting style for command output.")
+ print("\n\to json")
+ print("\n\to text")
+ print("\n\to table")
- print color.bold+"\n\t--secure"+color.end
- print "\n\tMaking secure requests(HTTPS) to service"
+ print(color.bold+"\n\t--secure"+color.end)
+ print("\n\tMaking secure requests(HTTPS) to service")
- print color.bold+"\nAVAILABLE SERVICES"+color.end
- print "\n\to ecs"
- print "\n\to ess"
- print "\n\to mts"
- print "\n\to rds"
- print "\n\to slb"
+ print(color.bold+"\nAVAILABLE SERVICES"+color.end)
+ print("\n\to ecs")
+ print("\n\to ess")
+ print("\n\to mts")
+ print("\n\to rds")
+ print("\n\to slb")
def showCurrentVersion(self):
- print self._version
+ print(self._version)
def findConfigureFilePath(self):
homePath = ""
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunOpenApiData.py 2018-10-08 12:16:00.008525187 +0200
@@ -39,9 +39,9 @@
def oss_notice():
- print "OSS operation in aliyuncli is not supported."
- print "Please use 'ossutil' command line tool for Alibaba Cloud OSS operation."
- print "You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n"
+ print("OSS operation in aliyuncli is not supported.")
+ print("Please use 'ossutil' command line tool for Alibaba Cloud OSS operation.")
+ print("You can find information about 'ossutil' here: https://github.com/aliyun/ossutil.\n")
try:
@@ -391,22 +391,22 @@
return jsonobj
except ImportError as e:
- print module, 'is not exist!'
+ print(module, 'is not exist!')
sys.exit(1)
except ServerException as e:
error = cliError.error()
error.printInFormat(e.get_error_code(), e.get_error_msg())
- print "Detail of Server Exception:\n"
- print str(e)
+ print("Detail of Server Exception:\n")
+ print(str(e))
sys.exit(1)
except ClientException as e:
- # print e.get_error_msg()
+ # print(e.get_error_msg())
error = cliError.error()
error.printInFormat(e.get_error_code(), e.get_error_msg())
- print "Detail of Client Exception:\n"
- print str(e)
+ print("Detail of Client Exception:\n")
+ print(str(e))
sys.exit(1)
def getSetFuncs(self,classname):
@@ -549,6 +549,6 @@
if __name__ == '__main__':
handler = aliyunOpenApiDataHandler()
- print "###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance')
- print "###############",handler.isAvailableOperation('ecs', 'DescribeInstances')
- print "###############",handler.getExtensionOperationsFromCmd('ecs')
+ print("###############",handler.isAvailableExtensionOperation('ecs', 'exportInstance'))
+ print("###############",handler.isAvailableOperation('ecs', 'DescribeInstances'))
+ print("###############",handler.getExtensionOperationsFromCmd('ecs'))
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/aliyunSdkConfigure.py 2018-10-08 12:16:14.865250686 +0200
@@ -44,7 +44,7 @@
filename=self.fileName
self.writeCmdVersionToFile(cmd,version,filename)
else:
- print "A argument is needed! please use \'--version\' and add the sdk version."
+ print("A argument is needed! please use \'--version\' and add the sdk version.")
return
def showVersions(self,cmd,operation,stream=None):
configureVersion='(not configure)'
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/commandConfigure.py 2018-10-08 12:17:34.763774477 +0200
@@ -55,7 +55,7 @@
# _mlist = self.rds.extensionOptions[self.rds.exportDBInstance]
self.appendList(parameterList, self.rds.extensionOptions[self.rds.exportDBInstance])
if operation.lower() == self.rds.importDBInstance.lower():
- # print "haha", (self.rds.extensionOptions[self.rds.importDBInstance])
+ # print("haha", (self.rds.extensionOptions[self.rds.importDBInstance]))
# parameterList.append(self.rds.extensionOptions[self.rds.importDBInstance])
self.appendList(parameterList, self.rds.extensionOptions[self.rds.importDBInstance])
@@ -89,8 +89,8 @@
importInstance:['count','filename']}
if __name__ == '__main__':
- # print type(rds.extensionOperations)
- # print type(rds.extensionOptions)
- # print rds.extensionOptions['ll']
+ # print(type(rds.extensionOperations))
+ # print(type(rds.extensionOptions))
+ # print(rds.extensionOptions['ll'])
configure = commandConfigure()
- print configure.showExtensionOperationHelp("ecs", "ExportInstance")
+ print(configure.showExtensionOperationHelp("ecs", "ExportInstance"))
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/configure.py 2018-10-08 12:17:59.282322043 +0200
@@ -577,7 +577,7 @@
operation = operations[i].strip()
self._getKeyFromSection(profilename,operation)
else:
- print 'The correct usage:aliyuncli configure get key --profile profilename'
+ print('The correct usage:aliyuncli configure get key --profile profilename')
return
def _getKeyFromSection(self,profilename,key):
@@ -591,7 +591,7 @@
elif key in _WRITE_TO_CONFIG_FILE :
self._getKeyFromFile(config_filename,sectionName,key)
else:
- print key,'=','None'
+ print(key,'=','None')
def _getKeyFromFile(self,filename,section,key):
if os.path.isfile(filename):
with open(filename, 'r') as f:
@@ -600,9 +600,9 @@
start = self._configWriter.hasSectionName(section,contents)[1]
end = self._configWriter._getSectionEnd(start,contents)
value = self._configWriter._getValueInSlice(start,end,key,contents)
- print key,'=',value
+ print(key,'=',value)
else:
- print key,'=None'
+ print(key,'=None')
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/handleEndPoint.py 2018-10-08 12:18:25.178844179 +0200
@@ -2,7 +2,7 @@
def handleEndPoint(cmd,operation,keyValues):
if not hasNecessaryArgs(keyValues):
- print 'RegionId/EndPoint is absence'
+ print('RegionId/EndPoint is absence')
return
if cmd is not None:
cmd = cmd.capitalize()
@@ -25,7 +25,7 @@
from aliyunsdkcore.profile.region_provider import modify_point
modify_point(cmd,regionId,endPoint)
except Exception as e:
- print e
+ print(e)
pass
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/oasadp/oasHandler.py 2018-10-08 12:18:45.458469966 +0200
@@ -111,14 +111,14 @@
if os.path.isfile(cfgfile):
ans = raw_input('File existed. Do you wish to overwrite it?(y/n)')
if ans.lower() != 'y':
- print 'Answer is No. Quit now'
+ print('Answer is No. Quit now')
return
with open(cfgfile, 'w+') as f:
config.write(f)
- print 'Your configuration is saved to %s.' % cfgfile
+ print('Your configuration is saved to %s.' % cfgfile)
def cmd_help(args):
- print HELP
+ print(HELP)
def add_config(parser):
parser.add_argument('--host', type=str, help='service host')
@@ -161,7 +161,7 @@
return CMD_LIST.keys()
def handleOas(pars=None):
if pars is None:
- print HELP
+ print(HELP)
sys.exit(0)
parser = ArgumentParser(prog="aliyuncli oas",formatter_class=ArgumentDefaultsHelpFormatter)
diff -uNr a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py
--- a/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2018-01-24 04:08:33.000000000 +0100
+++ b/bundled/aliyun/aliyun-cli/aliyuncli/paramOptimize.py 2018-10-08 12:18:59.713206928 +0200
@@ -61,7 +61,7 @@
data = f.read()
return data
except (OSError, IOError) as e:
- print e
+ print(e)
def _getParamFromUrl(prefix,value,mode):
req = urllib2.Request(value)
@@ -74,7 +74,7 @@
errorMsg='Get the wrong content'
errorClass.printInFormat(response.getcode(), errorMsg)
except Exception as e:
- print e
+ print(e)
PrefixMap = {'file://': _getParamFromFile,
'fileb://': _getParamFromFile
@@ -86,4 +86,4 @@
'fileb://': {'mode': 'rb'},
#'http://': {},
#'https://': {}
- }
\ No newline at end of file
+ }
diff -uNr a/bundled/aliyun/colorama/demos/demo07.py b/bundled/aliyun/colorama/demos/demo07.py
--- a/bundled/aliyun/colorama/demos/demo07.py 2015-01-06 11:41:47.000000000 +0100
+++ b/bundled/aliyun/colorama/demos/demo07.py 2018-10-08 12:20:25.598622106 +0200
@@ -16,10 +16,10 @@
3a4
"""
colorama.init()
- print "aaa"
- print "aaa"
- print "aaa"
- print forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4"
+ print("aaa")
+ print("aaa")
+ print("aaa")
+ print(forward() + up(2) + "b" + up() + back(2) + "1" + forward() + "2" + back(3) + down(2) + "3" + forward() + "4")
if __name__ == '__main__':
diff -uNr a/bundled/aliyun/pycryptodome/Doc/conf.py b/bundled/aliyun/pycryptodome/Doc/conf.py
--- a/bundled/aliyun/pycryptodome/Doc/conf.py 2018-07-10 21:32:46.000000000 +0200
+++ b/bundled/aliyun/pycryptodome/Doc/conf.py 2018-10-08 12:08:11.122188094 +0200
@@ -15,7 +15,7 @@
# Modules to document with autodoc are in another directory
sys.path.insert(0, os.path.abspath('../lib'))
-print sys.path
+print(sys.path)
# Mock existance of native modules
from Crypto.Util import _raw_api
diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py b/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py
--- a/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py 2018-07-10 21:32:46.000000000 +0200
+++ b/bundled/aliyun/pycryptodome/lib/Crypto/Math/Primality.py 2018-10-08 12:08:11.123188075 +0200
@@ -302,7 +302,7 @@
randfunc = kwargs.pop("randfunc", None)
prime_filter = kwargs.pop("prime_filter", lambda x: True)
if kwargs:
- print "Unknown parameters:", kwargs.keys()
+ print("Unknown parameters:", kwargs.keys())
if exact_bits is None:
raise ValueError("Missing exact_bits parameter")
@@ -341,7 +341,7 @@
exact_bits = kwargs.pop("exact_bits", None)
randfunc = kwargs.pop("randfunc", None)
if kwargs:
- print "Unknown parameters:", kwargs.keys()
+ print("Unknown parameters:", kwargs.keys())
if randfunc is None:
randfunc = Random.new().read
diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py b/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py
--- a/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py 2018-07-10 21:32:46.000000000 +0200
+++ b/bundled/aliyun/pycryptodome/lib/Crypto/PublicKey/ECC.py 2018-10-08 12:08:11.124188057 +0200
@@ -912,4 +912,4 @@
count = 30
for x in xrange(count):
_ = point * d
- print (time.time() - start) / count * 1000, "ms"
+ print((time.time() - start) / count * 1000, "ms")
diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py
--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py 2018-07-10 21:32:46.000000000 +0200
+++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_AES.py 2018-10-08 12:08:11.124188057 +0200
@@ -1276,7 +1276,7 @@
tests += make_block_tests(AES, "AESNI", test_data, {'use_aesni': True})
tests += [ TestMultipleBlocks(True) ]
else:
- print "Skipping AESNI tests"
+ print("Skipping AESNI tests")
return tests
if __name__ == '__main__':
diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py
--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py 2018-07-10 21:32:46.000000000 +0200
+++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_GCM.py 2018-10-08 12:08:11.125188038 +0200
@@ -894,7 +894,7 @@
if config.get('slow_tests'):
tests += list_test_cases(NISTTestVectorsGCM_no_clmul)
else:
- print "Skipping test of PCLMULDQD in AES GCM"
+ print("Skipping test of PCLMULDQD in AES GCM")
return tests
diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py
--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py 2018-07-10 21:32:46.000000000 +0200
+++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py 2018-10-08 12:08:11.125188038 +0200
@@ -39,7 +39,7 @@
"""Convert a text string with bytes in hex form to a byte string"""
clean = b(rws(t))
if len(clean)%2 == 1:
- print clean
+ print(clean)
raise ValueError("Even number of characters expected")
return a2b_hex(clean)
diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py
--- a/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py 2018-07-10 21:32:46.000000000 +0200
+++ b/bundled/aliyun/pycryptodome/lib/Crypto/SelfTest/__main__.py 2018-10-08 12:08:11.126188020 +0200
@@ -25,11 +25,11 @@
slow_tests = not "--skip-slow-tests" in sys.argv
if not slow_tests:
- print "Skipping slow tests"
+ print("Skipping slow tests")
wycheproof_warnings = "--wycheproof-warnings" in sys.argv
if wycheproof_warnings:
- print "Printing Wycheproof warnings"
+ print("Printing Wycheproof warnings")
config = {'slow_tests' : slow_tests, 'wycheproof_warnings' : wycheproof_warnings }
SelfTest.run(stream=sys.stdout, verbosity=1, config=config)
diff -uNr a/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py b/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py
--- a/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py 2018-07-10 21:32:46.000000000 +0200
+++ b/bundled/aliyun/pycryptodome/lib/Crypto/Util/RFC1751.py 2018-10-08 12:08:11.126188020 +0200
@@ -369,13 +369,13 @@
]
for key, words in data:
- print 'Trying key', key
+ print('Trying key', key)
key=binascii.a2b_hex(key)
w2=key_to_english(key)
if w2!=words:
- print 'key_to_english fails on key', repr(key), ', producing', str(w2)
+ print('key_to_english fails on key', repr(key), ', producing', str(w2))
k2=english_to_key(words)
if k2!=key:
- print 'english_to_key fails on key', repr(key), ', producing', repr(k2)
+ print('english_to_key fails on key', repr(key), ', producing', repr(k2))
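
The hunks above are a mechanical Python 2 to 3 conversion of the bundled aliyun, colorama and pycryptodome sources: print statements become print() calls and "except E, e:" becomes "except E as e:". As a hedged sketch, most of an equivalent patch could be regenerated with lib2to3 if the bundles are ever refreshed (paths illustrative, assuming pristine sources under a/ and a working copy under b/):

    # Apply only the print and except fixers in place, then diff the trees,
    # mirroring the "diff -uNr a/... b/..." headers used throughout this patch:
    2to3 -f print -f except -w b/bundled
    diff -uNr a/bundled b/bundled > python3-syntax-fixes.patch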

1856
resource-agents.spec Normal file

File diff suppressed because it is too large Load Diff

9
sources Normal file
View File

@ -0,0 +1,9 @@
SHA512 (ClusterLabs-resource-agents-55a4e2c9.tar.gz) = 0b8dbf466cc4be3e165e4fb3b6a3ce93a6b49a7f4a7dc27916bfe3ec0b5de1e9df5c114d255608fb6ba1ca945ba49a4626e95cd28d6852e2e09d099d6edcb00b
SHA512 (aliyun-cli-2.1.10.tar.gz) = 1c883e1116b695ff87f81bebc506df04004d097157cd817ac8dc81a18bc4df308579c5c34b3396a90a59cd07f5d9079002295231d999fa26eaf56914ea6ede9f
SHA512 (aliyun-python-sdk-core-2.13.1.tar.gz) = c9eaccf3ed06ff2f5edc89d605511223650816ae3b192e6149f1b113d1d04c05c5220bdd588dc83024486748354436fec65fa59f5858befaa7cf9524dbb6da19
SHA512 (aliyun-python-sdk-ecs-4.9.3.tar.gz) = 83b35b7e774fa8892106f771731cc11ca823fe3d6f3e2f5bc5f075e475623573b9123e5ecb2a750d13ebcda9bc76242485636d2d3284c3eec89afcb4ec3070a4
SHA512 (aliyun-python-sdk-vpc-3.0.2.tar.gz) = e9e4f8224f828a0e0737e4515799e62e4d3808ef2985ae733c8dbe88961daae2d7524000d1ec09e6a5d7ec9491f1e29365e61a4bcfa4b6dbf5a4ec338386e209
SHA512 (colorama-0.3.3.tar.gz) = 2e960ee25f89ca4bc21fc5ab7dac12bd09c9139c4af04a5131ead31f01bb86af23e749e85dafe0bda814b032cc917ad03c0152b333eb532646603b470adbbb64
SHA512 (google-cloud-sdk-360.0.0-linux-x86_64.tar.gz) = 0e441359edd981038310c7e66ed3cbdaff1c5b24264bdebdca351ea5cbdef7b54b8d9d56ee45c713f467e65f1bc949c7f5175cd06ed16eae05bd589b187b6260
SHA512 (pycryptodome-3.6.4.tar.gz) = b565acf2d4dad80842a677dac2e69719dedb870d93d35948f3ef04da120c89fdf80f5b08864c182e2537ff60bbce8487cec6bfe8bb9acc1833194a667932a5c6
SHA512 (pyroute2-0.4.13.tar.gz) = 7a86ef38c4892198a29b688b225df9b7fe7761a1685d7bf0430252783d93cfb13a52c82e05ba808f11b82ff3deac178585487eaa3adf77487a4e8402987fe646
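
These digests use the BSD-style "SHA512 (file) = hash" tag format. Assuming the tarballs listed in .gitignore have been fetched into the same directory (for example from the dist-git lookaside cache), recent GNU coreutils should be able to verify them directly against this file:

    # Check every bundled tarball against the digests above
    sha512sum -c sources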