forked from rpms/cloud-init

commit b677b9bb2d: AlmaLinux changes
(new file, 43 lines)

From 244a3f9059fc95a5e644bd7868aed8060d9edc61 Mon Sep 17 00:00:00 2001
From: Eduardo Otubo <otubo@redhat.com>
Date: Fri, 4 Feb 2022 16:04:31 +0100
Subject: [PATCH] Add _netdev option to mount Azure ephemeral disk (#1213)

RH-Author: Eduardo Otubo <otubo@redhat.com>
RH-MergeRequest: 19: Add _netdev option to mount Azure ephemeral disk (#1213)
RH-Commit: [1/1] e44291a50634594b8a0505cab3415d5c58cc34c4 (otubo/cloud-init-src)
RH-Bugzilla: 1998445
RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

The ephemeral disk depends on a functional network to be mounted. Even
though it depends on cloud-init.service, sometimes an ordering cycle is
noticed on the instance. If the option "_netdev" is added the problem is
gone.

rhbz: #1998445

Signed-off-by: Eduardo Otubo <otubo@redhat.com>
---
 cloudinit/config/cc_mounts.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index c22d1698..5125f17c 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -362,7 +362,9 @@ def handle(_name, cfg, cloud, log, _args):
         def_mnt_opts = "defaults,nobootwait"
     uses_systemd = cloud.distro.uses_systemd()
     if uses_systemd:
-        def_mnt_opts = "defaults,nofail,x-systemd.requires=cloud-init.service"
+        def_mnt_opts = (
+            "defaults,nofail, x-systemd.requires=cloud-init.service, _netdev"
+        )

     defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
     defvals = cfg.get("mount_default_fields", defvals)
--
2.27.0
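Note: _netdev marks a mount as network-dependent, so systemd orders it after
the network is up instead of pulling it into the early local-fs ordering
cycle described above. As a rough illustration, an /etc/fstab entry written
by cc_mounts with these defaults would look like the following (the device
path and mount point are illustrative assumptions, not part of the patch):

    /dev/disk/cloud/azure_resource-part1  /mnt  auto  defaults,nofail,x-systemd.requires=cloud-init.service,_netdev,comment=cloudconfig  0  2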
SOURCES/ci-Add-flexibility-to-IMDS-api-version-793.patch (new file, 295 lines)

From f844e9c263e59a623ca8c647bd87bf4f91374d54 Mon Sep 17 00:00:00 2001
From: Thomas Stringer <thstring@microsoft.com>
Date: Wed, 3 Mar 2021 11:07:43 -0500
Subject: [PATCH 1/7] Add flexibility to IMDS api-version (#793)

RH-Author: Eduardo Otubo <otubo@redhat.com>
RH-MergeRequest: 18: Add support for userdata on Azure from IMDS
RH-Commit: [1/7] 99a3db20e3f277a2f12ea21e937e06939434a2ca (otubo/cloud-init-src)
RH-Bugzilla: 2042351
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Add flexibility to IMDS api-version by having both a desired IMDS
api-version and a minimum api-version. The desired api-version will
be used first, and if that fails it will fall back to the minimum
api-version.
---
 cloudinit/sources/DataSourceAzure.py          | 113 ++++++++++++++----
 tests/unittests/test_datasource/test_azure.py |  42 ++++++-
 2 files changed, 129 insertions(+), 26 deletions(-)

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 553b5a7e..de1452ce 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -78,17 +78,15 @@ AGENT_SEED_DIR = '/var/lib/waagent'
 # In the event where the IMDS primary server is not
 # available, it takes 1s to fallback to the secondary one
 IMDS_TIMEOUT_IN_SECONDS = 2
-IMDS_URL = "http://169.254.169.254/metadata/"
-IMDS_VER = "2019-06-01"
-IMDS_VER_PARAM = "api-version={}".format(IMDS_VER)
+IMDS_URL = "http://169.254.169.254/metadata"
+IMDS_VER_MIN = "2019-06-01"
+IMDS_VER_WANT = "2020-09-01"


 class metadata_type(Enum):
-    compute = "{}instance?{}".format(IMDS_URL, IMDS_VER_PARAM)
-    network = "{}instance/network?{}".format(IMDS_URL,
-                                             IMDS_VER_PARAM)
-    reprovisiondata = "{}reprovisiondata?{}".format(IMDS_URL,
-                                                    IMDS_VER_PARAM)
+    compute = "{}/instance".format(IMDS_URL)
+    network = "{}/instance/network".format(IMDS_URL)
+    reprovisiondata = "{}/reprovisiondata".format(IMDS_URL)


 PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
@@ -349,6 +347,8 @@ class DataSourceAzure(sources.DataSource):
         self.update_events['network'].add(EventType.BOOT)
         self._ephemeral_dhcp_ctx = None

+        self.failed_desired_api_version = False
+
     def __str__(self):
         root = sources.DataSource.__str__(self)
         return "%s [seed=%s]" % (root, self.seed)
@@ -520,8 +520,10 @@ class DataSourceAzure(sources.DataSource):
                 self._wait_for_all_nics_ready()
             ret = self._reprovision()

-            imds_md = get_metadata_from_imds(
-                self.fallback_interface, retries=10)
+            imds_md = self.get_imds_data_with_api_fallback(
+                self.fallback_interface,
+                retries=10
+            )
             (md, userdata_raw, cfg, files) = ret
             self.seed = cdev
             crawled_data.update({
@@ -652,6 +654,57 @@ class DataSourceAzure(sources.DataSource):
                 self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
         return True

+    @azure_ds_telemetry_reporter
+    def get_imds_data_with_api_fallback(
+            self,
+            fallback_nic,
+            retries,
+            md_type=metadata_type.compute):
+        """
+        Wrapper for get_metadata_from_imds so that we can have flexibility
+        in which IMDS api-version we use. If a particular instance of IMDS
+        does not have the api version that is desired, we want to make
+        this fault tolerant and fall back to a good known minimum api
+        version.
+        """
+
+        if not self.failed_desired_api_version:
+            for _ in range(retries):
+                try:
+                    LOG.info(
+                        "Attempting IMDS api-version: %s",
+                        IMDS_VER_WANT
+                    )
+                    return get_metadata_from_imds(
+                        fallback_nic=fallback_nic,
+                        retries=0,
+                        md_type=md_type,
+                        api_version=IMDS_VER_WANT
+                    )
+                except UrlError as err:
+                    LOG.info(
+                        "UrlError with IMDS api-version: %s",
+                        IMDS_VER_WANT
+                    )
+                    if err.code == 400:
+                        log_msg = "Fall back to IMDS api-version: {}".format(
+                            IMDS_VER_MIN
+                        )
+                        report_diagnostic_event(
+                            log_msg,
+                            logger_func=LOG.info
+                        )
+                        self.failed_desired_api_version = True
+                        break
+
+        LOG.info("Using IMDS api-version: %s", IMDS_VER_MIN)
+        return get_metadata_from_imds(
+            fallback_nic=fallback_nic,
+            retries=retries,
+            md_type=md_type,
+            api_version=IMDS_VER_MIN
+        )
+
     def device_name_to_device(self, name):
         return self.ds_cfg['disk_aliases'].get(name)

@@ -880,10 +933,11 @@ class DataSourceAzure(sources.DataSource):
         # primary nic is being attached first helps here. Otherwise each nic
         # could add several seconds of delay.
         try:
-            imds_md = get_metadata_from_imds(
+            imds_md = self.get_imds_data_with_api_fallback(
                 ifname,
                 5,
-                metadata_type.network)
+                metadata_type.network
+            )
         except Exception as e:
             LOG.warning(
                 "Failed to get network metadata using nic %s. Attempt to "
@@ -1017,7 +1071,10 @@ class DataSourceAzure(sources.DataSource):
     def _poll_imds(self):
         """Poll IMDS for the new provisioning data until we get a valid
         response. Then return the returned JSON object."""
-        url = metadata_type.reprovisiondata.value
+        url = "{}?api-version={}".format(
+            metadata_type.reprovisiondata.value,
+            IMDS_VER_MIN
+        )
         headers = {"Metadata": "true"}
         nl_sock = None
         report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
@@ -2059,7 +2116,8 @@ def _generate_network_config_from_fallback_config() -> dict:
 @azure_ds_telemetry_reporter
 def get_metadata_from_imds(fallback_nic,
                            retries,
-                           md_type=metadata_type.compute):
+                           md_type=metadata_type.compute,
+                           api_version=IMDS_VER_MIN):
     """Query Azure's instance metadata service, returning a dictionary.

     If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
@@ -2069,13 +2127,16 @@ def get_metadata_from_imds(fallback_nic,
     @param fallback_nic: String. The name of the nic which requires active
         network in order to query IMDS.
     @param retries: The number of retries of the IMDS_URL.
+    @param md_type: Metadata type for IMDS request.
+    @param api_version: IMDS api-version to use in the request.

     @return: A dict of instance metadata containing compute and network
         info.
     """
     kwargs = {'logfunc': LOG.debug,
               'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
-              'func': _get_metadata_from_imds, 'args': (retries, md_type,)}
+              'func': _get_metadata_from_imds,
+              'args': (retries, md_type, api_version,)}
     if net.is_up(fallback_nic):
         return util.log_time(**kwargs)
     else:
@@ -2091,20 +2152,26 @@ def get_metadata_from_imds(fallback_nic,


 @azure_ds_telemetry_reporter
-def _get_metadata_from_imds(retries, md_type=metadata_type.compute):
-
-    url = md_type.value
+def _get_metadata_from_imds(
+        retries,
+        md_type=metadata_type.compute,
+        api_version=IMDS_VER_MIN):
+    url = "{}?api-version={}".format(md_type.value, api_version)
     headers = {"Metadata": "true"}
     try:
         response = readurl(
             url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
             retries=retries, exception_cb=retry_on_url_exc)
     except Exception as e:
-        report_diagnostic_event(
-            'Ignoring IMDS instance metadata. '
-            'Get metadata from IMDS failed: %s' % e,
-            logger_func=LOG.warning)
-        return {}
+        # pylint:disable=no-member
+        if isinstance(e, UrlError) and e.code == 400:
+            raise
+        else:
+            report_diagnostic_event(
+                'Ignoring IMDS instance metadata. '
+                'Get metadata from IMDS failed: %s' % e,
+                logger_func=LOG.warning)
+            return {}
     try:
         from json.decoder import JSONDecodeError
         json_decode_error = JSONDecodeError
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index f597c723..dedebeb1 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -408,7 +408,9 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):

     def setUp(self):
         super(TestGetMetadataFromIMDS, self).setUp()
-        self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2019-06-01"
+        self.network_md_url = "{}/instance?api-version=2019-06-01".format(
+            dsaz.IMDS_URL
+        )

     @mock.patch(MOCKPATH + 'readurl')
     @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True)
@@ -518,7 +520,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
         """Return empty dict when IMDS network metadata is absent."""
         httpretty.register_uri(
             httpretty.GET,
-            dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
+            dsaz.IMDS_URL + '/instance?api-version=2017-12-01',
             body={}, status=404)

         m_net_is_up.return_value = True  # skips dhcp
@@ -1877,6 +1879,40 @@ scbus-1 on xpt0 bus 0
         ssh_keys = dsrc.get_public_ssh_keys()
         self.assertEqual(ssh_keys, ['key2'])

+    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+    def test_imds_api_version_wanted_nonexistent(
+            self,
+            m_get_metadata_from_imds):
+        def get_metadata_from_imds_side_eff(*args, **kwargs):
+            if kwargs['api_version'] == dsaz.IMDS_VER_WANT:
+                raise url_helper.UrlError("No IMDS version", code=400)
+            return NETWORK_METADATA
+        m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {
+            'ovfcontent': construct_valid_ovf_env(data=odata),
+            'sys_cfg': sys_cfg
+        }
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertIsNotNone(dsrc.metadata)
+        self.assertTrue(dsrc.failed_desired_api_version)
+
+    @mock.patch(
+        MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA)
+    def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds):
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {
+            'ovfcontent': construct_valid_ovf_env(data=odata),
+            'sys_cfg': sys_cfg
+        }
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertIsNotNone(dsrc.metadata)
+        self.assertFalse(dsrc.failed_desired_api_version)
+

 class TestAzureBounce(CiTestCase):

@@ -2657,7 +2693,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
     @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up')
     @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
     @mock.patch('cloudinit.sources.net.find_fallback_nic')
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+    @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
     @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
     @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
     @mock.patch('os.path.isfile')
--
2.27.0
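Note: the api-version negotiation above boils down to "try the desired
version; on HTTP 400 remember that it is unsupported and use the minimum
version from then on." A standalone sketch of that logic, independent of
cloud-init's helpers (URL handling via urllib; the function names are
illustrative, not from the patch):

    import urllib.error
    import urllib.request

    IMDS_VER_MIN = "2019-06-01"
    IMDS_VER_WANT = "2020-09-01"

    def fetch_imds(api_version):
        # IMDS rejects requests that lack the Metadata: true header.
        req = urllib.request.Request(
            "http://169.254.169.254/metadata/instance?api-version="
            + api_version,
            headers={"Metadata": "true"})
        return urllib.request.urlopen(req, timeout=2).read()

    def fetch_with_fallback():
        try:
            return fetch_imds(IMDS_VER_WANT)
        except urllib.error.HTTPError as err:
            if err.code != 400:  # 400 here means "unknown api-version"
                raise
        # Desired version unsupported: fall back to the known-good minimum.
        return fetch_imds(IMDS_VER_MIN)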
(new file, 38 lines)

From b9c6c6c88d16685475bb9c8f0de3c765bd5303fa Mon Sep 17 00:00:00 2001
From: Eduardo Otubo <otubo@redhat.com>
Date: Thu, 17 Feb 2022 15:01:41 +0100
Subject: [PATCH 2/3] Adding _netdev to the default mount configuration

RH-Author: Eduardo Otubo <otubo@redhat.com>
RH-MergeRequest: 21: Adding _netdev to the default mount configuration
RH-Commit: [1/1] 250860a24db396a5088d207d6526a0028ac73eb3 (otubo/cloud-init-src)
RH-Bugzilla: 1998445
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Adding _netdev option also to the default configuration for RHEL.

rhbz: 1998445
x-downstream-only: yes

Signed-off-by: Eduardo Otubo <otubo@redhat.com>
---
 rhel/cloud.cfg | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg
index cbee197a..75d5c84b 100644
--- a/rhel/cloud.cfg
+++ b/rhel/cloud.cfg
@@ -4,7 +4,7 @@ users:
 disable_root: 1
 ssh_pwauth: 0

-mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2']
+mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2']
 resize_rootfs_tmp: /dev
 ssh_deletekeys: 1
 ssh_genkeytypes: ['rsa', 'ecdsa', 'ed25519']
--
2.27.0
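Note: mount_default_fields mirrors the six fstab columns (device, mount
point, fs type, options, dump, pass), with ~ meaning "no default for this
field". The shipped default can also be overridden per instance through
user-data; a sketch of the equivalent #cloud-config snippet:

    #cloud-config
    mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2']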
(new file, 397 lines)

From 68f058e8d20a499f74bc78af8e0c6a90ca57ae20 Mon Sep 17 00:00:00 2001
From: Thomas Stringer <thstring@microsoft.com>
Date: Mon, 26 Apr 2021 09:41:38 -0400
Subject: [PATCH 5/7] Azure: Retrieve username and hostname from IMDS (#865)

RH-Author: Eduardo Otubo <otubo@redhat.com>
RH-MergeRequest: 18: Add support for userdata on Azure from IMDS
RH-Commit: [5/7] 6a768d31e63e5f00dae0fad2712a7618d62b0879 (otubo/cloud-init-src)
RH-Bugzilla: 2042351
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

This change allows us to retrieve the username and hostname from
IMDS instead of having to rely on the mounted OVF.
---
 cloudinit/sources/DataSourceAzure.py          | 149 ++++++++++++++----
 tests/unittests/test_datasource/test_azure.py |  87 +++++++++-
 2 files changed, 205 insertions(+), 31 deletions(-)

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 39e67c4f..6d7954ee 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -5,6 +5,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.

 import base64
+from collections import namedtuple
 import contextlib
 import crypt
 from functools import partial
@@ -25,6 +26,7 @@ from cloudinit.net import device_driver
 from cloudinit.net.dhcp import EphemeralDHCPv4
 from cloudinit import sources
 from cloudinit.sources.helpers import netlink
+from cloudinit import ssh_util
 from cloudinit import subp
 from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
 from cloudinit import util
@@ -80,7 +82,12 @@ AGENT_SEED_DIR = '/var/lib/waagent'
 IMDS_TIMEOUT_IN_SECONDS = 2
 IMDS_URL = "http://169.254.169.254/metadata"
 IMDS_VER_MIN = "2019-06-01"
-IMDS_VER_WANT = "2020-09-01"
+IMDS_VER_WANT = "2020-10-01"
+
+
+# This holds SSH key data including if the source was
+# from IMDS, as well as the SSH key data itself.
+SSHKeys = namedtuple("SSHKeys", ("keys_from_imds", "ssh_keys"))


 class metadata_type(Enum):
@@ -391,6 +398,8 @@ class DataSourceAzure(sources.DataSource):
         """Return the subplatform metadata source details."""
         if self.seed.startswith('/dev'):
             subplatform_type = 'config-disk'
+        elif self.seed.lower() == 'imds':
+            subplatform_type = 'imds'
         else:
             subplatform_type = 'seed-dir'
         return '%s (%s)' % (subplatform_type, self.seed)
@@ -433,9 +442,11 @@ class DataSourceAzure(sources.DataSource):

         found = None
         reprovision = False
+        ovf_is_accessible = True
         reprovision_after_nic_attach = False
         for cdev in candidates:
             try:
+                LOG.debug("cdev: %s", cdev)
                 if cdev == "IMDS":
                     ret = None
                     reprovision = True
@@ -462,8 +473,18 @@ class DataSourceAzure(sources.DataSource):
                     raise sources.InvalidMetaDataException(msg)
             except util.MountFailedError:
                 report_diagnostic_event(
-                    '%s was not mountable' % cdev, logger_func=LOG.warning)
-                continue
+                    '%s was not mountable' % cdev, logger_func=LOG.debug)
+                cdev = 'IMDS'
+                ovf_is_accessible = False
+                empty_md = {'local-hostname': ''}
+                empty_cfg = dict(
+                    system_info=dict(
+                        default_user=dict(
+                            name=''
+                        )
+                    )
+                )
+                ret = (empty_md, '', empty_cfg, {})

             report_diagnostic_event("Found provisioning metadata in %s" % cdev,
                                     logger_func=LOG.debug)
@@ -490,6 +511,10 @@ class DataSourceAzure(sources.DataSource):
                 self.fallback_interface,
                 retries=10
             )
+            if not imds_md and not ovf_is_accessible:
+                msg = 'No OVF or IMDS available'
+                report_diagnostic_event(msg)
+                raise sources.InvalidMetaDataException(msg)
             (md, userdata_raw, cfg, files) = ret
             self.seed = cdev
             crawled_data.update({
@@ -498,6 +523,21 @@ class DataSourceAzure(sources.DataSource):
                 'metadata': util.mergemanydict(
                     [md, {'imds': imds_md}]),
                 'userdata_raw': userdata_raw})
+            imds_username = _username_from_imds(imds_md)
+            imds_hostname = _hostname_from_imds(imds_md)
+            imds_disable_password = _disable_password_from_imds(imds_md)
+            if imds_username:
+                LOG.debug('Username retrieved from IMDS: %s', imds_username)
+                cfg['system_info']['default_user']['name'] = imds_username
+            if imds_hostname:
+                LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname)
+                crawled_data['metadata']['local-hostname'] = imds_hostname
+            if imds_disable_password:
+                LOG.debug(
+                    'Disable password retrieved from IMDS: %s',
+                    imds_disable_password
+                )
+                crawled_data['metadata']['disable_password'] = imds_disable_password  # noqa: E501
             found = cdev

             report_diagnostic_event(
@@ -676,6 +716,13 @@ class DataSourceAzure(sources.DataSource):

     @azure_ds_telemetry_reporter
     def get_public_ssh_keys(self):
+        """
+        Retrieve public SSH keys.
+        """
+
+        return self._get_public_ssh_keys_and_source().ssh_keys
+
+    def _get_public_ssh_keys_and_source(self):
         """
         Try to get the ssh keys from IMDS first, and if that fails
         (i.e. IMDS is unavailable) then fallback to getting the ssh
@@ -685,30 +732,50 @@ class DataSourceAzure(sources.DataSource):
         advantage, so this is a strong preference. But we must keep
         OVF as a second option for environments that don't have IMDS.
         """
+
         LOG.debug('Retrieving public SSH keys')
         ssh_keys = []
+        keys_from_imds = True
+        LOG.debug('Attempting to get SSH keys from IMDS')
         try:
-            raise KeyError(
-                "Not using public SSH keys from IMDS"
-            )
-            # pylint:disable=unreachable
             ssh_keys = [
                 public_key['keyData']
                 for public_key
                 in self.metadata['imds']['compute']['publicKeys']
             ]
-            LOG.debug('Retrieved SSH keys from IMDS')
+            for key in ssh_keys:
+                if not _key_is_openssh_formatted(key=key):
+                    keys_from_imds = False
+                    break
+
+            if not keys_from_imds:
+                log_msg = 'Keys not in OpenSSH format, using OVF'
+            else:
+                log_msg = 'Retrieved {} keys from IMDS'.format(
+                    len(ssh_keys)
+                    if ssh_keys is not None
+                    else 0
+                )
         except KeyError:
             log_msg = 'Unable to get keys from IMDS, falling back to OVF'
+            keys_from_imds = False
+        finally:
             report_diagnostic_event(log_msg, logger_func=LOG.debug)
+
+        if not keys_from_imds:
+            LOG.debug('Attempting to get SSH keys from OVF')
             try:
                 ssh_keys = self.metadata['public-keys']
-                LOG.debug('Retrieved keys from OVF')
+                log_msg = 'Retrieved {} keys from OVF'.format(len(ssh_keys))
             except KeyError:
                 log_msg = 'No keys available from OVF'
+            finally:
                 report_diagnostic_event(log_msg, logger_func=LOG.debug)

-        return ssh_keys
+        return SSHKeys(
+            keys_from_imds=keys_from_imds,
+            ssh_keys=ssh_keys
+        )

     def get_config_obj(self):
         return self.cfg
@@ -1325,30 +1392,21 @@ class DataSourceAzure(sources.DataSource):
         self.bounce_network_with_azure_hostname()

         pubkey_info = None
-        try:
-            raise KeyError(
-                "Not using public SSH keys from IMDS"
-            )
-            # pylint:disable=unreachable
-            public_keys = self.metadata['imds']['compute']['publicKeys']
-            LOG.debug(
-                'Successfully retrieved %s key(s) from IMDS',
-                len(public_keys)
-                if public_keys is not None
+        ssh_keys_and_source = self._get_public_ssh_keys_and_source()
+
+        if not ssh_keys_and_source.keys_from_imds:
+            pubkey_info = self.cfg.get('_pubkeys', None)
+            log_msg = 'Retrieved {} fingerprints from OVF'.format(
+                len(pubkey_info)
+                if pubkey_info is not None
                 else 0
             )
-        except KeyError:
-            LOG.debug(
-                'Unable to retrieve SSH keys from IMDS during '
-                'negotiation, falling back to OVF'
-            )
-            pubkey_info = self.cfg.get('_pubkeys', None)
+            report_diagnostic_event(log_msg, logger_func=LOG.debug)

         metadata_func = partial(get_metadata_from_fabric,
                                 fallback_lease_file=self.
                                 dhclient_lease_file,
-                                pubkey_info=pubkey_info,
-                                iso_dev=self.iso_dev)
+                                pubkey_info=pubkey_info)

         LOG.debug("negotiating with fabric via agent command %s",
                   self.ds_cfg['agent_command'])
@@ -1404,6 +1462,41 @@ class DataSourceAzure(sources.DataSource):
         return self.metadata.get('imds', {}).get('compute', {}).get('location')


+def _username_from_imds(imds_data):
+    try:
+        return imds_data['compute']['osProfile']['adminUsername']
+    except KeyError:
+        return None
+
+
+def _hostname_from_imds(imds_data):
+    try:
+        return imds_data['compute']['osProfile']['computerName']
+    except KeyError:
+        return None
+
+
+def _disable_password_from_imds(imds_data):
+    try:
+        return imds_data['compute']['osProfile']['disablePasswordAuthentication'] == 'true'  # noqa: E501
+    except KeyError:
+        return None
+
+
+def _key_is_openssh_formatted(key):
+    """
+    Validate whether or not the key is OpenSSH-formatted.
+    """
+
+    parser = ssh_util.AuthKeyLineParser()
+    try:
+        akl = parser.parse(key)
+    except TypeError:
+        return False
+
+    return akl.keytype is not None
+
+
 def _partitions_on_device(devpath, maxnum=16):
     # return a list of tuples (ptnum, path) for each part on devpath
     for suff in ("-part", "p", ""):
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 320fa857..d9817d84 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -108,7 +108,7 @@ NETWORK_METADATA = {
         "zone": "",
         "publicKeys": [
             {
-                "keyData": "key1",
+                "keyData": "ssh-rsa key1",
                 "path": "path1"
             }
         ]
@@ -1761,8 +1761,29 @@ scbus-1 on xpt0 bus 0
         dsrc.get_data()
         dsrc.setup(True)
         ssh_keys = dsrc.get_public_ssh_keys()
-        # Temporarily alter this test so that SSH public keys
-        # from IMDS are *not* going to be in use to fix a regression.
+        self.assertEqual(ssh_keys, ["ssh-rsa key1"])
+        self.assertEqual(m_parse_certificates.call_count, 0)
+
+    @mock.patch(
+        'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
+    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+    def test_get_public_ssh_keys_with_no_openssh_format(
+            self,
+            m_get_metadata_from_imds,
+            m_parse_certificates):
+        imds_data = copy.deepcopy(NETWORK_METADATA)
+        imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format'
+        m_get_metadata_from_imds.return_value = imds_data
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {
+            'ovfcontent': construct_valid_ovf_env(data=odata),
+            'sys_cfg': sys_cfg
+        }
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        dsrc.setup(True)
+        ssh_keys = dsrc.get_public_ssh_keys()
         self.assertEqual(ssh_keys, [])
         self.assertEqual(m_parse_certificates.call_count, 0)

@@ -1818,6 +1839,66 @@ scbus-1 on xpt0 bus 0
         self.assertIsNotNone(dsrc.metadata)
         self.assertFalse(dsrc.failed_desired_api_version)

+    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+    def test_hostname_from_imds(self, m_get_metadata_from_imds):
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {
+            'ovfcontent': construct_valid_ovf_env(data=odata),
+            'sys_cfg': sys_cfg
+        }
+        imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+        imds_data_with_os_profile["compute"]["osProfile"] = dict(
+            adminUsername="username1",
+            computerName="hostname1",
+            disablePasswordAuthentication="true"
+        )
+        m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertEqual(dsrc.metadata["local-hostname"], "hostname1")
+
+    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+    def test_username_from_imds(self, m_get_metadata_from_imds):
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {
+            'ovfcontent': construct_valid_ovf_env(data=odata),
+            'sys_cfg': sys_cfg
+        }
+        imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+        imds_data_with_os_profile["compute"]["osProfile"] = dict(
+            adminUsername="username1",
+            computerName="hostname1",
+            disablePasswordAuthentication="true"
+        )
+        m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertEqual(
+            dsrc.cfg["system_info"]["default_user"]["name"],
+            "username1"
+        )
+
+    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+    def test_disable_password_from_imds(self, m_get_metadata_from_imds):
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {
+            'ovfcontent': construct_valid_ovf_env(data=odata),
+            'sys_cfg': sys_cfg
+        }
+        imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+        imds_data_with_os_profile["compute"]["osProfile"] = dict(
+            adminUsername="username1",
+            computerName="hostname1",
+            disablePasswordAuthentication="true"
+        )
+        m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertTrue(dsrc.metadata["disable_password"])
+

 class TestAzureBounce(CiTestCase):

--
2.27.0
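Note: the three new helpers all read the same compute.osProfile section of
the IMDS response and return None when a key is missing. A sketch of the
JSON shape they expect (key names are from the patch; values are
illustrative):

    imds_md = {
        "compute": {
            "osProfile": {
                "adminUsername": "username1",
                "computerName": "hostname1",
                # IMDS reports this flag as the string "true"/"false",
                # hence the string comparison in _disable_password_from_imds.
                "disablePasswordAuthentication": "true",
            }
        }
    }

    # Equivalent of _hostname_from_imds(imds_md):
    try:
        hostname = imds_md["compute"]["osProfile"]["computerName"]
    except KeyError:
        hostname = None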
(new file, 315 lines)

From 816fe5c2e6d5dcc68f292092b00b2acfbc4c8e88 Mon Sep 17 00:00:00 2001
From: aswinrajamannar <39812128+aswinrajamannar@users.noreply.github.com>
Date: Mon, 26 Apr 2021 07:28:39 -0700
Subject: [PATCH 6/7] Azure: Retry net metadata during nic attach for
 non-timeout errs (#878)

RH-Author: Eduardo Otubo <otubo@redhat.com>
RH-MergeRequest: 18: Add support for userdata on Azure from IMDS
RH-Commit: [6/7] 794cd340644260bb43a7c8582a8067f403b9842d (otubo/cloud-init-src)
RH-Bugzilla: 2042351
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

When network interfaces are hot-attached to the VM, attempting to get
network metadata might return 410 (or 500, 503 etc) because the info
is not yet available. In those cases, we retry getting the metadata
before giving up. The only case where we can move on to wait for more
nic attach events is if the call times out despite retries, which
means the interface is not likely a primary interface, and we should
try for more nic attach events.
---
 cloudinit/sources/DataSourceAzure.py          | 65 +++++++++++--
 tests/unittests/test_datasource/test_azure.py | 95 ++++++++++++++---
 2 files changed, 140 insertions(+), 20 deletions(-)

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 6d7954ee..d0be6d84 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,6 +17,7 @@ from time import sleep
 from xml.dom import minidom
 import xml.etree.ElementTree as ET
 from enum import Enum
+import requests

 from cloudinit import dmi
 from cloudinit import log as logging
@@ -665,7 +666,9 @@ class DataSourceAzure(sources.DataSource):
             self,
             fallback_nic,
             retries,
-            md_type=metadata_type.compute):
+            md_type=metadata_type.compute,
+            exc_cb=retry_on_url_exc,
+            infinite=False):
         """
         Wrapper for get_metadata_from_imds so that we can have flexibility
         in which IMDS api-version we use. If a particular instance of IMDS
@@ -685,7 +688,8 @@ class DataSourceAzure(sources.DataSource):
                         fallback_nic=fallback_nic,
                         retries=0,
                         md_type=md_type,
-                        api_version=IMDS_VER_WANT
+                        api_version=IMDS_VER_WANT,
+                        exc_cb=exc_cb
                     )
                 except UrlError as err:
                     LOG.info(
@@ -708,7 +712,9 @@ class DataSourceAzure(sources.DataSource):
             fallback_nic=fallback_nic,
             retries=retries,
             md_type=md_type,
-            api_version=IMDS_VER_MIN
+            api_version=IMDS_VER_MIN,
+            exc_cb=exc_cb,
+            infinite=infinite
         )

     def device_name_to_device(self, name):
@@ -938,6 +944,9 @@ class DataSourceAzure(sources.DataSource):
         is_primary = False
         expected_nic_count = -1
         imds_md = None
+        metadata_poll_count = 0
+        metadata_logging_threshold = 1
+        metadata_timeout_count = 0

         # For now, only a VM's primary NIC can contact IMDS and WireServer. If
         # DHCP fails for a NIC, we have no mechanism to determine if the NIC is
@@ -962,14 +971,48 @@ class DataSourceAzure(sources.DataSource):
                     % (ifname, e), logger_func=LOG.error)
                 raise

+        # Retry polling network metadata for a limited duration only when the
+        # calls fail due to timeout. This is because the platform drops packets
+        # going towards IMDS when it is not a primary nic. If the calls fail
+        # due to other issues like 410, 503 etc, then it means we are primary
+        # but IMDS service is unavailable at the moment. Retry indefinitely in
+        # those cases since we cannot move on without the network metadata.
+        def network_metadata_exc_cb(msg, exc):
+            nonlocal metadata_timeout_count, metadata_poll_count
+            nonlocal metadata_logging_threshold
+
+            metadata_poll_count = metadata_poll_count + 1
+
+            # Log when needed but back off exponentially to avoid exploding
+            # the log file.
+            if metadata_poll_count >= metadata_logging_threshold:
+                metadata_logging_threshold *= 2
+                report_diagnostic_event(
+                    "Ran into exception when attempting to reach %s "
+                    "after %d polls." % (msg, metadata_poll_count),
+                    logger_func=LOG.error)
+
+                if isinstance(exc, UrlError):
+                    report_diagnostic_event("poll IMDS with %s failed. "
+                                            "Exception: %s and code: %s" %
+                                            (msg, exc.cause, exc.code),
+                                            logger_func=LOG.error)
+
+            if exc.cause and isinstance(exc.cause, requests.Timeout):
+                metadata_timeout_count = metadata_timeout_count + 1
+                return (metadata_timeout_count <= 10)
+            return True
+
         # Primary nic detection will be optimized in the future. The fact that
         # primary nic is being attached first helps here. Otherwise each nic
         # could add several seconds of delay.
         try:
             imds_md = self.get_imds_data_with_api_fallback(
                 ifname,
-                5,
-                metadata_type.network
+                0,
+                metadata_type.network,
+                network_metadata_exc_cb,
+                True
             )
         except Exception as e:
             LOG.warning(
@@ -2139,7 +2182,9 @@ def _generate_network_config_from_fallback_config() -> dict:
 def get_metadata_from_imds(fallback_nic,
                            retries,
                            md_type=metadata_type.compute,
-                           api_version=IMDS_VER_MIN):
+                           api_version=IMDS_VER_MIN,
+                           exc_cb=retry_on_url_exc,
+                           infinite=False):
     """Query Azure's instance metadata service, returning a dictionary.

     If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
@@ -2158,7 +2203,7 @@ def get_metadata_from_imds(fallback_nic,
     kwargs = {'logfunc': LOG.debug,
               'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
               'func': _get_metadata_from_imds,
-              'args': (retries, md_type, api_version,)}
+              'args': (retries, exc_cb, md_type, api_version, infinite)}
     if net.is_up(fallback_nic):
         return util.log_time(**kwargs)
     else:
@@ -2176,14 +2221,16 @@ def get_metadata_from_imds(fallback_nic,
 @azure_ds_telemetry_reporter
 def _get_metadata_from_imds(
         retries,
+        exc_cb,
         md_type=metadata_type.compute,
-        api_version=IMDS_VER_MIN):
+        api_version=IMDS_VER_MIN,
+        infinite=False):
     url = "{}?api-version={}".format(md_type.value, api_version)
     headers = {"Metadata": "true"}
     try:
         response = readurl(
             url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
-            retries=retries, exception_cb=retry_on_url_exc)
+            retries=retries, exception_cb=exc_cb, infinite=infinite)
     except Exception as e:
         # pylint:disable=no-member
         if isinstance(e, UrlError) and e.code == 400:
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index d9817d84..c4a8e08d 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -448,7 +448,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
             "http://169.254.169.254/metadata/instance?api-version="
             "2019-06-01", exception_cb=mock.ANY,
             headers=mock.ANY, retries=mock.ANY,
-            timeout=mock.ANY)
+            timeout=mock.ANY, infinite=False)

     @mock.patch(MOCKPATH + 'readurl', autospec=True)
     @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
@@ -467,7 +467,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
             "http://169.254.169.254/metadata/instance/network?api-version="
             "2019-06-01", exception_cb=mock.ANY,
             headers=mock.ANY, retries=mock.ANY,
-            timeout=mock.ANY)
+            timeout=mock.ANY, infinite=False)

     @mock.patch(MOCKPATH + 'readurl', autospec=True)
     @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
@@ -486,7 +486,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
             "http://169.254.169.254/metadata/instance?api-version="
             "2019-06-01", exception_cb=mock.ANY,
             headers=mock.ANY, retries=mock.ANY,
-            timeout=mock.ANY)
+            timeout=mock.ANY, infinite=False)

     @mock.patch(MOCKPATH + 'readurl', autospec=True)
     @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True)
@@ -511,7 +511,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
         m_readurl.assert_called_with(
             self.network_md_url, exception_cb=mock.ANY,
             headers={'Metadata': 'true'}, retries=2,
-            timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS)
+            timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False)

     @mock.patch('cloudinit.url_helper.time.sleep')
     @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
@@ -2694,15 +2694,22 @@ class TestPreprovisioningHotAttachNics(CiTestCase):

         def nic_attach_ret(nl_sock, nics_found):
             nonlocal m_attach_call_count
-            if m_attach_call_count == 0:
-                m_attach_call_count = m_attach_call_count + 1
+            m_attach_call_count = m_attach_call_count + 1
+            if m_attach_call_count == 1:
                 return "eth0"
-            return "eth1"
+            elif m_attach_call_count == 2:
+                return "eth1"
+            raise RuntimeError("Must have found primary nic by now.")
+
+        # Simulate two NICs by adding the same one twice.
+        md = {
+            "interface": [
+                IMDS_NETWORK_METADATA['interface'][0],
+                IMDS_NETWORK_METADATA['interface'][0]
+            ]
+        }

-        def network_metadata_ret(ifname, retries, type):
-            # Simulate two NICs by adding the same one twice.
-            md = IMDS_NETWORK_METADATA
-            md['interface'].append(md['interface'][0])
+        def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
             if ifname == "eth0":
                 return md
             raise requests.Timeout('Fake connection timeout')
@@ -2724,6 +2731,72 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
         self.assertEqual(1, m_imds.call_count)
         self.assertEqual(2, m_link_up.call_count)

+    @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
+    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+    def test_check_if_nic_is_primary_retries_on_failures(
+            self, m_dhcpv4, m_imds):
+        """Retry polling for network metadata on all failures except timeout"""
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        lease = {
+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}
+
+        eth0Retries = []
+        eth1Retries = []
+        # Simulate two NICs by adding the same one twice.
+        md = {
+            "interface": [
+                IMDS_NETWORK_METADATA['interface'][0],
+                IMDS_NETWORK_METADATA['interface'][0]
+            ]
+        }
+
+        def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
+            nonlocal eth0Retries, eth1Retries
+
+            # Simulate readurl functionality with retries and
+            # exception callbacks so that the callback logic can be
+            # validated.
+            if ifname == "eth0":
+                cause = requests.HTTPError()
+                for _ in range(0, 15):
+                    error = url_helper.UrlError(cause=cause, code=410)
+                    eth0Retries.append(exc_cb("No goal state.", error))
+            else:
+                cause = requests.Timeout('Fake connection timeout')
+                for _ in range(0, 10):
+                    error = url_helper.UrlError(cause=cause)
+                    eth1Retries.append(exc_cb("Connection timeout", error))
+                # Should stop retrying after 10 retries
+                eth1Retries.append(exc_cb("Connection timeout", error))
+                raise cause
+            return md
+
+        m_imds.side_effect = network_metadata_ret
+
+        dhcp_ctx = mock.MagicMock(lease=lease)
+        dhcp_ctx.obtain_lease.return_value = lease
+        m_dhcpv4.return_value = dhcp_ctx
+
+        is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0")
+        self.assertEqual(True, is_primary)
+        self.assertEqual(2, expected_nic_count)
+
+        # All Eth0 errors are non-timeout errors. So we should have been
+        # retrying indefinitely until success.
+        for i in eth0Retries:
+            self.assertTrue(i)
+
+        is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1")
+        self.assertEqual(False, is_primary)
+
+        # All Eth1 errors are timeout errors. Retry happens for a max of 10 and
+        # then we should have moved on assuming it is not the primary nic.
+        for i in range(0, 10):
+            self.assertTrue(eth1Retries[i])
+        self.assertFalse(eth1Retries[10])
+
     @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
     def test_wait_for_link_up_returns_if_already_up(
             self, m_is_link_up):
--
2.27.0
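Note: the essence of this patch is the retry policy in the exception
callback: timeouts are retried only a bounded number of times (a
non-primary NIC's IMDS traffic is dropped by the platform, so it times out
forever), while any other error is retried indefinitely. A distilled,
self-contained sketch of that decision (the UrlError stand-in and counter
handling are simplified from the patch):

    import requests

    class UrlError(Exception):
        def __init__(self, cause=None, code=None):
            self.cause = cause
            self.code = code

    metadata_timeout_count = 0

    def network_metadata_exc_cb(msg, exc):
        """Return True to keep retrying, False to give up."""
        global metadata_timeout_count
        if isinstance(exc, UrlError) and isinstance(
                exc.cause, requests.Timeout):
            # Likely not the primary NIC: stop after 10 timeouts.
            metadata_timeout_count += 1
            return metadata_timeout_count <= 10
        # 410, 500, 503 etc.: IMDS reachable but not ready; retry forever.
        return True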
(new file, 129 lines)

From 0def71378dc7abf682727c600b696f7313cdcf60 Mon Sep 17 00:00:00 2001
From: Anh Vo <anhvo@microsoft.com>
Date: Tue, 27 Apr 2021 13:40:59 -0400
Subject: [PATCH 7/7] Azure: adding support for consuming userdata from IMDS
 (#884)

RH-Author: Eduardo Otubo <otubo@redhat.com>
RH-MergeRequest: 18: Add support for userdata on Azure from IMDS
RH-Commit: [7/7] 1e7ab925162ed9ef2c9b5b9f5c6d5e6ec6e623dd (otubo/cloud-init-src)
RH-Bugzilla: 2042351
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 cloudinit/sources/DataSourceAzure.py          | 23 ++++++++-
 tests/unittests/test_datasource/test_azure.py | 50 +++++++++++++++++++
 2 files changed, 72 insertions(+), 1 deletion(-)

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index d0be6d84..a66f023d 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -83,7 +83,7 @@ AGENT_SEED_DIR = '/var/lib/waagent'
 IMDS_TIMEOUT_IN_SECONDS = 2
 IMDS_URL = "http://169.254.169.254/metadata"
 IMDS_VER_MIN = "2019-06-01"
-IMDS_VER_WANT = "2020-10-01"
+IMDS_VER_WANT = "2021-01-01"


 # This holds SSH key data including if the source was
@@ -539,6 +539,20 @@ class DataSourceAzure(sources.DataSource):
                     imds_disable_password
                 )
                 crawled_data['metadata']['disable_password'] = imds_disable_password  # noqa: E501
+
+            # only use userdata from imds if OVF did not provide custom data
+            # userdata provided by IMDS is always base64 encoded
+            if not userdata_raw:
+                imds_userdata = _userdata_from_imds(imds_md)
+                if imds_userdata:
+                    LOG.debug("Retrieved userdata from IMDS")
+                    try:
+                        crawled_data['userdata_raw'] = base64.b64decode(
+                            ''.join(imds_userdata.split()))
+                    except Exception:
+                        report_diagnostic_event(
+                            "Bad userdata in IMDS",
+                            logger_func=LOG.warning)
             found = cdev

             report_diagnostic_event(
@@ -1512,6 +1526,13 @@ def _username_from_imds(imds_data):
         return None


+def _userdata_from_imds(imds_data):
+    try:
+        return imds_data['compute']['userData']
+    except KeyError:
+        return None
+
+
 def _hostname_from_imds(imds_data):
     try:
         return imds_data['compute']['osProfile']['computerName']
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index c4a8e08d..f8433690 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -1899,6 +1899,56 @@ scbus-1 on xpt0 bus 0
         dsrc.get_data()
         self.assertTrue(dsrc.metadata["disable_password"])

+    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+    def test_userdata_from_imds(self, m_get_metadata_from_imds):
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {
+            'ovfcontent': construct_valid_ovf_env(data=odata),
+            'sys_cfg': sys_cfg
+        }
+        userdata = "userdataImds"
+        imds_data = copy.deepcopy(NETWORK_METADATA)
+        imds_data["compute"]["osProfile"] = dict(
+            adminUsername="username1",
+            computerName="hostname1",
+            disablePasswordAuthentication="true",
+        )
+        imds_data["compute"]["userData"] = b64e(userdata)
+        m_get_metadata_from_imds.return_value = imds_data
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8'))
+
+    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+    def test_userdata_from_imds_with_customdata_from_OVF(
+            self, m_get_metadata_from_imds):
+        userdataOVF = "userdataOVF"
+        odata = {
+            'HostName': "myhost", 'UserName': "myuser",
+            'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'}
+        }
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        data = {
+            'ovfcontent': construct_valid_ovf_env(data=odata),
+            'sys_cfg': sys_cfg
+        }
+
+        userdataImds = "userdataImds"
+        imds_data = copy.deepcopy(NETWORK_METADATA)
+        imds_data["compute"]["osProfile"] = dict(
+            adminUsername="username1",
+            computerName="hostname1",
+            disablePasswordAuthentication="true",
+        )
+        imds_data["compute"]["userData"] = b64e(userdataImds)
+        m_get_metadata_from_imds.return_value = imds_data
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8'))
+

 class TestAzureBounce(CiTestCase):

--
2.27.0
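Note: IMDS delivers userData base64-encoded, possibly wrapped across lines,
which is why the patch joins the whitespace-split string before decoding.
The decode step in isolation (the sample value is illustrative):

    import base64

    imds_userdata = "dXNlcmRh\ndGFJbWRz"  # wrapped base64 from IMDS
    decoded = base64.b64decode("".join(imds_userdata.split()))
    assert decoded == b"userdataImds"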
@ -0,0 +1,177 @@
|
||||
From 2ece71923a37a5e1107c80f091a1cc620943fbf2 Mon Sep 17 00:00:00 2001
|
||||
From: Anh Vo <anhvo@microsoft.com>
|
||||
Date: Fri, 23 Apr 2021 10:18:05 -0400
|
||||
Subject: [PATCH 4/7] Azure: eject the provisioning iso before reporting ready
|
||||
(#861)
|
||||
|
||||
RH-Author: Eduardo Otubo <otubo@redhat.com>
|
||||
RH-MergeRequest: 18: Add support for userdata on Azure from IMDS
|
||||
RH-Commit: [4/7] 63e379a4406530c0c15c733f8eee35421079508b (otubo/cloud-init-src)
|
||||
RH-Bugzilla: 2042351
|
||||
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
|
||||
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
|
||||
|
||||
Due to hyper-v implementations, iso ejection is more efficient if performed
|
||||
from within the guest. The code will attempt to perform a best-effort ejection.
|
||||
Failure during ejection will not prevent reporting ready from happening. If iso
|
||||
ejection is successful, later iso ejection from the platform will be a no-op.
|
||||
In the event the iso ejection from the guest fails, iso ejection will still happen at
|
||||
the platform level.
|
||||
---
|
||||
cloudinit/sources/DataSourceAzure.py | 22 +++++++++++++++---
|
||||
cloudinit/sources/helpers/azure.py | 23 ++++++++++++++++---
|
||||
.../test_datasource/test_azure_helper.py | 13 +++++++++--
|
||||
3 files changed, 50 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
|
||||
index 020b7006..39e67c4f 100755
|
||||
--- a/cloudinit/sources/DataSourceAzure.py
|
||||
+++ b/cloudinit/sources/DataSourceAzure.py
|
||||
@@ -332,6 +332,7 @@ class DataSourceAzure(sources.DataSource):
|
||||
dsname = 'Azure'
|
||||
_negotiated = False
|
||||
_metadata_imds = sources.UNSET
|
||||
+ _ci_pkl_version = 1
|
||||
|
||||
def __init__(self, sys_cfg, distro, paths):
|
||||
sources.DataSource.__init__(self, sys_cfg, distro, paths)
|
||||
@@ -346,8 +347,13 @@ class DataSourceAzure(sources.DataSource):
|
||||
# Regenerate network config new_instance boot and every boot
|
||||
self.update_events['network'].add(EventType.BOOT)
|
||||
self._ephemeral_dhcp_ctx = None
|
||||
-
|
||||
self.failed_desired_api_version = False
|
||||
+ self.iso_dev = None
|
||||
+
|
||||
+ def _unpickle(self, ci_pkl_version: int) -> None:
|
||||
+ super()._unpickle(ci_pkl_version)
|
||||
+ if "iso_dev" not in self.__dict__:
|
||||
+ self.iso_dev = None
|
||||
|
||||
def __str__(self):
|
||||
root = sources.DataSource.__str__(self)
|
||||
@@ -459,6 +465,13 @@ class DataSourceAzure(sources.DataSource):
|
||||
'%s was not mountable' % cdev, logger_func=LOG.warning)
|
||||
continue
|
||||
|
||||
+ report_diagnostic_event("Found provisioning metadata in %s" % cdev,
|
||||
+ logger_func=LOG.debug)
|
||||
+
|
||||
+ # save the iso device for ejection before reporting ready
|
||||
+ if cdev.startswith("/dev"):
|
||||
+ self.iso_dev = cdev
|
||||
+
|
||||
perform_reprovision = reprovision or self._should_reprovision(ret)
|
||||
perform_reprovision_after_nic_attach = (
|
||||
reprovision_after_nic_attach or
|
||||
@@ -1226,7 +1239,9 @@ class DataSourceAzure(sources.DataSource):
|
||||
@return: The success status of sending the ready signal.
|
||||
"""
|
||||
try:
|
||||
- get_metadata_from_fabric(None, lease['unknown-245'])
|
||||
+ get_metadata_from_fabric(fallback_lease_file=None,
|
||||
+ dhcp_opts=lease['unknown-245'],
|
||||
+ iso_dev=self.iso_dev)
|
||||
return True
|
||||
except Exception as e:
|
||||
report_diagnostic_event(
|
||||
@@ -1332,7 +1347,8 @@ class DataSourceAzure(sources.DataSource):
|
||||
metadata_func = partial(get_metadata_from_fabric,
|
||||
fallback_lease_file=self.
|
||||
dhclient_lease_file,
|
||||
- pubkey_info=pubkey_info)
|
||||
+                                    pubkey_info=pubkey_info,
+                                    iso_dev=self.iso_dev)

        LOG.debug("negotiating with fabric via agent command %s",
                  self.ds_cfg['agent_command'])
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 03e7156b..ad476076 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -865,7 +865,19 @@ class WALinuxAgentShim:
         return endpoint_ip_address

     @azure_ds_telemetry_reporter
-    def register_with_azure_and_fetch_data(self, pubkey_info=None) -> dict:
+    def eject_iso(self, iso_dev) -> None:
+        try:
+            LOG.debug("Ejecting the provisioning iso")
+            subp.subp(['eject', iso_dev])
+        except Exception as e:
+            report_diagnostic_event(
+                "Failed ejecting the provisioning iso: %s" % e,
+                logger_func=LOG.debug)
+
+    @azure_ds_telemetry_reporter
+    def register_with_azure_and_fetch_data(self,
+                                           pubkey_info=None,
+                                           iso_dev=None) -> dict:
         """Gets the VM's GoalState from Azure, uses the GoalState information
         to report ready/send the ready signal/provisioning complete signal to
         Azure, and then uses pubkey_info to filter and obtain the user's
@@ -891,6 +903,10 @@ class WALinuxAgentShim:
         ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
         health_reporter = GoalStateHealthReporter(
             goal_state, self.azure_endpoint_client, self.endpoint)
+
+        if iso_dev is not None:
+            self.eject_iso(iso_dev)
+
         health_reporter.send_ready_signal()
         return {'public-keys': ssh_keys}

@@ -1046,11 +1062,12 @@ class WALinuxAgentShim:

 @azure_ds_telemetry_reporter
 def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
-                             pubkey_info=None):
+                             pubkey_info=None, iso_dev=None):
     shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
                             dhcp_options=dhcp_opts)
     try:
-        return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info)
+        return shim.register_with_azure_and_fetch_data(
+            pubkey_info=pubkey_info, iso_dev=iso_dev)
     finally:
         shim.clean_up()

diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 63482c6c..552c7905 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -1009,6 +1009,14 @@ class TestWALinuxAgentShim(CiTestCase):
         self.GoalState.return_value.container_id = self.test_container_id
         self.GoalState.return_value.instance_id = self.test_instance_id

+    def test_eject_iso_is_called(self):
+        shim = wa_shim()
+        with mock.patch.object(
+            shim, 'eject_iso', autospec=True
+        ) as m_eject_iso:
+            shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0")
+            m_eject_iso.assert_called_once_with("/dev/sr0")
+
     def test_http_client_does_not_use_certificate_for_report_ready(self):
         shim = wa_shim()
         shim.register_with_azure_and_fetch_data()
@@ -1283,13 +1291,14 @@ class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):

     def test_calls_shim_register_with_azure_and_fetch_data(self):
         m_pubkey_info = mock.MagicMock()
-        azure_helper.get_metadata_from_fabric(pubkey_info=m_pubkey_info)
+        azure_helper.get_metadata_from_fabric(
+            pubkey_info=m_pubkey_info, iso_dev="/dev/sr0")
         self.assertEqual(
             1,
             self.m_shim.return_value
             .register_with_azure_and_fetch_data.call_count)
         self.assertEqual(
-            mock.call(pubkey_info=m_pubkey_info),
+            mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info),
             self.m_shim.return_value
             .register_with_azure_and_fetch_data.call_args)

--
2.27.0

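The ordering this patch establishes is worth noting: the provisioning ISO is ejected only after the goal state has been fetched, and before the ready signal is sent, so the platform never sees "ready" while the ISO is still attached. A minimal standalone sketch of that flow (not cloud-init code; fetch_goal_state and send_ready_signal are hypothetical stand-ins):

    import logging
    import subprocess

    LOG = logging.getLogger(__name__)

    def eject_iso(iso_dev):
        # Best-effort eject: failures are logged and swallowed, mirroring
        # the patch, where a failed eject must not abort provisioning.
        try:
            subprocess.run(["eject", iso_dev], check=True)
        except Exception as e:
            LOG.debug("Failed ejecting the provisioning iso: %s", e)

    def register_and_fetch(fetch_goal_state, send_ready_signal, iso_dev=None):
        goal_state = fetch_goal_state()
        if iso_dev is not None:
            eject_iso(iso_dev)  # eject before reporting ready
        send_ready_signal(goal_state)
        return goal_state
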
@ -0,0 +1,90 @@
From 3ee42e6e6ca51b3fd0b6461f707d62c89d54e227 Mon Sep 17 00:00:00 2001
From: Johnson Shi <Johnson.Shi@microsoft.com>
Date: Thu, 25 Mar 2021 07:20:10 -0700
Subject: [PATCH 2/7] Azure helper: Ensure Azure http handler sleeps between
 retries (#842)

RH-Author: Eduardo Otubo <otubo@redhat.com>
RH-MergeRequest: 18: Add support for userdata on Azure from IMDS
RH-Commit: [2/7] 65672cdfe2265f32e6d3c440ba5a8accafdb6ca6 (otubo/cloud-init-src)
RH-Bugzilla: 2042351
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Ensure that the Azure helper's http handler sleeps a fixed duration
between retry failure attempts. The http handler will sleep a fixed
duration between failed attempts regardless of whether the attempt
failed due to (1) request timing out or (2) instant failure (no
timeout).

Due to certain platform issues, the http request to the Azure endpoint
may instantly fail without reaching the http timeout duration. Without
sleeping a fixed duration in between retry attempts, the http handler
will loop through the max retry attempts quickly. This causes the
communication between cloud-init and the Azure platform to be less
resilient due to the short total duration if there is no sleep in
between retries.
---
 cloudinit/sources/helpers/azure.py                   |  2 ++
 tests/unittests/test_datasource/test_azure_helper.py | 11 +++++++++--
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index d3055d08..03e7156b 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -303,6 +303,7 @@ def http_with_retries(url, **kwargs) -> str:

     max_readurl_attempts = 240
     default_readurl_timeout = 5
+    sleep_duration_between_retries = 5
     periodic_logging_attempts = 12

     if 'timeout' not in kwargs:
@@ -338,6 +339,7 @@ def http_with_retries(url, **kwargs) -> str:
                 'attempt %d with exception: %s' %
                 (url, attempt, e),
                 logger_func=LOG.debug)
+            time.sleep(sleep_duration_between_retries)

     raise exc

diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index b8899807..63482c6c 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -384,6 +384,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase):

     max_readurl_attempts = 240
     default_readurl_timeout = 5
+    sleep_duration_between_retries = 5
     periodic_logging_attempts = 12

     def setUp(self):
@@ -394,8 +395,8 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
         self.m_readurl = patches.enter_context(
             mock.patch.object(
                 azure_helper.url_helper, 'readurl', mock.MagicMock()))
-        patches.enter_context(
-            mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+        self.m_sleep = patches.enter_context(
+            mock.patch.object(azure_helper.time, 'sleep', autospec=True))

     def test_http_with_retries(self):
         self.m_readurl.return_value = 'TestResp'
@@ -438,6 +439,12 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
             self.m_readurl.call_count,
             self.periodic_logging_attempts + 1)

+        # Ensure that cloud-init did sleep between each failed request
+        self.assertEqual(
+            self.m_sleep.call_count,
+            self.periodic_logging_attempts)
+        self.m_sleep.assert_called_with(self.sleep_duration_between_retries)
+
     def test_http_with_retries_long_delay_logs_periodic_failure_msg(self):
         self.m_readurl.side_effect = \
             [SentinelException] * self.periodic_logging_attempts + \
--
2.27.0

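Outside of cloud-init, the retry policy introduced above reduces to a fixed-sleep retry loop. A minimal sketch under that reading (constants mirror the patch; fetch is any callable that raises on failure):

    import time

    MAX_ATTEMPTS = 240
    SLEEP_BETWEEN_RETRIES = 5  # seconds, slept after *every* failure

    def http_with_retries(fetch):
        # Sleeping unconditionally after each failure keeps the total
        # retry window long even when requests fail instantly rather
        # than timing out, which is the platform issue described above.
        exc = None
        for _attempt in range(MAX_ATTEMPTS):
            try:
                return fetch()
            except Exception as e:
                exc = e
                time.sleep(SLEEP_BETWEEN_RETRIES)
        raise exc

With a 5-second sleep and 240 attempts this guarantees a retry window of at least 20 minutes, instead of a window that collapses to near zero on instant failures.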
47  SOURCES/ci-Change-netifaces-dependency-to-0.10.4-965.patch  (new file)
@ -0,0 +1,47 @@
From 18138313e009a08592fe79c5e66d6eba8f027f19 Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date: Fri, 14 Jan 2022 16:49:57 +0100
Subject: [PATCH 2/5] Change netifaces dependency to 0.10.4 (#965)

RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-MergeRequest: 17: Datasource for VMware
RH-Commit: [2/5] 8688e8b955a7ee15cf66de0b2a242c7c418b7630 (eesposit/cloud-init-centos-)
RH-Bugzilla: 2040090
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Eduardo Otubo <otubo@redhat.com>

commit b9d308b4d61d22bacc05bcae59819755975631f8
Author: Andrew Kutz <101085+akutz@users.noreply.github.com>
Date:   Tue Aug 10 15:10:44 2021 -0500

    Change netifaces dependency to 0.10.4 (#965)

    Ubuntu versions <=20.10 currently ship netifaces 0.10.4. By requiring
    netifaces 0.10.9, the VMware datasource omitted itself from cloud-init
    on Ubuntu <=20.10.

    This patch changes the netifaces dependency to 0.10.4. While it is true
    there are patches to netifaces post 0.10.4 that are desirable, testing
    against the most common network configuration was performed to verify
    the VMware datasource will still function with netifaces 0.10.4.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 41d01d62..c4adc455 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -40,4 +40,4 @@ jsonschema
 # and still participate in instance-data by gathering the network in detail at
 # runtime and merge that information into the metadata and repersist that to
 # disk.
-netifaces>=0.10.9
+netifaces>=0.10.4
--
2.27.0

2201  SOURCES/ci-Datasource-for-VMware-953.patch  (new file; diff suppressed because it is too large)
474  SOURCES/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch  (new file)
@ -0,0 +1,474 @@
From 290353d6df0b3bbbbcfa4f949f943388939ebc12 Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date: Fri, 11 Feb 2022 14:57:40 +0100
Subject: [PATCH 1/3] Fix IPv6 netmask format for sysconfig (#1215)

RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-MergeRequest: 20: Fix IPv6 netmask format for sysconfig (#1215)
RH-Commit: [1/1] 2eb7ac7c85e82c14f9a95b9baf1482ac987b1084 (eesposit/cloud-init-centos-)
RH-Bugzilla: 2053546
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>

commit b97a30f0a05c1dea918c46ca9c05c869d15fe2d5
Author: Harald <hjensas@redhat.com>
Date:   Tue Feb 8 15:49:00 2022 +0100

    Fix IPv6 netmask format for sysconfig (#1215)

    This change converts the IPv6 netmask from the network_data.json[1]
    format to the CIDR style, <IPv6_addr>/<prefix>.

    Using an IPv6 address like ffff:ffff:ffff:ffff:: does not work with
    NetworkManager, nor networkscripts.

    NetworkManager will ignore the route, logging:
        ifcfg-rh: ignoring invalid route at \
            "::/:: via fd00:fd00:fd00:2::fffe dev $DEV" \
            (/etc/sysconfig/network-scripts/route6-$DEV:3): \
            Argument for "::/::" is not ADDR/PREFIX format

    Similarly, if using networkscripts, ip route fails with error:
        Error: inet6 prefix is expected rather than \
            "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::".

    Also a bit of refactoring ...

    cloudinit.net.sysconfig.Route.to_string:
      * Move a couple of lines around to reduce repeated code.
      * if "ADDRESS" not in key -> continue, so that the
        code block following it can be de-indented.
    cloudinit.net.network_state:
      * Refactors ipv4_mask_to_net_prefix and ipv6_mask_to_net_prefix,
        and removes the mask_to_net_prefix method. Utilizes the
        ipaddress library to do some of the heavy lifting.

    LP: #1959148

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 cloudinit/net/__init__.py                     |   7 +-
 cloudinit/net/network_state.py                | 103 +++++++-----------
 cloudinit/net/sysconfig.py                    |  91 ++++++++++------
 cloudinit/sources/DataSourceOpenNebula.py     |   2 +-
 .../sources/helpers/vmware/imc/config_nic.py  |   4 +-
 tests/unittests/test_net.py                   |  78 ++++++++++++-
 6 files changed, 176 insertions(+), 109 deletions(-)

diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 4bdc1bda..91cb0627 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -13,7 +13,7 @@ import re

 from cloudinit import subp
 from cloudinit import util
-from cloudinit.net.network_state import mask_to_net_prefix
+from cloudinit.net.network_state import ipv4_mask_to_net_prefix
 from cloudinit.url_helper import UrlError, readurl

 LOG = logging.getLogger(__name__)
@@ -986,10 +986,11 @@ class EphemeralIPv4Network(object):
                 'Cannot init network on {0} with {1}/{2} and bcast {3}'.format(
                     interface, ip, prefix_or_mask, broadcast))
         try:
-            self.prefix = mask_to_net_prefix(prefix_or_mask)
+            self.prefix = ipv4_mask_to_net_prefix(prefix_or_mask)
         except ValueError as e:
             raise ValueError(
-                'Cannot setup network: {0}'.format(e)
+                "Cannot setup network, invalid prefix or "
+                "netmask: {0}".format(e)
             ) from e

         self.connectivity_url = connectivity_url
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index e8bf9e39..2768ef94 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -6,6 +6,7 @@

 import copy
 import functools
+import ipaddress
 import logging
 import socket
 import struct
@@ -872,12 +873,18 @@ def _normalize_net_keys(network, address_keys=()):
         try:
             prefix = int(maybe_prefix)
         except ValueError:
-            # this supports input of <address>/255.255.255.0
-            prefix = mask_to_net_prefix(maybe_prefix)
-    elif netmask:
-        prefix = mask_to_net_prefix(netmask)
-    elif 'prefix' in net:
-        prefix = int(net['prefix'])
+            if ipv6:
+                # this supports input of ffff:ffff:ffff::
+                prefix = ipv6_mask_to_net_prefix(maybe_prefix)
+            else:
+                # this supports input of 255.255.255.0
+                prefix = ipv4_mask_to_net_prefix(maybe_prefix)
+    elif netmask and not ipv6:
+        prefix = ipv4_mask_to_net_prefix(netmask)
+    elif netmask and ipv6:
+        prefix = ipv6_mask_to_net_prefix(netmask)
+    elif "prefix" in net:
+        prefix = int(net["prefix"])
     else:
         prefix = 64 if ipv6 else 24

@@ -972,72 +979,42 @@ def ipv4_mask_to_net_prefix(mask):
     str(24) => 24
     "24" => 24
     """
-    if isinstance(mask, int):
-        return mask
-    if isinstance(mask, str):
-        try:
-            return int(mask)
-        except ValueError:
-            pass
-    else:
-        raise TypeError("mask '%s' is not a string or int")
-
-    if '.' not in mask:
-        raise ValueError("netmask '%s' does not contain a '.'" % mask)
-
-    toks = mask.split(".")
-    if len(toks) != 4:
-        raise ValueError("netmask '%s' had only %d parts" % (mask, len(toks)))
-
-    return sum([bin(int(x)).count('1') for x in toks])
+    return ipaddress.ip_network(f"0.0.0.0/{mask}").prefixlen


 def ipv6_mask_to_net_prefix(mask):
     """Convert an ipv6 netmask (very uncommon) or prefix (64) to prefix.

-    If 'mask' is an integer or string representation of one then
-    int(mask) will be returned.
+    If the input is already an integer or a string representation of
+    an integer, then int(mask) will be returned.
+    "ffff:ffff:ffff::" => 48
+    "48" => 48
     """
-
-    if isinstance(mask, int):
-        return mask
-    if isinstance(mask, str):
-        try:
-            return int(mask)
-        except ValueError:
-            pass
-    else:
-        raise TypeError("mask '%s' is not a string or int")
-
-    if ':' not in mask:
-        raise ValueError("mask '%s' does not have a ':'")
-
-    bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00,
-                0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc,
-                0xfffe, 0xffff]
-    prefix = 0
-    for word in mask.split(':'):
-        if not word or int(word, 16) == 0:
-            break
-        prefix += bitCount.index(int(word, 16))
-
-    return prefix
-
-
-def mask_to_net_prefix(mask):
-    """Return the network prefix for the netmask provided.
-
-    Supports ipv4 or ipv6 netmasks."""
     try:
-        # if 'mask' is a prefix that is an integer.
-        # then just return it.
-        return int(mask)
+        # In the case the mask is already a prefix
+        prefixlen = ipaddress.ip_network(f"::/{mask}").prefixlen
+        return prefixlen
     except ValueError:
+        # ValueError means mask is an IPv6 address representation and need
+        # conversion.
         pass
-    if is_ipv6_addr(mask):
-        return ipv6_mask_to_net_prefix(mask)
-    else:
-        return ipv4_mask_to_net_prefix(mask)
+
+    netmask = ipaddress.ip_address(mask)
+    mask_int = int(netmask)
+    # If the mask is all zeroes, just return it
+    if mask_int == 0:
+        return mask_int
+
+    trailing_zeroes = min(
+        ipaddress.IPV6LENGTH, (~mask_int & (mask_int - 1)).bit_length()
+    )
+    leading_ones = mask_int >> trailing_zeroes
+    prefixlen = ipaddress.IPV6LENGTH - trailing_zeroes
+    all_ones = (1 << prefixlen) - 1
+    if leading_ones != all_ones:
+        raise ValueError("Invalid network mask '%s'" % mask)
+
+    return prefixlen


 def mask_and_ipv4_to_bcast_addr(mask, ip):
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index d5440998..7ecbe1c3 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -12,6 +12,7 @@ from cloudinit import util
 from cloudinit import subp
 from cloudinit.distros.parsers import networkmanager_conf
 from cloudinit.distros.parsers import resolv_conf
+from cloudinit.net import network_state

 from . import renderer
 from .network_state import (
@@ -171,43 +172,61 @@ class Route(ConfigMap):
         # (because Route can contain a mix of IPv4 and IPv6)
         reindex = -1
         for key in sorted(self._conf.keys()):
-            if 'ADDRESS' in key:
-                index = key.replace('ADDRESS', '')
-                address_value = str(self._conf[key])
-                # only accept combinations:
-                # if proto ipv6 only display ipv6 routes
-                # if proto ipv4 only display ipv4 routes
-                # do not add ipv6 routes if proto is ipv4
-                # do not add ipv4 routes if proto is ipv6
-                # (this array will contain a mix of ipv4 and ipv6)
-                if proto == "ipv4" and not self.is_ipv6_route(address_value):
-                    netmask_value = str(self._conf['NETMASK' + index])
-                    gateway_value = str(self._conf['GATEWAY' + index])
-                    # increase IPv4 index
-                    reindex = reindex + 1
-                    buf.write("%s=%s\n" % ('ADDRESS' + str(reindex),
-                                           _quote_value(address_value)))
-                    buf.write("%s=%s\n" % ('GATEWAY' + str(reindex),
-                                           _quote_value(gateway_value)))
-                    buf.write("%s=%s\n" % ('NETMASK' + str(reindex),
-                                           _quote_value(netmask_value)))
-                    metric_key = 'METRIC' + index
-                    if metric_key in self._conf:
-                        metric_value = str(self._conf['METRIC' + index])
-                        buf.write("%s=%s\n" % ('METRIC' + str(reindex),
-                                               _quote_value(metric_value)))
-                elif proto == "ipv6" and self.is_ipv6_route(address_value):
-                    netmask_value = str(self._conf['NETMASK' + index])
-                    gateway_value = str(self._conf['GATEWAY' + index])
-                    metric_value = (
-                        'metric ' + str(self._conf['METRIC' + index])
-                        if 'METRIC' + index in self._conf else '')
+            if "ADDRESS" not in key:
+                continue
+
+            index = key.replace("ADDRESS", "")
+            address_value = str(self._conf[key])
+            netmask_value = str(self._conf["NETMASK" + index])
+            gateway_value = str(self._conf["GATEWAY" + index])
+
+            # only accept combinations:
+            # if proto ipv6 only display ipv6 routes
+            # if proto ipv4 only display ipv4 routes
+            # do not add ipv6 routes if proto is ipv4
+            # do not add ipv4 routes if proto is ipv6
+            # (this array will contain a mix of ipv4 and ipv6)
+            if proto == "ipv4" and not self.is_ipv6_route(address_value):
+                # increase IPv4 index
+                reindex = reindex + 1
+                buf.write(
+                    "%s=%s\n"
+                    % ("ADDRESS" + str(reindex), _quote_value(address_value))
+                )
+                buf.write(
+                    "%s=%s\n"
+                    % ("GATEWAY" + str(reindex), _quote_value(gateway_value))
+                )
+                buf.write(
+                    "%s=%s\n"
+                    % ("NETMASK" + str(reindex), _quote_value(netmask_value))
+                )
+                metric_key = "METRIC" + index
+                if metric_key in self._conf:
+                    metric_value = str(self._conf["METRIC" + index])
                     buf.write(
-                        "%s/%s via %s %s dev %s\n" % (address_value,
-                                                      netmask_value,
-                                                      gateway_value,
-                                                      metric_value,
-                                                      self._route_name))
+                        "%s=%s\n"
+                        % ("METRIC" + str(reindex), _quote_value(metric_value))
+                    )
+            elif proto == "ipv6" and self.is_ipv6_route(address_value):
+                prefix_value = network_state.ipv6_mask_to_net_prefix(
+                    netmask_value
+                )
+                metric_value = (
+                    "metric " + str(self._conf["METRIC" + index])
+                    if "METRIC" + index in self._conf
+                    else ""
+                )
+                buf.write(
+                    "%s/%s via %s %s dev %s\n"
+                    % (
+                        address_value,
+                        prefix_value,
+                        gateway_value,
+                        metric_value,
+                        self._route_name,
+                    )
+                )

         return buf.getvalue()

diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 730ec586..e7980ab1 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -233,7 +233,7 @@ class OpenNebulaNetwork(object):
             # Set IPv4 address
             devconf['addresses'] = []
             mask = self.get_mask(c_dev)
-            prefix = str(net.mask_to_net_prefix(mask))
+            prefix = str(net.ipv4_mask_to_net_prefix(mask))
             devconf['addresses'].append(
                 self.get_ip(c_dev, mac) + '/' + prefix)

diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 9cd2c0c0..3a45c67e 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -9,7 +9,7 @@ import logging
 import os
 import re

-from cloudinit.net.network_state import mask_to_net_prefix
+from cloudinit.net.network_state import ipv4_mask_to_net_prefix
 from cloudinit import subp
 from cloudinit import util

@@ -180,7 +180,7 @@ class NicConfigurator(object):
         """
         route_list = []

-        cidr = mask_to_net_prefix(netmask)
+        cidr = ipv4_mask_to_net_prefix(netmask)

         for gateway in gateways:
             destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr)
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index c67b5fcc..0bc547af 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -2025,10 +2025,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
                     routes:
                     - gateway: 2001:67c:1562:1
                       network: 2001:67c:1
-                      netmask: ffff:ffff:0
+                      netmask: "ffff:ffff::"
                     - gateway: 3001:67c:1562:1
                       network: 3001:67c:1
-                      netmask: ffff:ffff:0
+                      netmask: "ffff:ffff::"
                       metric: 10000
             """),
         'expected_netplan': textwrap.dedent("""
@@ -2295,8 +2295,8 @@ iface bond0 inet6 static
         'route6-bond0': textwrap.dedent("""\
             # Created by cloud-init on instance boot automatically, do not edit.
             #
-            2001:67c:1/ffff:ffff:0 via 2001:67c:1562:1  dev bond0
-            3001:67c:1/ffff:ffff:0 via 3001:67c:1562:1 metric 10000 dev bond0
+            2001:67c:1/32 via 2001:67c:1562:1  dev bond0
+            3001:67c:1/32 via 3001:67c:1562:1 metric 10000 dev bond0
             """),
         'route-bond0': textwrap.dedent("""\
             ADDRESS0=10.1.3.0
@@ -3084,6 +3084,76 @@ USERCTL=no
         renderer.render_network_state(ns, target=render_dir)
         self.assertEqual([], os.listdir(render_dir))

+    def test_invalid_network_mask_ipv6(self):
+        net_json = {
+            "services": [{"type": "dns", "address": "172.19.0.12"}],
+            "networks": [
+                {
+                    "network_id": "public-ipv6",
+                    "type": "ipv6",
+                    "netmask": "",
+                    "link": "tap1a81968a-79",
+                    "routes": [
+                        {
+                            "gateway": "2001:DB8::1",
+                            "netmask": "ff:ff:ff:ff::",
+                            "network": "2001:DB8:1::1",
+                        },
+                    ],
+                    "ip_address": "2001:DB8::10",
+                    "id": "network1",
+                }
+            ],
+            "links": [
+                {
+                    "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+                    "mtu": None,
+                    "type": "bridge",
+                    "id": "tap1a81968a-79",
+                    "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+                },
+            ],
+        }
+        macs = {"fa:16:3e:ed:9a:59": "eth0"}
+        network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+        with self.assertRaises(ValueError):
+            network_state.parse_net_config_data(network_cfg, skip_broken=False)
+
+    def test_invalid_network_mask_ipv4(self):
+        net_json = {
+            "services": [{"type": "dns", "address": "172.19.0.12"}],
+            "networks": [
+                {
+                    "network_id": "public-ipv4",
+                    "type": "ipv4",
+                    "netmask": "",
+                    "link": "tap1a81968a-79",
+                    "routes": [
+                        {
+                            "gateway": "172.20.0.1",
+                            "netmask": "255.234.255.0",
+                            "network": "172.19.0.0",
+                        },
+                    ],
+                    "ip_address": "172.20.0.10",
+                    "id": "network1",
+                }
+            ],
+            "links": [
+                {
+                    "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+                    "mtu": None,
+                    "type": "bridge",
+                    "id": "tap1a81968a-79",
+                    "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+                },
+            ],
+        }
+        macs = {"fa:16:3e:ed:9a:59": "eth0"}
+        network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+        with self.assertRaises(ValueError):
+            network_state.parse_net_config_data(network_cfg, skip_broken=False)
+
     def test_openstack_rendering_samples(self):
         for os_sample in OS_SAMPLES:
             render_dir = self.tmp_dir()
--
2.27.0

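The core of the refactoring above is delegating mask validation to the standard ipaddress module, with manual bit arithmetic only for the IPv6 address-style masks that ip_network rejects. This can be demonstrated in isolation:

    import ipaddress

    # IPv4: ip_network validates the dotted mask and yields the prefix.
    print(ipaddress.ip_network("0.0.0.0/255.255.255.0").prefixlen)  # 24

    # IPv6: a plain prefix string passes straight through.
    print(ipaddress.ip_network("::/48").prefixlen)  # 48

    # An address-style IPv6 mask such as "ffff:ffff::" raises ValueError
    # in ip_network, which is why the patch falls back to bit arithmetic:
    mask_int = int(ipaddress.ip_address("ffff:ffff::"))
    trailing_zeroes = (~mask_int & (mask_int - 1)).bit_length()
    print(ipaddress.IPV6LENGTH - trailing_zeroes)  # 32
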
47  SOURCES/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch  (new file)
@ -0,0 +1,47 @@
From 0aba80bf749458960945acf106833b098c3c5c97 Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date: Fri, 14 Jan 2022 16:50:44 +0100
Subject: [PATCH 4/5] Revert unnecesary lcase in ds-identify (#978)

RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-MergeRequest: 17: Datasource for VMware
RH-Commit: [4/5] 334aae223b966173238a905150cf7bc07829c255 (eesposit/cloud-init-centos-)
RH-Bugzilla: 2040090
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Eduardo Otubo <otubo@redhat.com>

commit f516a7d37c1654addc02485e681b4358d7e7c0db
Author: Andrew Kutz <101085+akutz@users.noreply.github.com>
Date:   Fri Aug 13 14:30:55 2021 -0500

    Revert unnecesary lcase in ds-identify (#978)

    This patch reverts an unnecessary lcase optimization in the
    ds-identify script. SystemD documents the values produced by
    the systemd-detect-virt command are lower case, and the mapping
    table used by the FreeBSD check is also lower-case.

    The optimization added two new forked processes, needlessly
    causing overhead.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 tools/ds-identify | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/ds-identify b/tools/ds-identify
index 0e12298f..7b782462 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -449,7 +449,7 @@ detect_virt() {
 read_virt() {
     cached "$DI_VIRT" && return 0
     detect_virt
-    DI_VIRT="$(echo "${_RET}" | tr '[:upper:]' '[:lower:]')"
+    DI_VIRT="${_RET}"
 }

 is_container() {
--
2.27.0

@ -0,0 +1,46 @@
From cf7b45eaa070061615ad26f6754f7d2b39e7de76 Mon Sep 17 00:00:00 2001
From: Eduardo Otubo <otubo@redhat.com>
Date: Thu, 17 Feb 2022 15:32:35 +0100
Subject: [PATCH 3/3] Setting highest autoconnect priority for network-scripts

RH-Author: Eduardo Otubo <otubo@redhat.com>
RH-MergeRequest: 22: Setting highest autoconnect priority for network-scripts
RH-Commit: [1/1] 34f1d62f8934a983a124df95b861a1e448681d3b (otubo/cloud-init-src)
RH-Bugzilla: 2036060
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Set the highest autoconnect priority for network-scripts, which are
loaded by the NetworkManager ifcfg-rh plugin. Note that keyfile is the
only and default plugin shipped on RHEL 9; by setting the highest
autoconnect priority for network-scripts, NetworkManager will activate
network-scripts rather than keyfile. Network-scripts path:

Since this is a blocking issue, we decided to have this one-liner
downstream-only patch so we can move forward and have better
NetworkManager support later in the release.

rhbz: 2036060
x-downstream-only: yes

Signed-off-by: Eduardo Otubo <otubo@redhat.com>
---
 cloudinit/net/sysconfig.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 7ecbe1c3..c7ca7c56 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -309,7 +309,7 @@ class Renderer(renderer.Renderer):

     iface_defaults = {
         'rhel': {'ONBOOT': True, 'USERCTL': False,
-                 'BOOTPROTO': 'none'},
+                 'BOOTPROTO': 'none', "AUTOCONNECT_PRIORITY": 999},
         'suse': {'BOOTPROTO': 'static', 'STARTMODE': 'auto'},
     }

--
2.27.0

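The practical effect is one extra key in every rendered ifcfg file. As a toy illustration of how such per-distro defaults become KEY=value lines (this is not the actual cloud-init renderer):

    rhel_defaults = {
        'ONBOOT': True, 'USERCTL': False,
        'BOOTPROTO': 'none', 'AUTOCONNECT_PRIORITY': 999,
    }

    def render_ifcfg(device, defaults):
        # sysconfig files are KEY=value lines; booleans render as yes/no.
        def fmt(value):
            if isinstance(value, bool):
                return 'yes' if value else 'no'
            return str(value)
        lines = ['DEVICE=%s' % device]
        lines += ['%s=%s' % (k, fmt(v)) for k, v in sorted(defaults.items())]
        return '\n'.join(lines) + '\n'

    print(render_ifcfg('eth0', rhel_defaults))
    # AUTOCONNECT_PRIORITY=999 is what makes NetworkManager prefer the
    # ifcfg-rh connection over an equivalent keyfile connection.
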
97  SOURCES/ci-Update-dscheck_VMware-s-rpctool-check-970.patch  (new file)
@ -0,0 +1,97 @@
From f284c2925b7076b81afb9207161f01718ba70951 Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date: Fri, 14 Jan 2022 16:50:18 +0100
Subject: [PATCH 3/5] Update dscheck_VMware's rpctool check (#970)

RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-MergeRequest: 17: Datasource for VMware
RH-Commit: [3/5] 0739bc18b46b8877fb3825d13f7cda57acda2dde (eesposit/cloud-init-centos-)
RH-Bugzilla: 2040090
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Eduardo Otubo <otubo@redhat.com>

commit 7781dec3306e9467f216cfcb36b7e10a8b38547a
Author: Shreenidhi Shedi <53473811+sshedi@users.noreply.github.com>
Date:   Fri Aug 13 00:40:39 2021 +0530

    Update dscheck_VMware's rpctool check (#970)

    This patch updates the dscheck_VMware function's use of "vmware-rpctool"
    when checking to see if a "guestinfo" property is set.
    Because a successful exit code can occur even if there is an empty
    string returned, it is possible that the VMware datasource will be
    loaded as a false-positive. This patch ensures that in addition to
    validating the exit code, the emitted output is also examined to ensure
    a non-empty value is returned by rpctool before returning "${DS_FOUND}"
    from "dscheck_VMware()".

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 tools/ds-identify | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/tools/ds-identify b/tools/ds-identify
index c01eae3d..0e12298f 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -141,6 +141,7 @@ error() {
     debug 0 "$@"
     stderr "$@"
 }
+
 warn() {
     set -- "WARN:" "$@"
     debug 0 "$@"
@@ -344,7 +345,6 @@ geom_label_status_as() {
     return $ret
 }

-
 read_fs_info_freebsd() {
     local oifs="$IFS" line="" delim=","
     local ret=0 labels="" dev="" label="" ftype="" isodevs=""
@@ -404,7 +404,6 @@ cached() {
     [ -n "$1" ] && _RET="$1" && return || return 1
 }

-
 detect_virt() {
     local virt="${UNAVAILABLE}" r="" out=""
     if [ -d /run/systemd ]; then
@@ -450,7 +449,7 @@ detect_virt() {
 read_virt() {
     cached "$DI_VIRT" && return 0
     detect_virt
-    DI_VIRT=${_RET}
+    DI_VIRT="$(echo "${_RET}" | tr '[:upper:]' '[:lower:]')"
 }

 is_container() {
@@ -1370,16 +1369,20 @@ vmware_has_rpctool() {
     command -v vmware-rpctool >/dev/null 2>&1
 }

+vmware_rpctool_guestinfo() {
+    vmware-rpctool "info-get guestinfo.${1}" 2>/dev/null | grep "[[:alnum:]]"
+}
+
 vmware_rpctool_guestinfo_metadata() {
-    vmware-rpctool "info-get guestinfo.metadata"
+    vmware_rpctool_guestinfo "metadata"
 }

 vmware_rpctool_guestinfo_userdata() {
-    vmware-rpctool "info-get guestinfo.userdata"
+    vmware_rpctool_guestinfo "userdata"
 }

 vmware_rpctool_guestinfo_vendordata() {
-    vmware-rpctool "info-get guestinfo.vendordata"
+    vmware_rpctool_guestinfo "vendordata"
 }

 dscheck_VMware() {
--
2.27.0

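The "a zero exit status is not enough" rule above is easy to mirror outside of shell. A hedged Python sketch of the same check (assumes a vmware-rpctool binary on PATH; mirrors the grep "[[:alnum:]]" filter in the patch):

    import subprocess

    def guestinfo(key):
        # Returns the guestinfo value, or None when the property is unset
        # OR set but empty: the output must contain at least one
        # alphanumeric character, exactly like the grep in ds-identify.
        try:
            out = subprocess.run(
                ["vmware-rpctool", "info-get guestinfo.%s" % key],
                capture_output=True, text=True, check=True,
            ).stdout
        except (OSError, subprocess.CalledProcessError):
            return None
        return out if any(c.isalnum() for c in out) else None
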
@ -0,0 +1,470 @@
From 9ccb738cf078555b68122b1fc745a45fe952c439 Mon Sep 17 00:00:00 2001
From: Anh Vo <anhvo@microsoft.com>
Date: Tue, 13 Apr 2021 17:39:39 -0400
Subject: [PATCH 3/7] azure: Removing ability to invoke walinuxagent (#799)

RH-Author: Eduardo Otubo <otubo@redhat.com>
RH-MergeRequest: 18: Add support for userdata on Azure from IMDS
RH-Commit: [3/7] 7431b912e3df7ea384820f45e0230b47ab54643c (otubo/cloud-init-src)
RH-Bugzilla: 2042351
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Invoking walinuxagent from within cloud-init is no longer
supported/necessary
---
 cloudinit/sources/DataSourceAzure.py          | 137 ++++--------------
 doc/rtd/topics/datasources/azure.rst          |  62 ++------
 tests/unittests/test_datasource/test_azure.py |  97 -------------
 3 files changed, 35 insertions(+), 261 deletions(-)

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index de1452ce..020b7006 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -381,53 +381,6 @@ class DataSourceAzure(sources.DataSource):
             util.logexc(LOG, "handling set_hostname failed")
         return False

-    @azure_ds_telemetry_reporter
-    def get_metadata_from_agent(self):
-        temp_hostname = self.metadata.get('local-hostname')
-        agent_cmd = self.ds_cfg['agent_command']
-        LOG.debug("Getting metadata via agent. hostname=%s cmd=%s",
-                  temp_hostname, agent_cmd)
-
-        self.bounce_network_with_azure_hostname()
-
-        try:
-            invoke_agent(agent_cmd)
-        except subp.ProcessExecutionError:
-            # claim the datasource even if the command failed
-            util.logexc(LOG, "agent command '%s' failed.",
-                        self.ds_cfg['agent_command'])
-
-        ddir = self.ds_cfg['data_dir']
-
-        fp_files = []
-        key_value = None
-        for pk in self.cfg.get('_pubkeys', []):
-            if pk.get('value', None):
-                key_value = pk['value']
-                LOG.debug("SSH authentication: using value from fabric")
-            else:
-                bname = str(pk['fingerprint'] + ".crt")
-                fp_files += [os.path.join(ddir, bname)]
-                LOG.debug("SSH authentication: "
-                          "using fingerprint from fabric")
-
-        with events.ReportEventStack(
-                name="waiting-for-ssh-public-key",
-                description="wait for agents to retrieve SSH keys",
-                parent=azure_ds_reporter):
-            # wait very long for public SSH keys to arrive
-            # https://bugs.launchpad.net/cloud-init/+bug/1717611
-            missing = util.log_time(logfunc=LOG.debug,
-                                    msg="waiting for SSH public key files",
-                                    func=util.wait_for_files,
-                                    args=(fp_files, 900))
-            if len(missing):
-                LOG.warning("Did not find files, but going on: %s", missing)
-
-        metadata = {}
-        metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
-        return metadata
-
     def _get_subplatform(self):
         """Return the subplatform metadata source details."""
         if self.seed.startswith('/dev'):
@@ -1354,35 +1307,32 @@ class DataSourceAzure(sources.DataSource):
         On failure, returns False.
         """

-        if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
-            self.bounce_network_with_azure_hostname()
+        self.bounce_network_with_azure_hostname()

-            pubkey_info = None
-            try:
-                raise KeyError(
-                    "Not using public SSH keys from IMDS"
-                )
-                # pylint:disable=unreachable
-                public_keys = self.metadata['imds']['compute']['publicKeys']
-                LOG.debug(
-                    'Successfully retrieved %s key(s) from IMDS',
-                    len(public_keys)
-                    if public_keys is not None
-                    else 0
-                )
-            except KeyError:
-                LOG.debug(
-                    'Unable to retrieve SSH keys from IMDS during '
-                    'negotiation, falling back to OVF'
-                )
-                pubkey_info = self.cfg.get('_pubkeys', None)
-
-            metadata_func = partial(get_metadata_from_fabric,
-                                    fallback_lease_file=self.
-                                    dhclient_lease_file,
-                                    pubkey_info=pubkey_info)
-        else:
-            metadata_func = self.get_metadata_from_agent
+        pubkey_info = None
+        try:
+            raise KeyError(
+                "Not using public SSH keys from IMDS"
+            )
+            # pylint:disable=unreachable
+            public_keys = self.metadata['imds']['compute']['publicKeys']
+            LOG.debug(
+                'Successfully retrieved %s key(s) from IMDS',
+                len(public_keys)
+                if public_keys is not None
+                else 0
+            )
+        except KeyError:
+            LOG.debug(
+                'Unable to retrieve SSH keys from IMDS during '
+                'negotiation, falling back to OVF'
+            )
+            pubkey_info = self.cfg.get('_pubkeys', None)
+
+        metadata_func = partial(get_metadata_from_fabric,
+                                fallback_lease_file=self.
+                                dhclient_lease_file,
+                                pubkey_info=pubkey_info)

         LOG.debug("negotiating with fabric via agent command %s",
                   self.ds_cfg['agent_command'])
@@ -1617,33 +1567,6 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
     return True


-@azure_ds_telemetry_reporter
-def crtfile_to_pubkey(fname, data=None):
-    pipeline = ('openssl x509 -noout -pubkey < "$0" |'
-                'ssh-keygen -i -m PKCS8 -f /dev/stdin')
-    (out, _err) = subp.subp(['sh', '-c', pipeline, fname],
-                            capture=True, data=data)
-    return out.rstrip()
-
-
-@azure_ds_telemetry_reporter
-def pubkeys_from_crt_files(flist):
-    pubkeys = []
-    errors = []
-    for fname in flist:
-        try:
-            pubkeys.append(crtfile_to_pubkey(fname))
-        except subp.ProcessExecutionError:
-            errors.append(fname)
-
-    if errors:
-        report_diagnostic_event(
-            "failed to convert the crt files to pubkey: %s" % errors,
-            logger_func=LOG.warning)
-
-    return pubkeys
-
-
 @azure_ds_telemetry_reporter
 def write_files(datadir, files, dirmode=None):

@@ -1672,16 +1595,6 @@ def write_files(datadir, files, dirmode=None):
         util.write_file(filename=fname, content=content, mode=0o600)


-@azure_ds_telemetry_reporter
-def invoke_agent(cmd):
-    # this is a function itself to simplify patching it for test
-    if cmd:
-        LOG.debug("invoking agent: %s", cmd)
-        subp.subp(cmd, shell=(not isinstance(cmd, list)))
-    else:
-        LOG.debug("not invoking agent")
-
-
 def find_child(node, filter_func):
     ret = []
     if not node.hasChildNodes():
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index e04c3a33..ad9f2236 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -5,28 +5,6 @@ Azure

 This datasource finds metadata and user-data from the Azure cloud platform.

-walinuxagent
-------------
-walinuxagent has several functions within images. For cloud-init
-specifically, the relevant functionality it performs is to register the
-instance with the Azure cloud platform at boot so networking will be
-permitted. For more information about the other functionality of
-walinuxagent, see `Azure's documentation
-<https://github.com/Azure/WALinuxAgent#introduction>`_ for more details.
-(Note, however, that only one of walinuxagent's provisioning and cloud-init
-should be used to perform instance customisation.)
-
-If you are configuring walinuxagent yourself, you will want to ensure that you
-have `Provisioning.UseCloudInit
-<https://github.com/Azure/WALinuxAgent#provisioningusecloudinit>`_ set to
-``y``.
-
-
-Builtin Agent
--------------
-An alternative to using walinuxagent to register to the Azure cloud platform
-is to use the ``__builtin__`` agent command. This section contains more
-background on what that code path does, and how to enable it.

 The Azure cloud platform provides initial data to an instance via an attached
 CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some
@@ -41,16 +19,6 @@ by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in
 'dhclient_hook' of cloud-init itself. This sub-command will write the client
 information in json format to /run/cloud-init/dhclient.hook/<interface>.json.

-In order for cloud-init to leverage this method to find the endpoint, the
-cloud.cfg file must contain:
-
-.. sourcecode:: yaml
-
-  datasource:
-    Azure:
-      set_hostname: False
-      agent_command: __builtin__
-
 If those files are not available, the fallback is to check the leases file
 for the endpoint server (again option 245).

@@ -83,9 +51,6 @@ configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).

 The settings that may be configured are:

- * **agent_command**: Either __builtin__ (default) or a command to run to getcw
-   metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the
-   provided command to obtain metadata.
 * **apply_network_config**: Boolean set to True to use network configuration
   described by Azure's IMDS endpoint instead of fallback network config of
   dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is
@@ -121,7 +86,6 @@ An example configuration with the default values is provided below:

  datasource:
    Azure:
-      agent_command: __builtin__
      apply_network_config: true
      data_dir: /var/lib/waagent
      dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
@@ -144,9 +108,7 @@ child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``)
 If both ``UserData`` and ``CustomData`` are provided behavior is undefined on
 which will be selected.

-In the example below, user-data provided is 'this is my userdata', and the
-datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``.
-That agent command will take affect as if it were specified in system config.
+In the example below, user-data provided is 'this is my userdata'

 Example:

@@ -184,20 +146,16 @@ The hostname is provided to the instance in the ovf-env.xml file as
 Whatever value the instance provides in its dhcp request will resolve in the
 domain returned in the 'search' request.

-The interesting issue is that a generic image will already have a hostname
-configured. The ubuntu cloud images have 'ubuntu' as the hostname of the
-system, and the initial dhcp request on eth0 is not guaranteed to occur after
-the datasource code has been run. So, on first boot, that initial value will
-be sent in the dhcp request and *that* value will resolve.
-
-In order to make the ``HostName`` provided in the ovf-env.xml resolve, a
-dhcp request must be made with the new value. Walinuxagent (in its current
-version) handles this by polling the state of hostname and bouncing ('``ifdown
-eth0; ifup eth0``' the network interface if it sees that a change has been
-made.
+A generic image will already have a hostname configured. The ubuntu
+cloud images have 'ubuntu' as the hostname of the system, and the
+initial dhcp request on eth0 is not guaranteed to occur after the
+datasource code has been run. So, on first boot, that initial value
+will be sent in the dhcp request and *that* value will resolve.

-cloud-init handles this by setting the hostname in the DataSource's 'get_data'
-method via '``hostname $HostName``', and then bouncing the interface. This
+In order to make the ``HostName`` provided in the ovf-env.xml resolve,
+a dhcp request must be made with the new value. cloud-init handles
+this by setting the hostname in the DataSource's 'get_data' method via
+'``hostname $HostName``', and then bouncing the interface. This
 behavior can be configured or disabled in the datasource config. See
 'Configuration' above.

diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index dedebeb1..320fa857 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -638,17 +638,10 @@ scbus-1 on xpt0 bus 0
         def dsdevs():
             return data.get('dsdevs', [])

-        def _invoke_agent(cmd):
-            data['agent_invoked'] = cmd
-
         def _wait_for_files(flist, _maxwait=None, _naplen=None):
             data['waited'] = flist
             return []

-        def _pubkeys_from_crt_files(flist):
-            data['pubkey_files'] = flist
-            return ["pubkey_from: %s" % f for f in flist]
-
         if data.get('ovfcontent') is not None:
             populate_dir(os.path.join(self.paths.seed_dir, "azure"),
                          {'ovf-env.xml': data['ovfcontent']})
@@ -675,8 +668,6 @@ scbus-1 on xpt0 bus 0

         self.apply_patches([
             (dsaz, 'list_possible_azure_ds_devs', dsdevs),
-            (dsaz, 'invoke_agent', _invoke_agent),
-            (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
            (dsaz, 'perform_hostname_bounce', mock.MagicMock()),
            (dsaz, 'get_hostname', mock.MagicMock()),
            (dsaz, 'set_hostname', mock.MagicMock()),
@@ -765,7 +756,6 @@ scbus-1 on xpt0 bus 0
         ret = dsrc.get_data()
         self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
         self.assertFalse(ret)
-        self.assertNotIn('agent_invoked', data)
         # Assert that for non viable platforms,
         # there is no communication with the Azure datasource.
         self.assertEqual(
@@ -789,7 +779,6 @@ scbus-1 on xpt0 bus 0
         ret = dsrc.get_data()
         self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
         self.assertFalse(ret)
-        self.assertNotIn('agent_invoked', data)
         self.assertEqual(
             1,
             m_report_failure.call_count)
@@ -806,7 +795,6 @@ scbus-1 on xpt0 bus 0
             1,
             m_crawl_metadata.call_count)
         self.assertFalse(ret)
-        self.assertNotIn('agent_invoked', data)

     def test_crawl_metadata_exception_should_report_failure_with_msg(self):
         data = {}
@@ -1086,21 +1074,6 @@ scbus-1 on xpt0 bus 0
         self.assertTrue(os.path.isdir(self.waagent_d))
         self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)

-    def test_user_cfg_set_agent_command_plain(self):
-        # set dscfg in via plaintext
-        # we must have friendly-to-xml formatted plaintext in yaml_cfg
-        # not all plaintext is expected to work.
-        yaml_cfg = "{agent_command: my_command}\n"
-        cfg = yaml.safe_load(yaml_cfg)
-        odata = {'HostName': "myhost", 'UserName': "myuser",
-                 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
-        dsrc = self._get_ds(data)
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual(data['agent_invoked'], cfg['agent_command'])
-
     @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
                 return_value=None)
     def test_network_config_set_from_imds(self, m_driver):
@@ -1205,29 +1178,6 @@ scbus-1 on xpt0 bus 0
         dsrc.get_data()
         self.assertEqual('eastus2', dsrc.region)

-    def test_user_cfg_set_agent_command(self):
-        # set dscfg in via base64 encoded yaml
-        cfg = {'agent_command': "my_command"}
-        odata = {'HostName': "myhost", 'UserName': "myuser",
-                 'dscfg': {'text': b64e(yaml.dump(cfg)),
-                           'encoding': 'base64'}}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
-        dsrc = self._get_ds(data)
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual(data['agent_invoked'], cfg['agent_command'])
-
-    def test_sys_cfg_set_agent_command(self):
-        sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}}
-        data = {'ovfcontent': construct_valid_ovf_env(data={}),
-                'sys_cfg': sys_cfg}
-
-        dsrc = self._get_ds(data)
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual(data['agent_invoked'], '_COMMAND')
-
     def test_sys_cfg_set_never_destroy_ntfs(self):
         sys_cfg = {'datasource': {'Azure': {
             'never_destroy_ntfs': 'user-supplied-value'}}}
@@ -1311,51 +1261,6 @@ scbus-1 on xpt0 bus 0
         self.assertTrue(ret)
         self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))

-    def test_cfg_has_pubkeys_fingerprint(self):
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
-        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata,
-                                                      pubkeys=pubkeys)}
-
-        dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        for mypk in mypklist:
-            self.assertIn(mypk, dsrc.cfg['_pubkeys'])
-            self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1])
-
-    def test_cfg_has_pubkeys_value(self):
-        # make sure that provided key is used over fingerprint
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}]
-        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata,
-                                                      pubkeys=pubkeys)}
-
-        dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-
-        for mypk in mypklist:
-            self.assertIn(mypk, dsrc.cfg['_pubkeys'])
-            self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
-
-    def test_cfg_has_no_fingerprint_has_value(self):
-        # test value is used when fingerprint not provided
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}]
-        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata,
-                                                      pubkeys=pubkeys)}
-
-        dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-
-        for mypk in mypklist:
-            self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
-
     def test_default_ephemeral_configs_ephemeral_exists(self):
         # make sure the ephemeral configs are correct if disk present
         odata = {}
@@ -1919,8 +1824,6 @@ class TestAzureBounce(CiTestCase):
     with_logs = True

     def mock_out_azure_moving_parts(self):
-        self.patches.enter_context(
-            mock.patch.object(dsaz, 'invoke_agent'))
         self.patches.enter_context(
             mock.patch.object(dsaz.util, 'wait_for_files'))
         self.patches.enter_context(
--
2.27.0

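Stripped of the Azure specifics, the negotiation path that survives this patch is a plain try/except fallback. A sketch under that reading (the metadata layout is assumed from the diff above):

    def pick_pubkey_info(metadata, ovf_cfg):
        # Prefer SSH keys from IMDS metadata; fall back to the OVF
        # environment's '_pubkeys' when IMDS has none (or, as in this
        # patch, when the IMDS branch is deliberately short-circuited).
        try:
            return metadata['imds']['compute']['publicKeys']
        except KeyError:
            return ovf_cfg.get('_pubkeys', None)
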
@ -0,0 +1,87 @@
From e0eca40388080dabf6598c0d9653ea50ae10c984 Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date: Tue, 7 Dec 2021 10:04:43 +0100
Subject: [PATCH] cloudinit/net: handle two different routes for the same ip
 (#1124)

RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-MergeRequest: 15: cloudinit/net: handle two different routes for the same ip (#1124)
RH-Commit: [1/1] b623a76ccd642e22e8d9c4aebc26f0b0cec8118b (eesposit/cloud-init-centos-)
RH-Bugzilla: 2028031
RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
RH-Acked-by: Eduardo Otubo <otubo@redhat.com>

commit 0e25076b34fa995161b83996e866c0974cee431f
Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date:   Mon Dec 6 18:34:26 2021 +0100

    cloudinit/net: handle two different routes for the same ip (#1124)

    If we set a dhcp server side like this:
    $ cat /var/tmp/cloud-init/cloud-init-dhcp-f0rie5tm/dhcp.leases
    lease {
    ...
    option classless-static-routes 31.169.254.169.254 0.0.0.0,31.169.254.169.254
    10.112.143.127,22.10.112.140 0.0.0.0,0 10.112.140.1;
    ...
    }
    cloud-init fails to configure the routes via 'ip route add' because there are
    two different routes for 169.254.169.254:

    $ ip -4 route add 192.168.1.1/32 via 0.0.0.0 dev eth0
    $ ip -4 route add 192.168.1.1/32 via 10.112.140.248 dev eth0

    But NetworkManager can handle such a scenario successfully, as it uses "ip route append".
    So change cloud-init to also use "ip route append" to fix the issue:

    $ ip -4 route append 192.168.1.1/32 via 0.0.0.0 dev eth0
    $ ip -4 route append 192.168.1.1/32 via 10.112.140.248 dev eth0

    Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

    RHBZ: #2003231

Conflicts:
	cloudinit/net/tests/test_init.py: a mock call in
	test_ephemeral_ipv4_network_with_rfc3442_static_routes is not
	present downstream.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 cloudinit/net/__init__.py        | 2 +-
 cloudinit/net/tests/test_init.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index de65e7af..4bdc1bda 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -1076,7 +1076,7 @@ class EphemeralIPv4Network(object):
             if gateway != "0.0.0.0/0":
                 via_arg = ['via', gateway]
             subp.subp(
-                ['ip', '-4', 'route', 'add', net_address] + via_arg +
+                ['ip', '-4', 'route', 'append', net_address] + via_arg +
                 ['dev', self.interface], capture=True)
             self.cleanup_cmds.insert(
                 0, ['ip', '-4', 'route', 'del', net_address] + via_arg +
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 0535387a..6754df8d 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -715,10 +715,10 @@ class TestEphemeralIPV4Network(CiTestCase):
                 ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
                 capture=True),
             mock.call(
-                ['ip', '-4', 'route', 'add', '169.254.169.254/32',
+                ['ip', '-4', 'route', 'append', '169.254.169.254/32',
                  'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
             mock.call(
-                ['ip', '-4', 'route', 'add', '0.0.0.0/0',
+                ['ip', '-4', 'route', 'append', '0.0.0.0/0',
                  'via', '192.168.2.1', 'dev', 'eth0'], capture=True)]
         expected_teardown_calls = [
             mock.call(
--
2.27.0

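The add/append distinction is the whole fix: "ip route add" refuses a second route to a destination that already has one, while "ip route append" installs both. A sketch of the ephemeral route setup in those terms (requires root and the iproute2 ip binary, so purely illustrative):

    import subprocess

    def append_route(net_address, gateway, interface):
        # 'append' (unlike 'add') tolerates a second route to the same
        # destination, as produced by the classless-static-routes lease
        # quoted in the commit message.
        via = ['via', gateway] if gateway else []
        subprocess.run(
            ['ip', '-4', 'route', 'append', net_address]
            + via + ['dev', interface],
            check=True)

    # Two different routes for the same IP, as in the bug report:
    # append_route('169.254.169.254/32', '0.0.0.0', 'eth0')
    # append_route('169.254.169.254/32', '10.112.140.248', 'eth0')
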
@ -0,0 +1,174 @@
|
||||
From 83f3d481c5f0d962bff5bacfd2c323529754869e Mon Sep 17 00:00:00 2001
From: Amy Chen <xiachen@redhat.com>
Date: Thu, 2 Dec 2021 18:11:08 +0800
Subject: [PATCH] fix error on upgrade caused by new vendordata2 attributes

RH-Author: xiachen <None>
RH-MergeRequest: 14: fix error on upgrade caused by new vendordata2 attributes
RH-Commit: [1/1] ef14db399cd1fe6e4ba847d98acee15fef8021de (xiachen/cloud-init-centos)
RH-Bugzilla: 2028381
RH-Acked-by: Eduardo Otubo <otubo@redhat.com>
RH-Acked-by: Mohamed Gamal Morsy <mmorsy@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

commit d132356cc361abef2d90d4073438f3ab759d5964
Author: James Falcon <TheRealFalcon@users.noreply.github.com>
Date: Mon Apr 19 11:31:28 2021 -0500

fix error on upgrade caused by new vendordata2 attributes (#869)

In #777, we added 'vendordata2' and 'vendordata2_raw' attributes to
the DataSource class, but didn't use the upgrade framework to handle
unpickling after an upgrade. This commit adds the necessary
upgrade code.

Additionally, it adds a smaller-scope upgrade test to our integration
tests that will run on every CI run so we catch these issues
immediately in the future.

LP: #1922739

Signed-off-by: Amy Chen <xiachen@redhat.com>
---
cloudinit/sources/__init__.py | 12 +++++++++++-
cloudinit/tests/test_upgrade.py | 4 ++++
tests/integration_tests/clouds.py | 4 ++--
tests/integration_tests/test_upgrade.py | 25 ++++++++++++++++++++++++-
4 files changed, 41 insertions(+), 4 deletions(-)

diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 1ad1880d..7d74f8d9 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -24,6 +24,7 @@ from cloudinit import util
from cloudinit.atomic_helper import write_json
from cloudinit.event import EventType
from cloudinit.filters import launch_index
+from cloudinit.persistence import CloudInitPickleMixin
from cloudinit.reporting import events

DSMODE_DISABLED = "disabled"
@@ -134,7 +135,7 @@ URLParams = namedtuple(
'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])


-class DataSource(metaclass=abc.ABCMeta):
+class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):

dsmode = DSMODE_NETWORK
default_locale = 'en_US.UTF-8'
@@ -196,6 +197,8 @@ class DataSource(metaclass=abc.ABCMeta):
# non-root users
sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)

+ _ci_pkl_version = 1
+
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
@@ -218,6 +221,13 @@ class DataSource(metaclass=abc.ABCMeta):
else:
self.ud_proc = ud_proc

+ def _unpickle(self, ci_pkl_version: int) -> None:
+ """Perform deserialization fixes for Paths."""
+ if not hasattr(self, 'vendordata2'):
+ self.vendordata2 = None
+ if not hasattr(self, 'vendordata2_raw'):
+ self.vendordata2_raw = None
+
def __str__(self):
return type_utils.obj_name(self)

diff --git a/cloudinit/tests/test_upgrade.py b/cloudinit/tests/test_upgrade.py
index f79a2536..71cea616 100644
--- a/cloudinit/tests/test_upgrade.py
+++ b/cloudinit/tests/test_upgrade.py
@@ -43,3 +43,7 @@ class TestUpgrade:
def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl):
"""We always expect Networking.blacklist_drivers to be initialised."""
assert previous_obj_pkl.distro.networking.blacklist_drivers is None
+
+ def test_vendordata_exists(self, previous_obj_pkl):
+ assert previous_obj_pkl.vendordata2 is None
+ assert previous_obj_pkl.vendordata2_raw is None
\ No newline at end of file
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 9527a413..1d0b9d83 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -100,14 +100,14 @@ class IntegrationCloud(ABC):
# Even if we're using the default key, it may still have a
# different name in the clouds, so we need to set it separately.
self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME
- self._released_image_id = self._get_initial_image()
+ self.released_image_id = self._get_initial_image()
self.snapshot_id = None

@property
def image_id(self):
if self.snapshot_id:
return self.snapshot_id
- return self._released_image_id
+ return self.released_image_id

def emit_settings_to_log(self) -> None:
log.info(
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
index c20cb3c1..48e0691b 100644
--- a/tests/integration_tests/test_upgrade.py
+++ b/tests/integration_tests/test_upgrade.py
@@ -1,4 +1,5 @@
import logging
+import os
import pytest
import time
from pathlib import Path
@@ -8,6 +9,8 @@ from tests.integration_tests.conftest import (
get_validated_source,
session_start_time,
)
+from tests.integration_tests.instances import CloudInitSource
+

log = logging.getLogger('integration_testing')

@@ -63,7 +66,7 @@ def test_upgrade(session_cloud: IntegrationCloud):
return # type checking doesn't understand that skip raises

launch_kwargs = {
- 'image_id': session_cloud._get_initial_image(),
+ 'image_id': session_cloud.released_image_id,
}

image = ImageSpecification.from_os_image()
@@ -93,6 +96,26 @@ def test_upgrade(session_cloud: IntegrationCloud):
instance.install_new_cloud_init(source, take_snapshot=False)
instance.execute('hostname something-else')
_restart(instance)
+ assert instance.execute('cloud-init status --wait --long').ok
_output_to_compare(instance, after_path, netcfg_path)

log.info('Wrote upgrade test logs to %s and %s', before_path, after_path)
+
+
+@pytest.mark.ci
+@pytest.mark.ubuntu
+def test_upgrade_package(session_cloud: IntegrationCloud):
+ if get_validated_source(session_cloud) != CloudInitSource.DEB_PACKAGE:
+ not_run_message = 'Test only supports upgrading to build deb'
+ if os.environ.get('TRAVIS'):
+ # If this isn't running on CI, we should know
+ pytest.fail(not_run_message)
+ else:
+ pytest.skip(not_run_message)
+
+ launch_kwargs = {'image_id': session_cloud.released_image_id}
+
+ with session_cloud.launch(launch_kwargs=launch_kwargs) as instance:
+ instance.install_deb()
+ instance.restart()
+ assert instance.execute('cloud-init status --wait --long').ok
--
2.27.0
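
The "upgrade framework" this patch relies on is CloudInitPickleMixin, imported in the diff above from cloudinit.persistence. A minimal sketch of the versioned-unpickle pattern it provides (names and structure are illustrative, not the exact upstream implementation): the pickled state carries a version, and _unpickle() runs after every unpickle so subclasses can backfill attributes, such as vendordata2, that pickles written by an older cloud-init lack.

# Sketch of the versioned-unpickle pattern (assumed shape, for
# illustration; see cloudinit/persistence.py for the real mixin).
import pickle


class VersionedPickleMixin:
    _ci_pkl_version = 0

    def __getstate__(self):
        # Record the version the object was pickled with.
        state = self.__dict__.copy()
        state["_ci_pkl_version"] = self._ci_pkl_version
        return state

    def __setstate__(self, state):
        version = state.pop("_ci_pkl_version", 0)
        self.__dict__.update(state)
        self._unpickle(version)

    def _unpickle(self, ci_pkl_version: int) -> None:
        pass  # subclasses backfill attributes added in newer versions


class DataSource(VersionedPickleMixin):
    _ci_pkl_version = 1

    def __init__(self):
        self.userdata_raw = "old"
        self.vendordata2 = None  # attribute new in this version

    def _unpickle(self, ci_pkl_version: int) -> None:
        # A pickle written before vendordata2 existed lacks the attribute;
        # default it instead of crashing with AttributeError after upgrade.
        if not hasattr(self, "vendordata2"):
            self.vendordata2 = None


# Simulate an object pickled by the old version: drop the new attribute,
# round-trip it, and confirm _unpickle() restored a usable default.
ds = DataSource()
del ds.vendordata2
restored = pickle.loads(pickle.dumps(ds))
assert restored.vendordata2 is None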

SPECS/cloud-init.spec
@@ -1,6 +1,6 @@
Name: cloud-init
Version: 21.1
-Release: 12%{?dist}.alma
+Release: 19%{?dist}.alma
Summary: Cloud instance init scripts
License: ASL 2.0 or GPLv3
URL: http://launchpad.net/cloud-init
@@ -30,6 +30,40 @@ Patch11: ci-Inhibit-sshd-keygen-.service-if-cloud-init-is-active.patch
Patch12: ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch
# For bz#2016305 - disable-sshd-keygen-if-cloud-init-active.conf:8: Missing '=', ignoring line
Patch13: ci-remove-unnecessary-EOF-string-in-disable-sshd-keygen.patch
+# For bz#2028381 - cloud-init.service fails to start after package update
+Patch14: ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch
+# For bz#2028031 - [RHEL-9] Above 19.2 of cloud-init fails to configure routes when configuring static and default routes to the same destination IP
+Patch15: ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch
+# For bz#2040090 - [cloud-init][RHEL9] Support for cloud-init datasource 'cloud-init-vmware-guestinfo'
+Patch16: ci-Datasource-for-VMware-953.patch
+# For bz#2040090 - [cloud-init][RHEL9] Support for cloud-init datasource 'cloud-init-vmware-guestinfo'
+Patch17: ci-Change-netifaces-dependency-to-0.10.4-965.patch
+# For bz#2040090 - [cloud-init][RHEL9] Support for cloud-init datasource 'cloud-init-vmware-guestinfo'
+Patch18: ci-Update-dscheck_VMware-s-rpctool-check-970.patch
+# For bz#2040090 - [cloud-init][RHEL9] Support for cloud-init datasource 'cloud-init-vmware-guestinfo'
+Patch19: ci-Revert-unnecesary-lcase-in-ds-identify-978.patch
+# For bz#2042351 - [RHEL-9] Support for provisioning Azure VM with userdata
+Patch20: ci-Add-flexibility-to-IMDS-api-version-793.patch
+# For bz#2042351 - [RHEL-9] Support for provisioning Azure VM with userdata
+Patch21: ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch
+# For bz#2042351 - [RHEL-9] Support for provisioning Azure VM with userdata
+Patch22: ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch
+# For bz#2042351 - [RHEL-9] Support for provisioning Azure VM with userdata
+Patch23: ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch
+# For bz#2042351 - [RHEL-9] Support for provisioning Azure VM with userdata
+Patch24: ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch
+# For bz#2042351 - [RHEL-9] Support for provisioning Azure VM with userdata
+Patch25: ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch
+# For bz#2042351 - [RHEL-9] Support for provisioning Azure VM with userdata
+Patch26: ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch
+# For bz#1998445 - [Azure][RHEL-9] ordering cycle exists after reboot
+Patch27: ci-Add-_netdev-option-to-mount-Azure-ephemeral-disk-121.patch
+# For bz#2053546 - cloud-init writes route6-$DEVICE config with a HEX netmask. ip route does not like : Error: inet6 prefix is expected rather than "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::".
+Patch28: ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch
+# For bz#1998445 - [Azure][RHEL-9] ordering cycle exists after reboot
+Patch29: ci-Adding-_netdev-to-the-default-mount-configuration.patch
+# For bz#2036060 - [cloud-init][ESXi][RHEL-9] Failed to config static IP according to VMware Customization Config File
+Patch30: ci-Setting-highest-autoconnect-priority-for-network-scr.patch

# AlmaLinux patches
Patch100: cloud-init-20.3-add_almalinux.patch
@@ -82,6 +116,10 @@ Requires: shadow-utils
Requires: util-linux
Requires: xfsprogs
Requires: dhcp-client
+# https://bugzilla.redhat.com/show_bug.cgi?id=2032524
+Requires: gdisk
+Requires: openssl
+Requires: python3-netifaces

%{?systemd_requires}

@@ -230,9 +268,60 @@ fi
%config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf

%changelog
-* Tue Jan 18 2022 Eduard Abdullin <eabdullin@almalinux.org> - 21.1-12.alma
+* Mon Apr 18 2022 Eduard Abdullin <eabdullin@almalinux.org> - 21.1-19.alma
- AlmaLinux support

+* Fri Feb 25 2022 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-19
+- ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch [bz#2053546]
+- ci-Adding-_netdev-to-the-default-mount-configuration.patch [bz#1998445]
+- ci-Setting-highest-autoconnect-priority-for-network-scr.patch [bz#2036060]
+- Resolves: bz#2053546
+(cloud-init writes route6-$DEVICE config with a HEX netmask. ip route does not like : Error: inet6 prefix is expected rather than "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::".)
+- Resolves: bz#1998445
+([Azure][RHEL-9] ordering cycle exists after reboot)
+- Resolves: bz#2036060
+([cloud-init][ESXi][RHEL-9] Failed to config static IP according to VMware Customization Config File)
+
+* Fri Feb 11 2022 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-18
+- ci-Add-_netdev-option-to-mount-Azure-ephemeral-disk-121.patch [bz#1998445]
+- Resolves: bz#1998445
+([Azure][RHEL-9] ordering cycle exists after reboot)
+
+* Mon Feb 07 2022 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-17
+- ci-Add-flexibility-to-IMDS-api-version-793.patch [bz#2042351]
+- ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch [bz#2042351]
+- ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch [bz#2042351]
+- ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch [bz#2042351]
+- ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch [bz#2042351]
+- ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch [bz#2042351]
+- ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch [bz#2042351]
+- Resolves: bz#2042351
+([RHEL-9] Support for provisioning Azure VM with userdata)
+
+* Fri Jan 21 2022 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-16
+- ci-Datasource-for-VMware-953.patch [bz#2040090]
+- ci-Change-netifaces-dependency-to-0.10.4-965.patch [bz#2040090]
+- ci-Update-dscheck_VMware-s-rpctool-check-970.patch [bz#2040090]
+- ci-Revert-unnecesary-lcase-in-ds-identify-978.patch [bz#2040090]
+- ci-Add-netifaces-package-as-a-Requires-in-cloud-init.sp.patch [bz#2040090]
+- Resolves: bz#2040090
+([cloud-init][RHEL9] Support for cloud-init datasource 'cloud-init-vmware-guestinfo')
+
+* Thu Jan 13 2022 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-15
+- ci-Add-gdisk-and-openssl-as-deps-to-fix-UEFI-Azure-init.patch [bz#2032524]
+- Resolves: bz#2032524
+([RHEL9] [Azure] cloud-init fails to configure the system)
+
+* Tue Dec 14 2021 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-14
+- ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch [bz#2028031]
+- Resolves: bz#2028031
+([RHEL-9] Above 19.2 of cloud-init fails to configure routes when configuring static and default routes to the same destination IP)
+
+* Mon Dec 06 2021 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-13
+- ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch [bz#2028381]
+- Resolves: bz#2028381
+(cloud-init.service fails to start after package update)
+
* Mon Nov 01 2021 Miroslav Rezanina <mrezanin@redhat.com> - 21.1-12
- ci-remove-unnecessary-EOF-string-in-disable-sshd-keygen.patch [bz#2016305]
- Resolves: bz#2016305