diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6f555cf --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +/cloud-init-21.1.tar.gz +/test_version_change.pkl diff --git a/0001-Add-initial-redhat-setup.patch b/0001-Add-initial-redhat-setup.patch new file mode 100644 index 0000000..b67fcae --- /dev/null +++ b/0001-Add-initial-redhat-setup.patch @@ -0,0 +1,561 @@ +From 074cb9b011623849cfa95c1d7cc813bb28f03ff0 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 7 May 2021 13:36:03 +0200 +Subject: Add initial redhat setup + +Merged patches (21.1): +- 915d30ad Change gating file to correct rhel version +- 311f318d Removing net-tools dependency +- 74731806 Adding man pages to Red Hat spec file +- 758d333d Removing blocking test from yaml configuration file +- c7e7c59c Changing permission of cloud-init-generator to 755 +- 8b85abbb Installing man pages in the correct place with correct permissions +- c6808d8d Fix unit failure of cloud-final.service if NetworkManager was not present. +- 11866ef6 Report full specific version with "cloud-init --version" + +Rebase notes (18.5): +- added bash_completition file +- added cloud-id file + +Merged patches (20.3): +- 01900d0 changing ds-identify patch from /usr/lib to /usr/libexec +- 7f47ca3 Render the generator from template instead of cp + +Merged patches (19.4): +- 4ab5a61 Fix for network configuration not persisting after reboot +- 84cf125 Removing cloud-user from wheel +- 31290ab Adding gating tests for Azure, ESXi and AWS + +Merged patches (18.5): +- 2d6b469 add power-state-change module to cloud_final_modules +- 764159f Adding systemd mount options to wait for cloud-init +- da4d99e Adding disk_setup to rhel/cloud.cfg +- f5c6832 Enable cloud-init by default on vmware + +Conflicts: +cloudinit/config/cc_chef.py: + - Updated header documentation text + - Replacing double quotes by simple quotes + +setup.py: + - Adding missing cmdclass info + +Signed-off-by: Eduardo Otubo +--- + .gitignore | 1 + + cloudinit/config/cc_chef.py | 67 +++- + cloudinit/settings.py | 7 +- + redhat/.gitignore | 1 + + redhat/Makefile | 71 ++++ + redhat/Makefile.common | 37 ++ + redhat/cloud-init-tmpfiles.conf | 1 + + redhat/cloud-init.spec.template | 530 ++++++++++++++++++++++++++ + redhat/gating.yaml | 8 + + redhat/rpmbuild/BUILD/.gitignore | 3 + + redhat/rpmbuild/RPMS/.gitignore | 3 + + redhat/rpmbuild/SOURCES/.gitignore | 3 + + redhat/rpmbuild/SPECS/.gitignore | 3 + + redhat/rpmbuild/SRPMS/.gitignore | 3 + + redhat/scripts/frh.py | 27 ++ + redhat/scripts/git-backport-diff | 327 ++++++++++++++++ + redhat/scripts/git-compile-check | 215 +++++++++++ + redhat/scripts/process-patches.sh | 77 ++++ + redhat/scripts/tarball_checksum.sh | 3 + + rhel/README.rhel | 5 + + rhel/cloud-init-tmpfiles.conf | 1 + + rhel/cloud.cfg | 69 ++++ + rhel/systemd/cloud-config.service | 18 + + rhel/systemd/cloud-config.target | 11 + + rhel/systemd/cloud-final.service | 24 ++ + rhel/systemd/cloud-init-local.service | 31 ++ + rhel/systemd/cloud-init.service | 25 ++ + rhel/systemd/cloud-init.target | 7 + + setup.py | 23 +- + tools/read-version | 28 +- + 30 files changed, 1579 insertions(+), 50 deletions(-) + create mode 100644 redhat/.gitignore + create mode 100644 redhat/Makefile + create mode 100644 redhat/Makefile.common + create mode 100644 redhat/cloud-init-tmpfiles.conf + create mode 100644 redhat/cloud-init.spec.template + create mode 100644 redhat/gating.yaml + create mode 100644 redhat/rpmbuild/BUILD/.gitignore + create mode 100644 redhat/rpmbuild/RPMS/.gitignore + create 
mode 100644 redhat/rpmbuild/SOURCES/.gitignore + create mode 100644 redhat/rpmbuild/SPECS/.gitignore + create mode 100644 redhat/rpmbuild/SRPMS/.gitignore + create mode 100755 redhat/scripts/frh.py + create mode 100755 redhat/scripts/git-backport-diff + create mode 100755 redhat/scripts/git-compile-check + create mode 100755 redhat/scripts/process-patches.sh + create mode 100755 redhat/scripts/tarball_checksum.sh + create mode 100644 rhel/README.rhel + create mode 100644 rhel/cloud-init-tmpfiles.conf + create mode 100644 rhel/cloud.cfg + create mode 100644 rhel/systemd/cloud-config.service + create mode 100644 rhel/systemd/cloud-config.target + create mode 100644 rhel/systemd/cloud-final.service + create mode 100644 rhel/systemd/cloud-init-local.service + create mode 100644 rhel/systemd/cloud-init.service + create mode 100644 rhel/systemd/cloud-init.target + +diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py +index aaf71366..97ef649a 100644 +--- a/cloudinit/config/cc_chef.py ++++ b/cloudinit/config/cc_chef.py +@@ -6,7 +6,70 @@ + # + # This file is part of cloud-init. See LICENSE file for license information. + +-"""Chef: module that configures, starts and installs chef.""" ++""" ++Chef ++---- ++**Summary:** module that configures, starts and installs chef. ++ ++This module enables chef to be installed (from packages or ++from gems, or from omnibus). Before this occurs chef configurations are ++written to disk (validation.pem, client.pem, firstboot.json, client.rb), ++and needed chef folders/directories are created (/etc/chef and /var/log/chef ++and so-on). Then once installing proceeds correctly if configured chef will ++be started (in daemon mode or in non-daemon mode) and then once that has ++finished (if ran in non-daemon mode this will be when chef finishes ++converging, if ran in daemon mode then no further actions are possible since ++chef will have forked into its own process) then a post run function can ++run that can do finishing activities (such as removing the validation pem ++file). ++ ++**Internal name:** ``cc_chef`` ++ ++**Module frequency:** per always ++ ++**Supported distros:** all ++ ++**Config keys**:: ++ ++ chef: ++ directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef, ++ /var/cache/chef, /var/backups/chef, /run/chef) ++ validation_cert: (optional string to be written to file validation_key) ++ special value 'system' means set use existing file ++ validation_key: (optional the path for validation_cert. 
default ++ /etc/chef/validation.pem) ++ firstboot_path: (path to write run_list and initial_attributes keys that ++ should also be present in this configuration, defaults ++ to /etc/chef/firstboot.json) ++ exec: boolean to run or not run chef (defaults to false, unless ++ a gem installed is requested ++ where this will then default ++ to true) ++ ++ chef.rb template keys (if falsey, then will be skipped and not ++ written to /etc/chef/client.rb) ++ ++ chef: ++ client_key: ++ encrypted_data_bag_secret: ++ environment: ++ file_backup_path: ++ file_cache_path: ++ json_attribs: ++ log_level: ++ log_location: ++ node_name: ++ omnibus_url: ++ omnibus_url_retries: ++ omnibus_version: ++ pid_file: ++ server_url: ++ show_time: ++ ssl_verify_mode: ++ validation_cert: ++ validation_key: ++ validation_name: ++""" + + import itertools + import json +@@ -31,7 +94,7 @@ CHEF_DIRS = tuple([ + '/var/lib/chef', + '/var/cache/chef', + '/var/backups/chef', +- '/var/run/chef', ++ '/run/chef', + ]) + REQUIRED_CHEF_DIRS = tuple([ + '/etc/chef', +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index 91e1bfe7..e690c0fd 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -47,13 +47,16 @@ CFG_BUILTIN = { + ], + 'def_log_file': '/var/log/cloud-init.log', + 'log_cfgs': [], +- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'], ++ 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], ++ 'ssh_deletekeys': False, ++ 'ssh_genkeytypes': [], ++ 'syslog_fix_perms': [], + 'system_info': { + 'paths': { + 'cloud_dir': '/var/lib/cloud', + 'templates_dir': '/etc/cloud/templates/', + }, +- 'distro': 'ubuntu', ++ 'distro': 'rhel', + 'network': {'renderers': None}, + }, + 'vendor_data': {'enabled': True, 'prefix': []}, +diff --git a/rhel/README.rhel b/rhel/README.rhel +new file mode 100644 +index 00000000..aa29630d +--- /dev/null ++++ b/rhel/README.rhel +@@ -0,0 +1,5 @@ ++The following cloud-init modules are currently unsupported on this OS: ++ - apt_update_upgrade ('apt_update', 'apt_upgrade', 'apt_mirror', 'apt_preserve_sources_list', 'apt_old_mirror', 'apt_sources', 'debconf_selections', 'packages' options) ++ - byobu ('byobu_by_default' option) ++ - chef ++ - grub_dpkg +diff --git a/rhel/cloud-init-tmpfiles.conf b/rhel/cloud-init-tmpfiles.conf +new file mode 100644 +index 00000000..0c6d2a3b +--- /dev/null ++++ b/rhel/cloud-init-tmpfiles.conf +@@ -0,0 +1 @@ ++d /run/cloud-init 0700 root root - - +diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg +new file mode 100644 +index 00000000..82e8bf62 +--- /dev/null ++++ b/rhel/cloud.cfg +@@ -0,0 +1,69 @@ ++users: ++ - default ++ ++disable_root: 1 ++ssh_pwauth: 0 ++ ++mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] ++resize_rootfs_tmp: /dev ++ssh_deletekeys: 0 ++ssh_genkeytypes: ~ ++syslog_fix_perms: ~ ++disable_vmware_customization: false ++ ++cloud_init_modules: ++ - disk_setup ++ - migrator ++ - bootcmd ++ - write-files ++ - growpart ++ - resizefs ++ - set_hostname ++ - update_hostname ++ - update_etc_hosts ++ - rsyslog ++ - users-groups ++ - ssh ++ ++cloud_config_modules: ++ - mounts ++ - locale ++ - set-passwords ++ - rh_subscription ++ - yum-add-repo ++ - package-update-upgrade-install ++ - timezone ++ - puppet ++ - chef ++ - salt-minion ++ - mcollective ++ - disable-ec2-metadata ++ - runcmd ++ ++cloud_final_modules: ++ - rightscale_userdata ++ - scripts-per-once ++ - scripts-per-boot ++ - scripts-per-instance ++ - scripts-user ++ - ssh-authkey-fingerprints ++ - 
keys-to-console ++ - phone-home ++ - final-message ++ - power-state-change ++ ++system_info: ++ default_user: ++ name: cloud-user ++ lock_passwd: true ++ gecos: Cloud User ++ groups: [adm, systemd-journal] ++ sudo: ["ALL=(ALL) NOPASSWD:ALL"] ++ shell: /bin/bash ++ distro: rhel ++ paths: ++ cloud_dir: /var/lib/cloud ++ templates_dir: /etc/cloud/templates ++ ssh_svcname: sshd ++ ++# vim:syntax=yaml +diff --git a/rhel/systemd/cloud-config.service b/rhel/systemd/cloud-config.service +new file mode 100644 +index 00000000..f3dcd4be +--- /dev/null ++++ b/rhel/systemd/cloud-config.service +@@ -0,0 +1,18 @@ ++[Unit] ++Description=Apply the settings specified in cloud-config ++After=network-online.target cloud-config.target ++Wants=network-online.target cloud-config.target ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init modules --mode=config ++RemainAfterExit=yes ++TimeoutSec=0 ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-config.target b/rhel/systemd/cloud-config.target +new file mode 100644 +index 00000000..ae9b7d02 +--- /dev/null ++++ b/rhel/systemd/cloud-config.target +@@ -0,0 +1,11 @@ ++# cloud-init normally emits a "cloud-config" upstart event to inform third ++# parties that cloud-config is available, which does us no good when we're ++# using systemd. cloud-config.target serves as this synchronization point ++# instead. Services that would "start on cloud-config" with upstart can ++# instead use "After=cloud-config.target" and "Wants=cloud-config.target" ++# as appropriate. ++ ++[Unit] ++Description=Cloud-config availability ++Wants=cloud-init-local.service cloud-init.service ++After=cloud-init-local.service cloud-init.service +diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service +new file mode 100644 +index 00000000..e281c0cf +--- /dev/null ++++ b/rhel/systemd/cloud-final.service +@@ -0,0 +1,24 @@ ++[Unit] ++Description=Execute cloud user/final scripts ++After=network-online.target cloud-config.service rc-local.service ++Wants=network-online.target cloud-config.service ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init modules --mode=final ++RemainAfterExit=yes ++TimeoutSec=0 ++KillMode=process ++# Restart NetworkManager if it is present and running. 
++ExecStartPost=/bin/sh -c 'u=NetworkManager.service; \ ++ out=$(systemctl show --property=SubState $u) || exit; \ ++ [ "$out" = "SubState=running" ] || exit 0; \ ++ systemctl reload-or-try-restart $u' ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-init-local.service b/rhel/systemd/cloud-init-local.service +new file mode 100644 +index 00000000..8f9f6c9f +--- /dev/null ++++ b/rhel/systemd/cloud-init-local.service +@@ -0,0 +1,31 @@ ++[Unit] ++Description=Initial cloud-init job (pre-networking) ++DefaultDependencies=no ++Wants=network-pre.target ++After=systemd-remount-fs.service ++Requires=dbus.socket ++After=dbus.socket ++Before=NetworkManager.service network.service ++Before=network-pre.target ++Before=shutdown.target ++Before=firewalld.target ++Conflicts=shutdown.target ++RequiresMountsFor=/var/lib/cloud ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStartPre=/bin/mkdir -p /run/cloud-init ++ExecStartPre=/sbin/restorecon /run/cloud-init ++ExecStartPre=/usr/bin/touch /run/cloud-init/enabled ++ExecStart=/usr/bin/cloud-init init --local ++ExecStart=/bin/touch /run/cloud-init/network-config-ready ++RemainAfterExit=yes ++TimeoutSec=0 ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service +new file mode 100644 +index 00000000..d0023a05 +--- /dev/null ++++ b/rhel/systemd/cloud-init.service +@@ -0,0 +1,25 @@ ++[Unit] ++Description=Initial cloud-init job (metadata service crawler) ++Wants=cloud-init-local.service ++Wants=sshd-keygen.service ++Wants=sshd.service ++After=cloud-init-local.service ++After=NetworkManager.service network.service ++Before=network-online.target ++Before=sshd-keygen.service ++Before=sshd.service ++Before=systemd-user-sessions.service ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init init ++RemainAfterExit=yes ++TimeoutSec=0 ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=cloud-init.target +diff --git a/rhel/systemd/cloud-init.target b/rhel/systemd/cloud-init.target +new file mode 100644 +index 00000000..083c3b6f +--- /dev/null ++++ b/rhel/systemd/cloud-init.target +@@ -0,0 +1,7 @@ ++# cloud-init target is enabled by cloud-init-generator ++# To disable it you can either: ++# a.) boot with kernel cmdline of 'cloud-init=disabled' ++# b.) 
touch a file /etc/cloud/cloud-init.disabled ++[Unit] ++Description=Cloud-init target ++After=multi-user.target +diff --git a/setup.py b/setup.py +index cbacf48e..d5cd01a4 100755 +--- a/setup.py ++++ b/setup.py +@@ -125,14 +125,6 @@ INITSYS_FILES = { + 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], + 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], + 'sysvinit_suse': [f for f in glob('sysvinit/suse/*') if is_f(f)], +- 'systemd': [render_tmpl(f) +- for f in (glob('systemd/*.tmpl') + +- glob('systemd/*.service') + +- glob('systemd/*.target')) +- if (is_f(f) and not is_generator(f))], +- 'systemd.generators': [ +- render_tmpl(f, mode=0o755) +- for f in glob('systemd/*') if is_f(f) and is_generator(f)], + 'upstart': [f for f in glob('upstart/*') if is_f(f)], + } + INITSYS_ROOTS = { +@@ -142,9 +134,6 @@ INITSYS_ROOTS = { + 'sysvinit_deb': 'etc/init.d', + 'sysvinit_openrc': 'etc/init.d', + 'sysvinit_suse': 'etc/init.d', +- 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'), +- 'systemd.generators': pkg_config_read('systemd', +- 'systemdsystemgeneratordir'), + 'upstart': 'etc/init/', + } + INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()]) +@@ -245,14 +234,11 @@ if not in_virtualenv(): + INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k] + + data_files = [ +- (ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]), ++ (ETC + '/bash_completion.d', ['bash_completion/cloud-init']), + (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), + (ETC + '/cloud/templates', glob('templates/*')), +- (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', +- 'tools/uncloud-init', ++ (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', + 'tools/write-ssh-key-fingerprints']), +- (USR + '/share/bash-completion/completions', +- ['bash_completion/cloud-init']), + (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), + (USR + '/share/doc/cloud-init/examples', + [f for f in glob('doc/examples/*') if is_f(f)]), +@@ -263,8 +249,7 @@ if not platform.system().endswith('BSD'): + data_files.extend([ + (ETC + '/NetworkManager/dispatcher.d/', + ['tools/hook-network-manager']), +- (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), +- (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]) ++ ('/usr/lib/udev/rules.d', [f for f in glob('udev/*.rules')]) + ]) + # Use a subclass for install that handles + # adding on the right init system configuration files +@@ -286,8 +271,6 @@ setuptools.setup( + scripts=['tools/cloud-init-per'], + license='Dual-licensed under GPLv3 or Apache 2.0', + data_files=data_files, +- install_requires=requirements, +- cmdclass=cmdclass, + entry_points={ + 'console_scripts': [ + 'cloud-init = cloudinit.cmd.main:main', +diff --git a/tools/read-version b/tools/read-version +index 02c90643..79755f78 100755 +--- a/tools/read-version ++++ b/tools/read-version +@@ -71,32 +71,8 @@ version_long = None + is_release_branch_ci = ( + os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "").startswith("upstream/") + ) +-if is_gitdir(_tdir) and which("git") and not is_release_branch_ci: +- flags = [] +- if use_tags: +- flags = ['--tags'] +- cmd = ['git', 'describe', '--abbrev=8', '--match=[0-9]*'] + flags +- +- try: +- version = tiny_p(cmd).strip() +- except RuntimeError: +- version = None +- +- if version is None or not version.startswith(src_version): +- sys.stderr.write("git describe version (%s) differs from " +- "cloudinit.version (%s)\n" % (version, src_version)) +- sys.stderr.write( +- "Please get the latest 
upstream tags.\n" +- "As an example, this can be done with the following:\n" +- "$ git remote add upstream https://git.launchpad.net/cloud-init\n" +- "$ git fetch upstream --tags\n" +- ) +- sys.exit(1) +- +- version_long = tiny_p(cmd + ["--long"]).strip() +-else: +- version = src_version +- version_long = None ++version = src_version ++version_long = None + + # version is X.Y.Z[+xxx.gHASH] + # version_long is None or X.Y.Z-xxx-gHASH +-- +2.27.0 + diff --git a/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch b/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch new file mode 100644 index 0000000..3dc704f --- /dev/null +++ b/0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch @@ -0,0 +1,262 @@ +From 472c2b5d4342b6ab6ce1584dc39bed0e6c1ca2e7 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 7 May 2021 13:36:06 +0200 +Subject: Do not write NM_CONTROLLED=no in generated interface config files + +Conflicts 20.3: + - Not appplying patch on cloudinit/net/sysconfig.py since it now has a +mechanism to identify if cloud-init is running on RHEL, having the +correct settings for NM_CONTROLLED. + +X-downstream-only: true +Signed-off-by: Eduardo Otubo +Signed-off-by: Ryan McCabe +--- + cloudinit/net/sysconfig.py | 2 +- + tests/unittests/test_net.py | 28 ---------------------------- + 2 files changed, 1 insertion(+), 29 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 99a4bae4..3d276666 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -289,7 +289,7 @@ class Renderer(renderer.Renderer): + # details about this) + + iface_defaults = { +- 'rhel': {'ONBOOT': True, 'USERCTL': False, 'NM_CONTROLLED': False, ++ 'rhel': {'ONBOOT': True, 'USERCTL': False, + 'BOOTPROTO': 'none'}, + 'suse': {'BOOTPROTO': 'static', 'STARTMODE': 'auto'}, + } +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 38d934d4..c67b5fcc 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -535,7 +535,6 @@ GATEWAY=172.19.3.254 + HWADDR=fa:16:3e:ed:9a:59 + IPADDR=172.19.1.34 + NETMASK=255.255.252.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -633,7 +632,6 @@ IPADDR=172.19.1.34 + IPADDR1=10.0.0.10 + NETMASK=255.255.252.0 + NETMASK1=255.255.255.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -756,7 +754,6 @@ IPV6_AUTOCONF=no + IPV6_DEFAULTGW=2001:DB8::1 + IPV6_FORCE_ACCEPT_RA=no + NETMASK=255.255.252.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -884,7 +881,6 @@ NETWORK_CONFIGS = { + BOOTPROTO=none + DEVICE=eth1 + HWADDR=cf:d6:af:48:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -901,7 +897,6 @@ NETWORK_CONFIGS = { + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + METRIC=10000 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -1032,7 +1027,6 @@ NETWORK_CONFIGS = { + IPV6_AUTOCONF=no + IPV6_FORCE_ACCEPT_RA=no + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -1737,7 +1731,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + DHCPV6C=yes + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Bond + USERCTL=no"""), +@@ -1745,7 +1738,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + BOOTPROTO=dhcp + DEVICE=bond0.200 + DHCLIENT_SET_DEFAULT_ROUTE=no +- NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=bond0 + USERCTL=no +@@ -1763,7 +1755,6 @@ pre-down route del -net 10.0.0.0/8 gw 
11.0.0.1 metric 3 || true + IPV6_DEFAULTGW=2001:4800:78ff:1b::1 + MACADDR=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PRIO=22 + STP=no +@@ -1773,7 +1764,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + BOOTPROTO=none + DEVICE=eth0 + HWADDR=c0:d6:9f:2c:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -1790,7 +1780,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=eth0 + USERCTL=no +@@ -1800,7 +1789,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + DEVICE=eth1 + HWADDR=aa:d6:9f:2c:e8:80 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -1810,7 +1798,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + DEVICE=eth2 + HWADDR=c0:bb:9f:2c:e8:80 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -1820,7 +1807,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + BRIDGE=br0 + DEVICE=eth3 + HWADDR=66:bb:9f:2c:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -1829,7 +1815,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + BRIDGE=br0 + DEVICE=eth4 + HWADDR=98:bb:9f:2c:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -1838,7 +1823,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + DEVICE=eth5 + DHCLIENT_SET_DEFAULT_ROUTE=no + HWADDR=98:bb:9f:2c:e8:8a +- NM_CONTROLLED=no + ONBOOT=no + TYPE=Ethernet + USERCTL=no"""), +@@ -2294,7 +2278,6 @@ iface bond0 inet6 static + MTU=9000 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Bond + USERCTL=no +@@ -2304,7 +2287,6 @@ iface bond0 inet6 static + DEVICE=bond0s0 + HWADDR=aa:bb:cc:dd:e8:00 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -2326,7 +2308,6 @@ iface bond0 inet6 static + DEVICE=bond0s1 + HWADDR=aa:bb:cc:dd:e8:01 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -2383,7 +2364,6 @@ iface bond0 inet6 static + BOOTPROTO=none + DEVICE=en0 + HWADDR=aa:bb:cc:dd:e8:00 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -2402,7 +2382,6 @@ iface bond0 inet6 static + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=en0 + USERCTL=no +@@ -2467,7 +2446,6 @@ iface bond0 inet6 static + DEVICE=br0 + IPADDR=192.168.2.2 + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PRIO=22 + STP=no +@@ -2591,7 +2569,6 @@ iface bond0 inet6 static + HWADDR=52:54:00:12:34:00 + IPADDR=192.168.1.2 + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=no + TYPE=Ethernet + USERCTL=no +@@ -2601,7 +2578,6 @@ iface bond0 inet6 static + DEVICE=eth1 + HWADDR=52:54:00:12:34:aa + MTU=1480 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -2610,7 +2586,6 @@ iface bond0 inet6 static + BOOTPROTO=none + DEVICE=eth2 + HWADDR=52:54:00:12:34:ff +- NM_CONTROLLED=no + ONBOOT=no + TYPE=Ethernet + USERCTL=no +@@ -3027,7 +3002,6 @@ class TestRhelSysConfigRendering(CiTestCase): + BOOTPROTO=dhcp + DEVICE=eth1000 + HWADDR=07-1c-c6-75-a4-be +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -3148,7 +3122,6 @@ GATEWAY=10.0.2.2 + HWADDR=52:54:00:12:34:00 + IPADDR=10.0.2.15 + NETMASK=255.255.255.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -3218,7 +3191,6 @@ USERCTL=no + # + 
BOOTPROTO=dhcp + DEVICE=eth0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +-- +2.27.0 + diff --git a/0003-limit-permissions-on-def_log_file.patch b/0003-limit-permissions-on-def_log_file.patch new file mode 100644 index 0000000..941adaf --- /dev/null +++ b/0003-limit-permissions-on-def_log_file.patch @@ -0,0 +1,69 @@ +From 6134624f10ef56534e37624adc12f11b09910591 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 7 May 2021 13:36:08 +0200 +Subject: limit permissions on def_log_file + +This sets a default mode of 0600 on def_log_file, and makes this +configurable via the def_log_file_mode option in cloud.cfg. + +LP: #1541196 +Resolves: rhbz#1424612 +X-approved-upstream: true + +Conflicts 21.1: + cloudinit/stages.py: adjusting call of ensure_file() to use more +recent version + +Signed-off-by: Eduardo Otubo +--- + cloudinit/settings.py | 1 + + cloudinit/stages.py | 1 + + doc/examples/cloud-config.txt | 4 ++++ + 3 files changed, 6 insertions(+) + +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index e690c0fd..43a1490c 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -46,6 +46,7 @@ CFG_BUILTIN = { + 'None', + ], + 'def_log_file': '/var/log/cloud-init.log', ++ 'def_log_file_mode': 0o600, + 'log_cfgs': [], + 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], + 'ssh_deletekeys': False, +diff --git a/cloudinit/stages.py b/cloudinit/stages.py +index 3ef4491c..83e25dd1 100644 +--- a/cloudinit/stages.py ++++ b/cloudinit/stages.py +@@ -147,6 +147,7 @@ class Init(object): + def _initialize_filesystem(self): + util.ensure_dirs(self._initial_subdirs()) + log_file = util.get_cfg_option_str(self.cfg, 'def_log_file') ++ log_file_mode = util.get_cfg_option_int(self.cfg, 'def_log_file_mode') + if log_file: + util.ensure_file(log_file, preserve_mode=True) + perms = self.cfg.get('syslog_fix_perms') +diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt +index de9a0f87..bb33ad45 100644 +--- a/doc/examples/cloud-config.txt ++++ b/doc/examples/cloud-config.txt +@@ -414,10 +414,14 @@ timezone: US/Eastern + # if syslog_fix_perms is a list, it will iterate through and use the + # first pair that does not raise error. + # ++# 'def_log_file' will be created with mode 'def_log_file_mode', which ++# is specified as a numeric value and defaults to 0600. ++# + # the default values are '/var/log/cloud-init.log' and 'syslog:adm' + # the value of 'def_log_file' should match what is configured in logging + # if either is empty, then no change of ownership will be done + def_log_file: /var/log/my-logging-file.log ++def_log_file_mode: 0600 + syslog_fix_perms: syslog:root + + # you can set passwords for a user or multiple users +-- +2.27.0 + diff --git a/0004-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch b/0004-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch new file mode 100644 index 0000000..4d5a0d2 --- /dev/null +++ b/0004-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch @@ -0,0 +1,36 @@ +From 699d37a6ff3e343e214943794aac09e4156c2b2b Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 7 May 2021 13:36:10 +0200 +Subject: sysconfig: Don't write BOOTPROTO=dhcp for ipv6 dhcp + +Don't write BOOTPROTO=dhcp for ipv6 dhcp, as BOOTPROTO applies +only to ipv4. Explicitly write IPV6_AUTOCONF=no for dhcp on ipv6. 
+ +X-downstream-only: yes + +Resolves: rhbz#1519271 +Signed-off-by: Ryan McCabe + +Merged patches (19.4): +- 6444df4 sysconfig: Don't disable IPV6_AUTOCONF + +Signed-off-by: Eduardo Otubo +--- + tests/unittests/test_net.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index c67b5fcc..4ea0e597 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -1729,6 +1729,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + BOOTPROTO=none + DEVICE=bond0 + DHCPV6C=yes ++ IPV6_AUTOCONF=no + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff + ONBOOT=yes +-- +2.27.0 + diff --git a/0005-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch b/0005-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch new file mode 100644 index 0000000..100d3a2 --- /dev/null +++ b/0005-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch @@ -0,0 +1,57 @@ +From ccc75c1be3ae08d813193071c798fc905b5c03e5 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 7 May 2021 13:36:12 +0200 +Subject: DataSourceAzure.py: use hostnamectl to set hostname + +RH-Author: Vitaly Kuznetsov +Message-id: <20180417130754.12918-3-vkuznets@redhat.com> +Patchwork-id: 79659 +O-Subject: [RHEL7.6/7.5.z cloud-init PATCH 2/2] DataSourceAzure.py: use hostnamectl to set hostname +Bugzilla: 1568717 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Cathy Avery + +The right way to set hostname in RHEL7 is: + + $ hostnamectl set-hostname HOSTNAME + +DataSourceAzure, however, uses: + $ hostname HOSTSNAME + +instead and this causes problems. We can't simply change +'BUILTIN_DS_CONFIG' in DataSourceAzure.py as 'hostname' is being used +for both getting and setting the hostname. + +Long term, this should be fixed in a different way. Cloud-init +has distro-specific hostname setting/getting (see +cloudinit/distros/rhel.py) and DataSourceAzure.py needs to be switched +to use these. 
+ +Resolves: rhbz#1434109 + +X-downstream-only: yes + +Signed-off-by: Eduardo Otubo +Signed-off-by: Vitaly Kuznetsov +Signed-off-by: Miroslav Rezanina +--- + cloudinit/sources/DataSourceAzure.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index cee630f7..553b5a7e 100755 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -296,7 +296,7 @@ def get_hostname(hostname_command='hostname'): + + + def set_hostname(hostname, hostname_command='hostname'): +- subp.subp([hostname_command, hostname]) ++ util.subp(['hostnamectl', 'set-hostname', str(hostname)]) + + + @azure_ds_telemetry_reporter +-- +2.27.0 + diff --git a/0006-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch b/0006-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch new file mode 100644 index 0000000..6276255 --- /dev/null +++ b/0006-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch @@ -0,0 +1,65 @@ +From dfea0490b899804761fbd7aa23822783d7c36ec5 Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 7 May 2021 13:36:13 +0200 +Subject: include 'NOZEROCONF=yes' in /etc/sysconfig/network + +RH-Author: Eduardo Otubo +Message-id: <20190320114559.23708-1-otubo@redhat.com> +Patchwork-id: 84937 +O-Subject: [RHEL-7.7 cloud-init PATCH] include 'NOZEROCONF=yes' in /etc/sysconfig/network +Bugzilla: 1653131 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal +RH-Acked-by: Vitaly Kuznetsov + +The option NOZEROCONF=yes is not included by default in +/etc/sysconfig/network, which is required by Overcloud instances. The +patch also includes tests for the modifications. + +X-downstream-only: yes +Resolves: rhbz#1653131 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina +--- + cloudinit/net/sysconfig.py | 11 ++++++++++- + tests/unittests/test_net.py | 1 - + 2 files changed, 10 insertions(+), 2 deletions(-) + +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index 3d276666..d5440998 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -925,7 +925,16 @@ class Renderer(renderer.Renderer): + # Distros configuring /etc/sysconfig/network as a file e.g. 
Centos + if sysconfig_path.endswith('network'): + util.ensure_dir(os.path.dirname(sysconfig_path)) +- netcfg = [_make_header(), 'NETWORKING=yes'] ++ netcfg = [] ++ for line in util.load_file(sysconfig_path, quiet=True).split('\n'): ++ if 'cloud-init' in line: ++ break ++ if not line.startswith(('NETWORKING=', ++ 'IPV6_AUTOCONF=', ++ 'NETWORKING_IPV6=')): ++ netcfg.append(line) ++ # Now generate the cloud-init portion of sysconfig/network ++ netcfg.extend([_make_header(), 'NETWORKING=yes']) + if network_state.use_ipv6: + netcfg.append('NETWORKING_IPV6=yes') + netcfg.append('IPV6_AUTOCONF=no') +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 4ea0e597..c67b5fcc 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -1729,7 +1729,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + BOOTPROTO=none + DEVICE=bond0 + DHCPV6C=yes +- IPV6_AUTOCONF=no + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff + ONBOOT=yes +-- +2.27.0 + diff --git a/0007-Remove-race-condition-between-cloud-init-and-Network.patch b/0007-Remove-race-condition-between-cloud-init-and-Network.patch new file mode 100644 index 0000000..9c9e4cc --- /dev/null +++ b/0007-Remove-race-condition-between-cloud-init-and-Network.patch @@ -0,0 +1,148 @@ +From 24894dcf45a307f44e29dc5d5b2d864b75fd982c Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Fri, 7 May 2021 13:36:14 +0200 +Subject: Remove race condition between cloud-init and NetworkManager + +Message-id: <20200302104635.11648-1-otubo@redhat.com> +Patchwork-id: 94098 +O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCH] Remove race condition between cloud-init and NetworkManager +Bugzilla: 1807797 +RH-Acked-by: Cathy Avery +RH-Acked-by: Mohammed Gamal + +BZ: 1748015 +BRANCH: rhel7/master-18.5 +BREW: 26924611 + +BZ: 1807797 +BRANCH: rhel820/master-18.5 +BREW: 26924957 + +cloud-init service is set to start before NetworkManager service starts, +but this does not avoid a race condition between them. NetworkManager +starts before cloud-init can write `dns=none' to the file: +/etc/NetworkManager/conf.d/99-cloud-init.conf. This way NetworkManager +doesn't read the configuration and erases all resolv.conf values upon +shutdown. On the next reboot neither cloud-init or NetworkManager will +write anything to resolv.conf, leaving it blank. + +This patch introduces a NM reload (try-restart) at the end of cloud-init +start up so it won't erase resolv.conf upon first shutdown. + +x-downstream-only: yes +resolves: rhbz#1748015, rhbz#1807797 and rhbz#1804780 + +Signed-off-by: Eduardo Otubo +Signed-off-by: Miroslav Rezanina + +This commit is a squash and also includes the folloowing commits: + +commit 316a17b7c02a87fa9b2981535be0b20d165adc46 +Author: Eduardo Otubo +Date: Mon Jun 1 11:58:06 2020 +0200 + + Make cloud-init.service execute after network is up + + RH-Author: Eduardo Otubo + Message-id: <20200526090804.2047-1-otubo@redhat.com> + Patchwork-id: 96809 + O-Subject: [RHEL-8.2.1 cloud-init PATCH] Make cloud-init.service execute after network is up + Bugzilla: 1803928 + RH-Acked-by: Vitaly Kuznetsov + RH-Acked-by: Miroslav Rezanina + + cloud-init.service needs to wait until network is fully up before + continuing executing and configuring its service. 
+ + Signed-off-by: Eduardo Otubo + + x-downstream-only: yes + Resolves: rhbz#1831646 + Signed-off-by: Miroslav Rezanina + +commit 0422ba0e773d1a8257a3f2bf3db05f3bc7917eb7 +Author: Eduardo Otubo +Date: Thu May 28 08:44:08 2020 +0200 + + Remove race condition between cloud-init and NetworkManager + + RH-Author: Eduardo Otubo + Message-id: <20200327121911.17699-1-otubo@redhat.com> + Patchwork-id: 94453 + O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCHv2] Remove race condition between cloud-init and NetworkManager + Bugzilla: 1840648 + RH-Acked-by: Vitaly Kuznetsov + RH-Acked-by: Miroslav Rezanina + RH-Acked-by: Cathy Avery + + cloud-init service is set to start before NetworkManager service starts, + but this does not avoid a race condition between them. NetworkManager + starts before cloud-init can write `dns=none' to the file: + /etc/NetworkManager/conf.d/99-cloud-init.conf. This way NetworkManager + doesn't read the configuration and erases all resolv.conf values upon + shutdown. On the next reboot neither cloud-init or NetworkManager will + write anything to resolv.conf, leaving it blank. + + This patch introduces a NM reload (try-reload-or-restart) at the end of cloud-init + start up so it won't erase resolv.conf upon first shutdown. + + x-downstream-only: yes + + Signed-off-by: Eduardo Otubo otubo@redhat.com + Signed-off-by: Miroslav Rezanina + +commit e0b48a936433faea7f56dbc29dda35acf7d375f7 +Author: Eduardo Otubo +Date: Thu May 28 08:44:06 2020 +0200 + + Enable ssh_deletekeys by default + + RH-Author: Eduardo Otubo + Message-id: <20200317091705.15715-1-otubo@redhat.com> + Patchwork-id: 94365 + O-Subject: [RHEL-7.9/RHEL-8.2.0 cloud-init PATCH] Enable ssh_deletekeys by default + Bugzilla: 1814152 + RH-Acked-by: Mohammed Gamal + RH-Acked-by: Vitaly Kuznetsov + + The configuration option ssh_deletekeys will trigger the generation + of new ssh keys for every new instance deployed. 
+ + x-downstream-only: yes + resolves: rhbz#1814152 + + Signed-off-by: Eduardo Otubo + Signed-off-by: Miroslav Rezanina +--- + rhel/cloud.cfg | 2 +- + rhel/systemd/cloud-init.service | 1 + + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg +index 82e8bf62..9ecba215 100644 +--- a/rhel/cloud.cfg ++++ b/rhel/cloud.cfg +@@ -6,7 +6,7 @@ ssh_pwauth: 0 + + mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] + resize_rootfs_tmp: /dev +-ssh_deletekeys: 0 ++ssh_deletekeys: 1 + ssh_genkeytypes: ~ + syslog_fix_perms: ~ + disable_vmware_customization: false +diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service +index d0023a05..0b3d796d 100644 +--- a/rhel/systemd/cloud-init.service ++++ b/rhel/systemd/cloud-init.service +@@ -5,6 +5,7 @@ Wants=sshd-keygen.service + Wants=sshd.service + After=cloud-init-local.service + After=NetworkManager.service network.service ++After=NetworkManager-wait-online.service + Before=network-online.target + Before=sshd-keygen.service + Before=sshd.service +-- +2.27.0 + diff --git a/0008-net-exclude-OVS-internal-interfaces-in-get_interface.patch b/0008-net-exclude-OVS-internal-interfaces-in-get_interface.patch new file mode 100644 index 0000000..38f08cc --- /dev/null +++ b/0008-net-exclude-OVS-internal-interfaces-in-get_interface.patch @@ -0,0 +1,496 @@ +From b48dda73da94782d7ab0c455fa382d3a5ef3c419 Mon Sep 17 00:00:00 2001 +From: Daniel Watkins +Date: Mon, 8 Mar 2021 12:50:57 -0500 +Subject: net: exclude OVS internal interfaces in get_interfaces (#829) + +`get_interfaces` is used to in two ways, broadly: firstly, to determine +the available interfaces when converting cloud network configuration +formats to cloud-init's network configuration formats; and, secondly, to +ensure that any interfaces which are specified in network configuration +are (a) available, and (b) named correctly. The first of these is +unaffected by this commit, as no clouds support Open vSwitch +configuration in their network configuration formats. + +For the second, we check that MAC addresses of physical devices are +unique. In some OVS configurations, there are OVS-created devices which +have duplicate MAC addresses, either with each other or with physical +devices. As these interfaces are created by OVS, we can be confident +that (a) they will be available when appropriate, and (b) that OVS will +name them correctly. As such, this commit excludes any OVS-internal +interfaces from the set of interfaces returned by `get_interfaces`. + +LP: #1912844 +--- + cloudinit/net/__init__.py | 62 +++++++++ + cloudinit/net/tests/test_init.py | 119 ++++++++++++++++++ + .../sources/helpers/tests/test_openstack.py | 5 + + cloudinit/sources/tests/test_oracle.py | 4 + + .../integration_tests/bugs/test_lp1912844.py | 103 +++++++++++++++ + .../test_datasource/test_configdrive.py | 8 ++ + tests/unittests/test_net.py | 20 +++ + 7 files changed, 321 insertions(+) + create mode 100644 tests/integration_tests/bugs/test_lp1912844.py + +diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py +index de65e7af..385b7bcc 100644 +--- a/cloudinit/net/__init__.py ++++ b/cloudinit/net/__init__.py +@@ -6,6 +6,7 @@ + # This file is part of cloud-init. See LICENSE file for license information. 
+ + import errno ++import functools + import ipaddress + import logging + import os +@@ -19,6 +20,19 @@ from cloudinit.url_helper import UrlError, readurl + LOG = logging.getLogger(__name__) + SYS_CLASS_NET = "/sys/class/net/" + DEFAULT_PRIMARY_INTERFACE = 'eth0' ++OVS_INTERNAL_INTERFACE_LOOKUP_CMD = [ ++ "ovs-vsctl", ++ "--format", ++ "csv", ++ "--no-headings", ++ "--timeout", ++ "10", ++ "--columns", ++ "name", ++ "find", ++ "interface", ++ "type=internal", ++] + + + def natural_sort_key(s, _nsre=re.compile('([0-9]+)')): +@@ -133,6 +147,52 @@ def master_is_openvswitch(devname): + return os.path.exists(ovs_path) + + ++@functools.lru_cache(maxsize=None) ++def openvswitch_is_installed() -> bool: ++ """Return a bool indicating if Open vSwitch is installed in the system.""" ++ ret = bool(subp.which("ovs-vsctl")) ++ if not ret: ++ LOG.debug( ++ "ovs-vsctl not in PATH; not detecting Open vSwitch interfaces" ++ ) ++ return ret ++ ++ ++@functools.lru_cache(maxsize=None) ++def get_ovs_internal_interfaces() -> list: ++ """Return a list of the names of OVS internal interfaces on the system. ++ ++ These will all be strings, and are used to exclude OVS-specific interface ++ from cloud-init's network configuration handling. ++ """ ++ try: ++ out, _err = subp.subp(OVS_INTERNAL_INTERFACE_LOOKUP_CMD) ++ except subp.ProcessExecutionError as exc: ++ if "database connection failed" in exc.stderr: ++ LOG.info( ++ "Open vSwitch is not yet up; no interfaces will be detected as" ++ " OVS-internal" ++ ) ++ return [] ++ raise ++ else: ++ return out.splitlines() ++ ++ ++def is_openvswitch_internal_interface(devname: str) -> bool: ++ """Returns True if this is an OVS internal interface. ++ ++ If OVS is not installed or not yet running, this will return False. ++ """ ++ if not openvswitch_is_installed(): ++ return False ++ ovs_bridges = get_ovs_internal_interfaces() ++ if devname in ovs_bridges: ++ LOG.debug("Detected %s as an OVS interface", devname) ++ return True ++ return False ++ ++ + def is_netfailover(devname, driver=None): + """ netfailover driver uses 3 nics, master, primary and standby. + this returns True if the device is either the primary or standby +@@ -884,6 +944,8 @@ def get_interfaces(blacklist_drivers=None) -> list: + # skip nics that have no mac (00:00....) + if name != 'lo' and mac == zero_mac[:len(mac)]: + continue ++ if is_openvswitch_internal_interface(name): ++ continue + # skip nics that have drivers blacklisted + driver = device_driver(name) + if driver in blacklist_drivers: +diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py +index 0535387a..946f8ee2 100644 +--- a/cloudinit/net/tests/test_init.py ++++ b/cloudinit/net/tests/test_init.py +@@ -391,6 +391,10 @@ class TestGetDeviceList(CiTestCase): + self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist()) + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False), ++) + class TestGetInterfaceMAC(CiTestCase): + + def setUp(self): +@@ -1224,6 +1228,121 @@ class TestNetFailOver(CiTestCase): + self.assertFalse(net.is_netfailover(devname, driver)) + + ++class TestOpenvswitchIsInstalled: ++ """Test cloudinit.net.openvswitch_is_installed. ++ ++ Uses the ``clear_lru_cache`` local autouse fixture to allow us to test ++ despite the ``lru_cache`` decorator on the unit under test. 
++ """ ++ ++ @pytest.fixture(autouse=True) ++ def clear_lru_cache(self): ++ net.openvswitch_is_installed.cache_clear() ++ ++ @pytest.mark.parametrize( ++ "expected,which_return", [(True, "/some/path"), (False, None)] ++ ) ++ @mock.patch("cloudinit.net.subp.which") ++ def test_mirrors_which_result(self, m_which, expected, which_return): ++ m_which.return_value = which_return ++ assert expected == net.openvswitch_is_installed() ++ ++ @mock.patch("cloudinit.net.subp.which") ++ def test_only_calls_which_once(self, m_which): ++ net.openvswitch_is_installed() ++ net.openvswitch_is_installed() ++ assert 1 == m_which.call_count ++ ++ ++@mock.patch("cloudinit.net.subp.subp", return_value=("", "")) ++class TestGetOVSInternalInterfaces: ++ """Test cloudinit.net.get_ovs_internal_interfaces. ++ ++ Uses the ``clear_lru_cache`` local autouse fixture to allow us to test ++ despite the ``lru_cache`` decorator on the unit under test. ++ """ ++ @pytest.fixture(autouse=True) ++ def clear_lru_cache(self): ++ net.get_ovs_internal_interfaces.cache_clear() ++ ++ def test_command_used(self, m_subp): ++ """Test we use the correct command when we call subp""" ++ net.get_ovs_internal_interfaces() ++ ++ assert [ ++ mock.call(net.OVS_INTERNAL_INTERFACE_LOOKUP_CMD) ++ ] == m_subp.call_args_list ++ ++ def test_subp_contents_split_and_returned(self, m_subp): ++ """Test that the command output is appropriately mangled.""" ++ stdout = "iface1\niface2\niface3\n" ++ m_subp.return_value = (stdout, "") ++ ++ assert [ ++ "iface1", ++ "iface2", ++ "iface3", ++ ] == net.get_ovs_internal_interfaces() ++ ++ def test_database_connection_error_handled_gracefully(self, m_subp): ++ """Test that the error indicating OVS is down is handled gracefully.""" ++ m_subp.side_effect = ProcessExecutionError( ++ stderr="database connection failed" ++ ) ++ ++ assert [] == net.get_ovs_internal_interfaces() ++ ++ def test_other_errors_raised(self, m_subp): ++ """Test that only database connection errors are handled.""" ++ m_subp.side_effect = ProcessExecutionError() ++ ++ with pytest.raises(ProcessExecutionError): ++ net.get_ovs_internal_interfaces() ++ ++ def test_only_runs_once(self, m_subp): ++ """Test that we cache the value.""" ++ net.get_ovs_internal_interfaces() ++ net.get_ovs_internal_interfaces() ++ ++ assert 1 == m_subp.call_count ++ ++ ++@mock.patch("cloudinit.net.get_ovs_internal_interfaces") ++@mock.patch("cloudinit.net.openvswitch_is_installed") ++class TestIsOpenVSwitchInternalInterface: ++ def test_false_if_ovs_not_installed( ++ self, m_openvswitch_is_installed, _m_get_ovs_internal_interfaces ++ ): ++ """Test that OVS' absence returns False.""" ++ m_openvswitch_is_installed.return_value = False ++ ++ assert not net.is_openvswitch_internal_interface("devname") ++ ++ @pytest.mark.parametrize( ++ "detected_interfaces,devname,expected_return", ++ [ ++ ([], "devname", False), ++ (["notdevname"], "devname", False), ++ (["devname"], "devname", True), ++ (["some", "other", "devices", "and", "ours"], "ours", True), ++ ], ++ ) ++ def test_return_value_based_on_detected_interfaces( ++ self, ++ m_openvswitch_is_installed, ++ m_get_ovs_internal_interfaces, ++ detected_interfaces, ++ devname, ++ expected_return, ++ ): ++ """Test that the detected interfaces are used correctly.""" ++ m_openvswitch_is_installed.return_value = True ++ m_get_ovs_internal_interfaces.return_value = detected_interfaces ++ assert expected_return == net.is_openvswitch_internal_interface( ++ devname ++ ) ++ ++ + class TestIsIpAddress: + """Tests for net.is_ip_address. 
+ +diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py +index 2bde1e3f..95fb9743 100644 +--- a/cloudinit/sources/helpers/tests/test_openstack.py ++++ b/cloudinit/sources/helpers/tests/test_openstack.py +@@ -1,10 +1,15 @@ + # This file is part of cloud-init. See LICENSE file for license information. + # ./cloudinit/sources/helpers/tests/test_openstack.py ++from unittest import mock + + from cloudinit.sources.helpers import openstack + from cloudinit.tests import helpers as test_helpers + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False) ++) + class TestConvertNetJson(test_helpers.CiTestCase): + + def test_phy_types(self): +diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py +index a7bbdfd9..dcf33b9b 100644 +--- a/cloudinit/sources/tests/test_oracle.py ++++ b/cloudinit/sources/tests/test_oracle.py +@@ -173,6 +173,10 @@ class TestIsPlatformViable(test_helpers.CiTestCase): + m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False) ++) + class TestNetworkConfigFromOpcImds: + def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds): + oracle_ds._vnics_data = [{}] +diff --git a/tests/integration_tests/bugs/test_lp1912844.py b/tests/integration_tests/bugs/test_lp1912844.py +new file mode 100644 +index 00000000..efafae50 +--- /dev/null ++++ b/tests/integration_tests/bugs/test_lp1912844.py +@@ -0,0 +1,103 @@ ++"""Integration test for LP: #1912844 ++ ++cloud-init should ignore OVS-internal interfaces when performing its own ++interface determination: these interfaces are handled fully by OVS, so ++cloud-init should never need to touch them. ++ ++This test is a semi-synthetic reproducer for the bug. It uses a similar ++network configuration, tweaked slightly to DHCP in a way that will succeed even ++on "failed" boots. The exact bug doesn't reproduce with the NoCloud ++datasource, because it runs at init-local time (whereas the MAAS datasource, ++from the report, runs only at init (network) time): this means that the ++networking code runs before OVS creates its interfaces (which happens after ++init-local but, of course, before networking is up), and so doesn't generate ++the traceback that they cause. We work around this by calling ++``get_interfaces_by_mac` directly in the test code. ++""" ++import pytest ++ ++from tests.integration_tests import random_mac_address ++ ++MAC_ADDRESS = random_mac_address() ++ ++NETWORK_CONFIG = """\ ++bonds: ++ bond0: ++ interfaces: ++ - enp5s0 ++ macaddress: {0} ++ mtu: 1500 ++bridges: ++ ovs-br: ++ interfaces: ++ - bond0 ++ macaddress: {0} ++ mtu: 1500 ++ openvswitch: {{}} ++ dhcp4: true ++ethernets: ++ enp5s0: ++ mtu: 1500 ++ set-name: enp5s0 ++ match: ++ macaddress: {0} ++version: 2 ++vlans: ++ ovs-br.100: ++ id: 100 ++ link: ovs-br ++ mtu: 1500 ++ ovs-br.200: ++ id: 200 ++ link: ovs-br ++ mtu: 1500 ++""".format(MAC_ADDRESS) ++ ++ ++SETUP_USER_DATA = """\ ++#cloud-config ++packages: ++- openvswitch-switch ++""" ++ ++ ++@pytest.fixture ++def ovs_enabled_session_cloud(session_cloud): ++ """A session_cloud wrapper, to use an OVS-enabled image for tests. ++ ++ This implementation is complicated by wanting to use ``session_cloud``s ++ snapshot cleanup/retention logic, to avoid having to reimplement that here. 
++ """ ++ old_snapshot_id = session_cloud.snapshot_id ++ with session_cloud.launch( ++ user_data=SETUP_USER_DATA, ++ ) as instance: ++ instance.instance.clean() ++ session_cloud.snapshot_id = instance.snapshot() ++ ++ yield session_cloud ++ ++ try: ++ session_cloud.delete_snapshot() ++ finally: ++ session_cloud.snapshot_id = old_snapshot_id ++ ++ ++@pytest.mark.lxd_vm ++def test_get_interfaces_by_mac_doesnt_traceback(ovs_enabled_session_cloud): ++ """Launch our OVS-enabled image and confirm the bug doesn't reproduce.""" ++ launch_kwargs = { ++ "config_dict": { ++ "user.network-config": NETWORK_CONFIG, ++ "volatile.eth0.hwaddr": MAC_ADDRESS, ++ }, ++ } ++ with ovs_enabled_session_cloud.launch( ++ launch_kwargs=launch_kwargs, ++ ) as client: ++ result = client.execute( ++ "python3 -c" ++ "'from cloudinit.net import get_interfaces_by_mac;" ++ "get_interfaces_by_mac()'" ++ ) ++ assert result.ok +diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py +index 6f830cc6..2e2b7847 100644 +--- a/tests/unittests/test_datasource/test_configdrive.py ++++ b/tests/unittests/test_datasource/test_configdrive.py +@@ -494,6 +494,10 @@ class TestConfigDriveDataSource(CiTestCase): + self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False) ++) + class TestNetJson(CiTestCase): + def setUp(self): + super(TestNetJson, self).setUp() +@@ -654,6 +658,10 @@ class TestNetJson(CiTestCase): + self.assertEqual(out_data, conv_data) + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False) ++) + class TestConvertNetworkData(CiTestCase): + + with_logs = True +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index c67b5fcc..14d3462f 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -2908,6 +2908,10 @@ iface eth1 inet dhcp + self.assertEqual(0, mock_settle.call_count) + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False) ++) + class TestRhelSysConfigRendering(CiTestCase): + + with_logs = True +@@ -3592,6 +3596,10 @@ USERCTL=no + expected, self._render_and_read(network_config=v2data)) + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False) ++) + class TestOpenSuseSysConfigRendering(CiTestCase): + + with_logs = True +@@ -5009,6 +5017,10 @@ class TestNetRenderers(CiTestCase): + self.assertTrue(result) + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False) ++) + class TestGetInterfaces(CiTestCase): + _data = {'bonds': ['bond1'], + 'bridges': ['bridge1'], +@@ -5158,6 +5170,10 @@ class TestInterfaceHasOwnMac(CiTestCase): + self.assertFalse(interface_has_own_mac("eth0")) + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False) ++) + class TestGetInterfacesByMac(CiTestCase): + _data = {'bonds': ['bond1'], + 'bridges': ['bridge1'], +@@ -5314,6 +5330,10 @@ class TestInterfacesSorting(CiTestCase): + ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) + + ++@mock.patch( ++ "cloudinit.net.is_openvswitch_internal_interface", ++ mock.Mock(return_value=False) ++) + class TestGetIBHwaddrsByInterface(CiTestCase): + + _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' +-- +2.27.0 + diff --git 
a/0009-Fix-requiring-device-number-on-EC2-derivatives-836.patch b/0009-Fix-requiring-device-number-on-EC2-derivatives-836.patch new file mode 100644 index 0000000..0d474bc --- /dev/null +++ b/0009-Fix-requiring-device-number-on-EC2-derivatives-836.patch @@ -0,0 +1,87 @@ +From bec5fb60ffae3d1137c7261e5571c2751c5dda25 Mon Sep 17 00:00:00 2001 +From: James Falcon +Date: Mon, 8 Mar 2021 14:09:47 -0600 +Subject: Fix requiring device-number on EC2 derivatives (#836) + +#342 (70dbccbb) introduced the ability to determine route-metrics based on +the `device-number` provided by the EC2 IMDS. Not all datasources that +subclass EC2 will have this attribute, so allow the old behavior if +`device-number` is not present. + +LP: #1917875 +--- + cloudinit/sources/DataSourceEc2.py | 3 +- + .../unittests/test_datasource/test_aliyun.py | 30 +++++++++++++++++++ + 2 files changed, 32 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py +index 1930a509..a2105dc7 100644 +--- a/cloudinit/sources/DataSourceEc2.py ++++ b/cloudinit/sources/DataSourceEc2.py +@@ -765,13 +765,14 @@ def convert_ec2_metadata_network_config( + netcfg['ethernets'][nic_name] = dev_config + return netcfg + # Apply network config for all nics and any secondary IPv4/v6 addresses ++ nic_idx = 0 + for mac, nic_name in sorted(macs_to_nics.items()): + nic_metadata = macs_metadata.get(mac) + if not nic_metadata: + continue # Not a physical nic represented in metadata + # device-number is zero-indexed, we want it 1-indexed for the + # multiplication on the following line +- nic_idx = int(nic_metadata['device-number']) + 1 ++ nic_idx = int(nic_metadata.get('device-number', nic_idx)) + 1 + dhcp_override = {'route-metric': nic_idx * 100} + dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override, + 'dhcp6': False, +diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py +index eb2828d5..cab1ac2b 100644 +--- a/tests/unittests/test_datasource/test_aliyun.py ++++ b/tests/unittests/test_datasource/test_aliyun.py +@@ -7,6 +7,7 @@ from unittest import mock + + from cloudinit import helpers + from cloudinit.sources import DataSourceAliYun as ay ++from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config + from cloudinit.tests import helpers as test_helpers + + DEFAULT_METADATA = { +@@ -183,6 +184,35 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): + self.assertEqual(ay.parse_public_keys(public_keys), + public_keys['key-pair-0']['openssh-key']) + ++ def test_route_metric_calculated_without_device_number(self): ++ """Test that route-metric code works without `device-number` ++ ++ `device-number` is part of EC2 metadata, but not supported on aliyun. ++ Attempting to access it will raise a KeyError. 
++ ++ LP: #1917875 ++ """ ++ netcfg = convert_ec2_metadata_network_config( ++ {"interfaces": {"macs": { ++ "06:17:04:d7:26:09": { ++ "interface-id": "eni-e44ef49e", ++ }, ++ "06:17:04:d7:26:08": { ++ "interface-id": "eni-e44ef49f", ++ } ++ }}}, ++ macs_to_nics={ ++ '06:17:04:d7:26:09': 'eth0', ++ '06:17:04:d7:26:08': 'eth1', ++ } ++ ) ++ ++ met0 = netcfg['ethernets']['eth0']['dhcp4-overrides']['route-metric'] ++ met1 = netcfg['ethernets']['eth1']['dhcp4-overrides']['route-metric'] ++ ++ # route-metric numbers should be 100 apart ++ assert 100 == abs(met0 - met1) ++ + + class TestIsAliYun(test_helpers.CiTestCase): + ALIYUN_PRODUCT = 'Alibaba Cloud ECS' +-- +2.27.0 + diff --git a/EMPTY b/EMPTY deleted file mode 100644 index 0519ecb..0000000 --- a/EMPTY +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/ci-Add-flexibility-to-IMDS-api-version-793.patch b/ci-Add-flexibility-to-IMDS-api-version-793.patch new file mode 100644 index 0000000..9dd373f --- /dev/null +++ b/ci-Add-flexibility-to-IMDS-api-version-793.patch @@ -0,0 +1,295 @@ +From 2a2a5cdec0de0b96d503f9357c1641043574f90a Mon Sep 17 00:00:00 2001 +From: Thomas Stringer +Date: Wed, 3 Mar 2021 11:07:43 -0500 +Subject: [PATCH 1/7] Add flexibility to IMDS api-version (#793) + +RH-Author: Eduardo Otubo +RH-MergeRequest: 45: Add support for userdata on Azure from IMDS +RH-Commit: [1/7] 9aa42581c4ff175fb6f8f4a78d94cac9c9971062 +RH-Bugzilla: 2023940 +RH-Acked-by: Emanuele Giuseppe Esposito +RH-Acked-by: Mohamed Gamal Morsy + +Add flexibility to IMDS api-version by having both a desired IMDS +api-version and a minimum api-version. The desired api-version will +be used first, and if that fails it will fall back to the minimum +api-version. +--- + cloudinit/sources/DataSourceAzure.py | 113 ++++++++++++++---- + tests/unittests/test_datasource/test_azure.py | 42 ++++++- + 2 files changed, 129 insertions(+), 26 deletions(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index 553b5a7e..de1452ce 100755 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -78,17 +78,15 @@ AGENT_SEED_DIR = '/var/lib/waagent' + # In the event where the IMDS primary server is not + # available, it takes 1s to fallback to the secondary one + IMDS_TIMEOUT_IN_SECONDS = 2 +-IMDS_URL = "http://169.254.169.254/metadata/" +-IMDS_VER = "2019-06-01" +-IMDS_VER_PARAM = "api-version={}".format(IMDS_VER) ++IMDS_URL = "http://169.254.169.254/metadata" ++IMDS_VER_MIN = "2019-06-01" ++IMDS_VER_WANT = "2020-09-01" + + + class metadata_type(Enum): +- compute = "{}instance?{}".format(IMDS_URL, IMDS_VER_PARAM) +- network = "{}instance/network?{}".format(IMDS_URL, +- IMDS_VER_PARAM) +- reprovisiondata = "{}reprovisiondata?{}".format(IMDS_URL, +- IMDS_VER_PARAM) ++ compute = "{}/instance".format(IMDS_URL) ++ network = "{}/instance/network".format(IMDS_URL) ++ reprovisiondata = "{}/reprovisiondata".format(IMDS_URL) + + + PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" +@@ -349,6 +347,8 @@ class DataSourceAzure(sources.DataSource): + self.update_events['network'].add(EventType.BOOT) + self._ephemeral_dhcp_ctx = None + ++ self.failed_desired_api_version = False ++ + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) +@@ -520,8 +520,10 @@ class DataSourceAzure(sources.DataSource): + self._wait_for_all_nics_ready() + ret = self._reprovision() + +- imds_md = get_metadata_from_imds( +- self.fallback_interface, retries=10) ++ imds_md = 
self.get_imds_data_with_api_fallback( ++ self.fallback_interface, ++ retries=10 ++ ) + (md, userdata_raw, cfg, files) = ret + self.seed = cdev + crawled_data.update({ +@@ -652,6 +654,57 @@ class DataSourceAzure(sources.DataSource): + self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700) + return True + ++ @azure_ds_telemetry_reporter ++ def get_imds_data_with_api_fallback( ++ self, ++ fallback_nic, ++ retries, ++ md_type=metadata_type.compute): ++ """ ++ Wrapper for get_metadata_from_imds so that we can have flexibility ++ in which IMDS api-version we use. If a particular instance of IMDS ++ does not have the api version that is desired, we want to make ++ this fault tolerant and fall back to a good known minimum api ++ version. ++ """ ++ ++ if not self.failed_desired_api_version: ++ for _ in range(retries): ++ try: ++ LOG.info( ++ "Attempting IMDS api-version: %s", ++ IMDS_VER_WANT ++ ) ++ return get_metadata_from_imds( ++ fallback_nic=fallback_nic, ++ retries=0, ++ md_type=md_type, ++ api_version=IMDS_VER_WANT ++ ) ++ except UrlError as err: ++ LOG.info( ++ "UrlError with IMDS api-version: %s", ++ IMDS_VER_WANT ++ ) ++ if err.code == 400: ++ log_msg = "Fall back to IMDS api-version: {}".format( ++ IMDS_VER_MIN ++ ) ++ report_diagnostic_event( ++ log_msg, ++ logger_func=LOG.info ++ ) ++ self.failed_desired_api_version = True ++ break ++ ++ LOG.info("Using IMDS api-version: %s", IMDS_VER_MIN) ++ return get_metadata_from_imds( ++ fallback_nic=fallback_nic, ++ retries=retries, ++ md_type=md_type, ++ api_version=IMDS_VER_MIN ++ ) ++ + def device_name_to_device(self, name): + return self.ds_cfg['disk_aliases'].get(name) + +@@ -880,10 +933,11 @@ class DataSourceAzure(sources.DataSource): + # primary nic is being attached first helps here. Otherwise each nic + # could add several seconds of delay. + try: +- imds_md = get_metadata_from_imds( ++ imds_md = self.get_imds_data_with_api_fallback( + ifname, + 5, +- metadata_type.network) ++ metadata_type.network ++ ) + except Exception as e: + LOG.warning( + "Failed to get network metadata using nic %s. Attempt to " +@@ -1017,7 +1071,10 @@ class DataSourceAzure(sources.DataSource): + def _poll_imds(self): + """Poll IMDS for the new provisioning data until we get a valid + response. Then return the returned JSON object.""" +- url = metadata_type.reprovisiondata.value ++ url = "{}?api-version={}".format( ++ metadata_type.reprovisiondata.value, ++ IMDS_VER_MIN ++ ) + headers = {"Metadata": "true"} + nl_sock = None + report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) +@@ -2059,7 +2116,8 @@ def _generate_network_config_from_fallback_config() -> dict: + @azure_ds_telemetry_reporter + def get_metadata_from_imds(fallback_nic, + retries, +- md_type=metadata_type.compute): ++ md_type=metadata_type.compute, ++ api_version=IMDS_VER_MIN): + """Query Azure's instance metadata service, returning a dictionary. + + If network is not up, setup ephemeral dhcp on fallback_nic to talk to the +@@ -2069,13 +2127,16 @@ def get_metadata_from_imds(fallback_nic, + @param fallback_nic: String. The name of the nic which requires active + network in order to query IMDS. + @param retries: The number of retries of the IMDS_URL. ++ @param md_type: Metadata type for IMDS request. ++ @param api_version: IMDS api-version to use in the request. + + @return: A dict of instance metadata containing compute and network + info. 
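The api-version fallback this patch introduces boils down to "try the desired version, drop to the minimum on HTTP 400". A minimal self-contained sketch of that flow, with stand-in names (the UrlError class here is a local stub, not cloudinit.url_helper, and fake_imds is purely illustrative):

    class UrlError(Exception):
        # Local stand-in for cloudinit.url_helper.UrlError, for this sketch only.
        def __init__(self, msg, code=None):
            super().__init__(msg)
            self.code = code

    def fetch_imds(get, desired="2020-09-01", minimum="2019-06-01"):
        # Try the newer api-version first; an IMDS that does not know it
        # answers HTTP 400, in which case fall back to the known-good minimum.
        try:
            return get(desired)
        except UrlError as err:
            if err.code != 400:
                raise
            return get(minimum)

    def fake_imds(version):
        # Pretend endpoint that only understands the minimum api-version.
        if version != "2019-06-01":
            raise UrlError("unknown api-version", code=400)
        return {"compute": {"location": "westus2"}}

    assert fetch_imds(fake_imds)["compute"]["location"] == "westus2"
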
+ """ + kwargs = {'logfunc': LOG.debug, + 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)', +- 'func': _get_metadata_from_imds, 'args': (retries, md_type,)} ++ 'func': _get_metadata_from_imds, ++ 'args': (retries, md_type, api_version,)} + if net.is_up(fallback_nic): + return util.log_time(**kwargs) + else: +@@ -2091,20 +2152,26 @@ def get_metadata_from_imds(fallback_nic, + + + @azure_ds_telemetry_reporter +-def _get_metadata_from_imds(retries, md_type=metadata_type.compute): +- +- url = md_type.value ++def _get_metadata_from_imds( ++ retries, ++ md_type=metadata_type.compute, ++ api_version=IMDS_VER_MIN): ++ url = "{}?api-version={}".format(md_type.value, api_version) + headers = {"Metadata": "true"} + try: + response = readurl( + url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, + retries=retries, exception_cb=retry_on_url_exc) + except Exception as e: +- report_diagnostic_event( +- 'Ignoring IMDS instance metadata. ' +- 'Get metadata from IMDS failed: %s' % e, +- logger_func=LOG.warning) +- return {} ++ # pylint:disable=no-member ++ if isinstance(e, UrlError) and e.code == 400: ++ raise ++ else: ++ report_diagnostic_event( ++ 'Ignoring IMDS instance metadata. ' ++ 'Get metadata from IMDS failed: %s' % e, ++ logger_func=LOG.warning) ++ return {} + try: + from json.decoder import JSONDecodeError + json_decode_error = JSONDecodeError +diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py +index f597c723..dedebeb1 100644 +--- a/tests/unittests/test_datasource/test_azure.py ++++ b/tests/unittests/test_datasource/test_azure.py +@@ -408,7 +408,9 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): + + def setUp(self): + super(TestGetMetadataFromIMDS, self).setUp() +- self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2019-06-01" ++ self.network_md_url = "{}/instance?api-version=2019-06-01".format( ++ dsaz.IMDS_URL ++ ) + + @mock.patch(MOCKPATH + 'readurl') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True) +@@ -518,7 +520,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): + """Return empty dict when IMDS network metadata is absent.""" + httpretty.register_uri( + httpretty.GET, +- dsaz.IMDS_URL + 'instance?api-version=2017-12-01', ++ dsaz.IMDS_URL + '/instance?api-version=2017-12-01', + body={}, status=404) + + m_net_is_up.return_value = True # skips dhcp +@@ -1877,6 +1879,40 @@ scbus-1 on xpt0 bus 0 + ssh_keys = dsrc.get_public_ssh_keys() + self.assertEqual(ssh_keys, ['key2']) + ++ @mock.patch(MOCKPATH + 'get_metadata_from_imds') ++ def test_imds_api_version_wanted_nonexistent( ++ self, ++ m_get_metadata_from_imds): ++ def get_metadata_from_imds_side_eff(*args, **kwargs): ++ if kwargs['api_version'] == dsaz.IMDS_VER_WANT: ++ raise url_helper.UrlError("No IMDS version", code=400) ++ return NETWORK_METADATA ++ m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff ++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} ++ odata = {'HostName': "myhost", 'UserName': "myuser"} ++ data = { ++ 'ovfcontent': construct_valid_ovf_env(data=odata), ++ 'sys_cfg': sys_cfg ++ } ++ dsrc = self._get_ds(data) ++ dsrc.get_data() ++ self.assertIsNotNone(dsrc.metadata) ++ self.assertTrue(dsrc.failed_desired_api_version) ++ ++ @mock.patch( ++ MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA) ++ def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds): ++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} ++ odata = {'HostName': "myhost", 'UserName': 
"myuser"} ++ data = { ++ 'ovfcontent': construct_valid_ovf_env(data=odata), ++ 'sys_cfg': sys_cfg ++ } ++ dsrc = self._get_ds(data) ++ dsrc.get_data() ++ self.assertIsNotNone(dsrc.metadata) ++ self.assertFalse(dsrc.failed_desired_api_version) ++ + + class TestAzureBounce(CiTestCase): + +@@ -2657,7 +2693,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase): + @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up') + @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event') + @mock.patch('cloudinit.sources.net.find_fallback_nic') +- @mock.patch(MOCKPATH + 'get_metadata_from_imds') ++ @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + @mock.patch('os.path.isfile') +-- +2.27.0 + diff --git a/ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch b/ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch new file mode 100644 index 0000000..de27366 --- /dev/null +++ b/ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch @@ -0,0 +1,397 @@ +From 3ec4ddbc595c5fe781b3dc501631d23569849818 Mon Sep 17 00:00:00 2001 +From: Thomas Stringer +Date: Mon, 26 Apr 2021 09:41:38 -0400 +Subject: [PATCH 5/7] Azure: Retrieve username and hostname from IMDS (#865) + +RH-Author: Eduardo Otubo +RH-MergeRequest: 45: Add support for userdata on Azure from IMDS +RH-Commit: [5/7] 6fab7ef28c7fd340bda4f82dbf828f10716cb3f1 +RH-Bugzilla: 2023940 +RH-Acked-by: Emanuele Giuseppe Esposito +RH-Acked-by: Mohamed Gamal Morsy + +This change allows us to retrieve the username and hostname from +IMDS instead of having to rely on the mounted OVF. +--- + cloudinit/sources/DataSourceAzure.py | 149 ++++++++++++++---- + tests/unittests/test_datasource/test_azure.py | 87 +++++++++- + 2 files changed, 205 insertions(+), 31 deletions(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index 39e67c4f..6d7954ee 100755 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -5,6 +5,7 @@ + # This file is part of cloud-init. See LICENSE file for license information. + + import base64 ++from collections import namedtuple + import contextlib + import crypt + from functools import partial +@@ -25,6 +26,7 @@ from cloudinit.net import device_driver + from cloudinit.net.dhcp import EphemeralDHCPv4 + from cloudinit import sources + from cloudinit.sources.helpers import netlink ++from cloudinit import ssh_util + from cloudinit import subp + from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc + from cloudinit import util +@@ -80,7 +82,12 @@ AGENT_SEED_DIR = '/var/lib/waagent' + IMDS_TIMEOUT_IN_SECONDS = 2 + IMDS_URL = "http://169.254.169.254/metadata" + IMDS_VER_MIN = "2019-06-01" +-IMDS_VER_WANT = "2020-09-01" ++IMDS_VER_WANT = "2020-10-01" ++ ++ ++# This holds SSH key data including if the source was ++# from IMDS, as well as the SSH key data itself. 
++SSHKeys = namedtuple("SSHKeys", ("keys_from_imds", "ssh_keys")) + + + class metadata_type(Enum): +@@ -391,6 +398,8 @@ class DataSourceAzure(sources.DataSource): + """Return the subplatform metadata source details.""" + if self.seed.startswith('/dev'): + subplatform_type = 'config-disk' ++ elif self.seed.lower() == 'imds': ++ subplatform_type = 'imds' + else: + subplatform_type = 'seed-dir' + return '%s (%s)' % (subplatform_type, self.seed) +@@ -433,9 +442,11 @@ class DataSourceAzure(sources.DataSource): + + found = None + reprovision = False ++ ovf_is_accessible = True + reprovision_after_nic_attach = False + for cdev in candidates: + try: ++ LOG.debug("cdev: %s", cdev) + if cdev == "IMDS": + ret = None + reprovision = True +@@ -462,8 +473,18 @@ class DataSourceAzure(sources.DataSource): + raise sources.InvalidMetaDataException(msg) + except util.MountFailedError: + report_diagnostic_event( +- '%s was not mountable' % cdev, logger_func=LOG.warning) +- continue ++ '%s was not mountable' % cdev, logger_func=LOG.debug) ++ cdev = 'IMDS' ++ ovf_is_accessible = False ++ empty_md = {'local-hostname': ''} ++ empty_cfg = dict( ++ system_info=dict( ++ default_user=dict( ++ name='' ++ ) ++ ) ++ ) ++ ret = (empty_md, '', empty_cfg, {}) + + report_diagnostic_event("Found provisioning metadata in %s" % cdev, + logger_func=LOG.debug) +@@ -490,6 +511,10 @@ class DataSourceAzure(sources.DataSource): + self.fallback_interface, + retries=10 + ) ++ if not imds_md and not ovf_is_accessible: ++ msg = 'No OVF or IMDS available' ++ report_diagnostic_event(msg) ++ raise sources.InvalidMetaDataException(msg) + (md, userdata_raw, cfg, files) = ret + self.seed = cdev + crawled_data.update({ +@@ -498,6 +523,21 @@ class DataSourceAzure(sources.DataSource): + 'metadata': util.mergemanydict( + [md, {'imds': imds_md}]), + 'userdata_raw': userdata_raw}) ++ imds_username = _username_from_imds(imds_md) ++ imds_hostname = _hostname_from_imds(imds_md) ++ imds_disable_password = _disable_password_from_imds(imds_md) ++ if imds_username: ++ LOG.debug('Username retrieved from IMDS: %s', imds_username) ++ cfg['system_info']['default_user']['name'] = imds_username ++ if imds_hostname: ++ LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname) ++ crawled_data['metadata']['local-hostname'] = imds_hostname ++ if imds_disable_password: ++ LOG.debug( ++ 'Disable password retrieved from IMDS: %s', ++ imds_disable_password ++ ) ++ crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501 + found = cdev + + report_diagnostic_event( +@@ -676,6 +716,13 @@ class DataSourceAzure(sources.DataSource): + + @azure_ds_telemetry_reporter + def get_public_ssh_keys(self): ++ """ ++ Retrieve public SSH keys. ++ """ ++ ++ return self._get_public_ssh_keys_and_source().ssh_keys ++ ++ def _get_public_ssh_keys_and_source(self): + """ + Try to get the ssh keys from IMDS first, and if that fails + (i.e. IMDS is unavailable) then fallback to getting the ssh +@@ -685,30 +732,50 @@ class DataSourceAzure(sources.DataSource): + advantage, so this is a strong preference. But we must keep + OVF as a second option for environments that don't have IMDS. 
+ """ ++ + LOG.debug('Retrieving public SSH keys') + ssh_keys = [] ++ keys_from_imds = True ++ LOG.debug('Attempting to get SSH keys from IMDS') + try: +- raise KeyError( +- "Not using public SSH keys from IMDS" +- ) +- # pylint:disable=unreachable + ssh_keys = [ + public_key['keyData'] + for public_key + in self.metadata['imds']['compute']['publicKeys'] + ] +- LOG.debug('Retrieved SSH keys from IMDS') ++ for key in ssh_keys: ++ if not _key_is_openssh_formatted(key=key): ++ keys_from_imds = False ++ break ++ ++ if not keys_from_imds: ++ log_msg = 'Keys not in OpenSSH format, using OVF' ++ else: ++ log_msg = 'Retrieved {} keys from IMDS'.format( ++ len(ssh_keys) ++ if ssh_keys is not None ++ else 0 ++ ) + except KeyError: + log_msg = 'Unable to get keys from IMDS, falling back to OVF' ++ keys_from_imds = False ++ finally: + report_diagnostic_event(log_msg, logger_func=LOG.debug) ++ ++ if not keys_from_imds: ++ LOG.debug('Attempting to get SSH keys from OVF') + try: + ssh_keys = self.metadata['public-keys'] +- LOG.debug('Retrieved keys from OVF') ++ log_msg = 'Retrieved {} keys from OVF'.format(len(ssh_keys)) + except KeyError: + log_msg = 'No keys available from OVF' ++ finally: + report_diagnostic_event(log_msg, logger_func=LOG.debug) + +- return ssh_keys ++ return SSHKeys( ++ keys_from_imds=keys_from_imds, ++ ssh_keys=ssh_keys ++ ) + + def get_config_obj(self): + return self.cfg +@@ -1325,30 +1392,21 @@ class DataSourceAzure(sources.DataSource): + self.bounce_network_with_azure_hostname() + + pubkey_info = None +- try: +- raise KeyError( +- "Not using public SSH keys from IMDS" +- ) +- # pylint:disable=unreachable +- public_keys = self.metadata['imds']['compute']['publicKeys'] +- LOG.debug( +- 'Successfully retrieved %s key(s) from IMDS', +- len(public_keys) +- if public_keys is not None ++ ssh_keys_and_source = self._get_public_ssh_keys_and_source() ++ ++ if not ssh_keys_and_source.keys_from_imds: ++ pubkey_info = self.cfg.get('_pubkeys', None) ++ log_msg = 'Retrieved {} fingerprints from OVF'.format( ++ len(pubkey_info) ++ if pubkey_info is not None + else 0 + ) +- except KeyError: +- LOG.debug( +- 'Unable to retrieve SSH keys from IMDS during ' +- 'negotiation, falling back to OVF' +- ) +- pubkey_info = self.cfg.get('_pubkeys', None) ++ report_diagnostic_event(log_msg, logger_func=LOG.debug) + + metadata_func = partial(get_metadata_from_fabric, + fallback_lease_file=self. + dhclient_lease_file, +- pubkey_info=pubkey_info, +- iso_dev=self.iso_dev) ++ pubkey_info=pubkey_info) + + LOG.debug("negotiating with fabric via agent command %s", + self.ds_cfg['agent_command']) +@@ -1404,6 +1462,41 @@ class DataSourceAzure(sources.DataSource): + return self.metadata.get('imds', {}).get('compute', {}).get('location') + + ++def _username_from_imds(imds_data): ++ try: ++ return imds_data['compute']['osProfile']['adminUsername'] ++ except KeyError: ++ return None ++ ++ ++def _hostname_from_imds(imds_data): ++ try: ++ return imds_data['compute']['osProfile']['computerName'] ++ except KeyError: ++ return None ++ ++ ++def _disable_password_from_imds(imds_data): ++ try: ++ return imds_data['compute']['osProfile']['disablePasswordAuthentication'] == 'true' # noqa: E501 ++ except KeyError: ++ return None ++ ++ ++def _key_is_openssh_formatted(key): ++ """ ++ Validate whether or not the key is OpenSSH-formatted. 
++ """ ++ ++ parser = ssh_util.AuthKeyLineParser() ++ try: ++ akl = parser.parse(key) ++ except TypeError: ++ return False ++ ++ return akl.keytype is not None ++ ++ + def _partitions_on_device(devpath, maxnum=16): + # return a list of tuples (ptnum, path) for each part on devpath + for suff in ("-part", "p", ""): +diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py +index 320fa857..d9817d84 100644 +--- a/tests/unittests/test_datasource/test_azure.py ++++ b/tests/unittests/test_datasource/test_azure.py +@@ -108,7 +108,7 @@ NETWORK_METADATA = { + "zone": "", + "publicKeys": [ + { +- "keyData": "key1", ++ "keyData": "ssh-rsa key1", + "path": "path1" + } + ] +@@ -1761,8 +1761,29 @@ scbus-1 on xpt0 bus 0 + dsrc.get_data() + dsrc.setup(True) + ssh_keys = dsrc.get_public_ssh_keys() +- # Temporarily alter this test so that SSH public keys +- # from IMDS are *not* going to be in use to fix a regression. ++ self.assertEqual(ssh_keys, ["ssh-rsa key1"]) ++ self.assertEqual(m_parse_certificates.call_count, 0) ++ ++ @mock.patch( ++ 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') ++ @mock.patch(MOCKPATH + 'get_metadata_from_imds') ++ def test_get_public_ssh_keys_with_no_openssh_format( ++ self, ++ m_get_metadata_from_imds, ++ m_parse_certificates): ++ imds_data = copy.deepcopy(NETWORK_METADATA) ++ imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format' ++ m_get_metadata_from_imds.return_value = imds_data ++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} ++ odata = {'HostName': "myhost", 'UserName': "myuser"} ++ data = { ++ 'ovfcontent': construct_valid_ovf_env(data=odata), ++ 'sys_cfg': sys_cfg ++ } ++ dsrc = self._get_ds(data) ++ dsrc.get_data() ++ dsrc.setup(True) ++ ssh_keys = dsrc.get_public_ssh_keys() + self.assertEqual(ssh_keys, []) + self.assertEqual(m_parse_certificates.call_count, 0) + +@@ -1818,6 +1839,66 @@ scbus-1 on xpt0 bus 0 + self.assertIsNotNone(dsrc.metadata) + self.assertFalse(dsrc.failed_desired_api_version) + ++ @mock.patch(MOCKPATH + 'get_metadata_from_imds') ++ def test_hostname_from_imds(self, m_get_metadata_from_imds): ++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} ++ odata = {'HostName': "myhost", 'UserName': "myuser"} ++ data = { ++ 'ovfcontent': construct_valid_ovf_env(data=odata), ++ 'sys_cfg': sys_cfg ++ } ++ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) ++ imds_data_with_os_profile["compute"]["osProfile"] = dict( ++ adminUsername="username1", ++ computerName="hostname1", ++ disablePasswordAuthentication="true" ++ ) ++ m_get_metadata_from_imds.return_value = imds_data_with_os_profile ++ dsrc = self._get_ds(data) ++ dsrc.get_data() ++ self.assertEqual(dsrc.metadata["local-hostname"], "hostname1") ++ ++ @mock.patch(MOCKPATH + 'get_metadata_from_imds') ++ def test_username_from_imds(self, m_get_metadata_from_imds): ++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} ++ odata = {'HostName': "myhost", 'UserName': "myuser"} ++ data = { ++ 'ovfcontent': construct_valid_ovf_env(data=odata), ++ 'sys_cfg': sys_cfg ++ } ++ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) ++ imds_data_with_os_profile["compute"]["osProfile"] = dict( ++ adminUsername="username1", ++ computerName="hostname1", ++ disablePasswordAuthentication="true" ++ ) ++ m_get_metadata_from_imds.return_value = imds_data_with_os_profile ++ dsrc = self._get_ds(data) ++ dsrc.get_data() ++ self.assertEqual( ++ 
dsrc.cfg["system_info"]["default_user"]["name"], ++ "username1" ++ ) ++ ++ @mock.patch(MOCKPATH + 'get_metadata_from_imds') ++ def test_disable_password_from_imds(self, m_get_metadata_from_imds): ++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} ++ odata = {'HostName': "myhost", 'UserName': "myuser"} ++ data = { ++ 'ovfcontent': construct_valid_ovf_env(data=odata), ++ 'sys_cfg': sys_cfg ++ } ++ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) ++ imds_data_with_os_profile["compute"]["osProfile"] = dict( ++ adminUsername="username1", ++ computerName="hostname1", ++ disablePasswordAuthentication="true" ++ ) ++ m_get_metadata_from_imds.return_value = imds_data_with_os_profile ++ dsrc = self._get_ds(data) ++ dsrc.get_data() ++ self.assertTrue(dsrc.metadata["disable_password"]) ++ + + class TestAzureBounce(CiTestCase): + +-- +2.27.0 + diff --git a/ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch b/ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch new file mode 100644 index 0000000..efc9fc2 --- /dev/null +++ b/ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch @@ -0,0 +1,315 @@ +From ca5b83cee7b45bf56eec258db739cb5fe51b3231 Mon Sep 17 00:00:00 2001 +From: aswinrajamannar <39812128+aswinrajamannar@users.noreply.github.com> +Date: Mon, 26 Apr 2021 07:28:39 -0700 +Subject: [PATCH 6/7] Azure: Retry net metadata during nic attach for + non-timeout errs (#878) + +RH-Author: Eduardo Otubo +RH-MergeRequest: 45: Add support for userdata on Azure from IMDS +RH-Commit: [6/7] 4e6e44f017d5ffcb72ac8959a94f80c71fef9560 +RH-Bugzilla: 2023940 +RH-Acked-by: Emanuele Giuseppe Esposito +RH-Acked-by: Mohamed Gamal Morsy + +When network interfaces are hot-attached to the VM, attempting to get +network metadata might return 410 (or 500, 503 etc) because the info +is not yet available. In those cases, we retry getting the metadata +before giving up. The only case where we can move on to wait for more +nic attach events is if the call times out despite retries, which +means the interface is not likely a primary interface, and we should +try for more nic attach events. +--- + cloudinit/sources/DataSourceAzure.py | 65 +++++++++++-- + tests/unittests/test_datasource/test_azure.py | 95 ++++++++++++++++--- + 2 files changed, 140 insertions(+), 20 deletions(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index 6d7954ee..d0be6d84 100755 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -17,6 +17,7 @@ from time import sleep + from xml.dom import minidom + import xml.etree.ElementTree as ET + from enum import Enum ++import requests + + from cloudinit import dmi + from cloudinit import log as logging +@@ -665,7 +666,9 @@ class DataSourceAzure(sources.DataSource): + self, + fallback_nic, + retries, +- md_type=metadata_type.compute): ++ md_type=metadata_type.compute, ++ exc_cb=retry_on_url_exc, ++ infinite=False): + """ + Wrapper for get_metadata_from_imds so that we can have flexibility + in which IMDS api-version we use. 
If a particular instance of IMDS +@@ -685,7 +688,8 @@ class DataSourceAzure(sources.DataSource): + fallback_nic=fallback_nic, + retries=0, + md_type=md_type, +- api_version=IMDS_VER_WANT ++ api_version=IMDS_VER_WANT, ++ exc_cb=exc_cb + ) + except UrlError as err: + LOG.info( +@@ -708,7 +712,9 @@ class DataSourceAzure(sources.DataSource): + fallback_nic=fallback_nic, + retries=retries, + md_type=md_type, +- api_version=IMDS_VER_MIN ++ api_version=IMDS_VER_MIN, ++ exc_cb=exc_cb, ++ infinite=infinite + ) + + def device_name_to_device(self, name): +@@ -938,6 +944,9 @@ class DataSourceAzure(sources.DataSource): + is_primary = False + expected_nic_count = -1 + imds_md = None ++ metadata_poll_count = 0 ++ metadata_logging_threshold = 1 ++ metadata_timeout_count = 0 + + # For now, only a VM's primary NIC can contact IMDS and WireServer. If + # DHCP fails for a NIC, we have no mechanism to determine if the NIC is +@@ -962,14 +971,48 @@ class DataSourceAzure(sources.DataSource): + % (ifname, e), logger_func=LOG.error) + raise + ++ # Retry polling network metadata for a limited duration only when the ++ # calls fail due to timeout. This is because the platform drops packets ++ # going towards IMDS when it is not a primary nic. If the calls fail ++ # due to other issues like 410, 503 etc, then it means we are primary ++ # but IMDS service is unavailable at the moment. Retry indefinitely in ++ # those cases since we cannot move on without the network metadata. ++ def network_metadata_exc_cb(msg, exc): ++ nonlocal metadata_timeout_count, metadata_poll_count ++ nonlocal metadata_logging_threshold ++ ++ metadata_poll_count = metadata_poll_count + 1 ++ ++ # Log when needed but back off exponentially to avoid exploding ++ # the log file. ++ if metadata_poll_count >= metadata_logging_threshold: ++ metadata_logging_threshold *= 2 ++ report_diagnostic_event( ++ "Ran into exception when attempting to reach %s " ++ "after %d polls." % (msg, metadata_poll_count), ++ logger_func=LOG.error) ++ ++ if isinstance(exc, UrlError): ++ report_diagnostic_event("poll IMDS with %s failed. " ++ "Exception: %s and code: %s" % ++ (msg, exc.cause, exc.code), ++ logger_func=LOG.error) ++ ++ if exc.cause and isinstance(exc.cause, requests.Timeout): ++ metadata_timeout_count = metadata_timeout_count + 1 ++ return (metadata_timeout_count <= 10) ++ return True ++ + # Primary nic detection will be optimized in the future. The fact that + # primary nic is being attached first helps here. Otherwise each nic + # could add several seconds of delay. + try: + imds_md = self.get_imds_data_with_api_fallback( + ifname, +- 5, +- metadata_type.network ++ 0, ++ metadata_type.network, ++ network_metadata_exc_cb, ++ True + ) + except Exception as e: + LOG.warning( +@@ -2139,7 +2182,9 @@ def _generate_network_config_from_fallback_config() -> dict: + def get_metadata_from_imds(fallback_nic, + retries, + md_type=metadata_type.compute, +- api_version=IMDS_VER_MIN): ++ api_version=IMDS_VER_MIN, ++ exc_cb=retry_on_url_exc, ++ infinite=False): + """Query Azure's instance metadata service, returning a dictionary. 
+ + If network is not up, setup ephemeral dhcp on fallback_nic to talk to the +@@ -2158,7 +2203,7 @@ def get_metadata_from_imds(fallback_nic, + kwargs = {'logfunc': LOG.debug, + 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)', + 'func': _get_metadata_from_imds, +- 'args': (retries, md_type, api_version,)} ++ 'args': (retries, exc_cb, md_type, api_version, infinite)} + if net.is_up(fallback_nic): + return util.log_time(**kwargs) + else: +@@ -2176,14 +2221,16 @@ def get_metadata_from_imds(fallback_nic, + @azure_ds_telemetry_reporter + def _get_metadata_from_imds( + retries, ++ exc_cb, + md_type=metadata_type.compute, +- api_version=IMDS_VER_MIN): ++ api_version=IMDS_VER_MIN, ++ infinite=False): + url = "{}?api-version={}".format(md_type.value, api_version) + headers = {"Metadata": "true"} + try: + response = readurl( + url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, +- retries=retries, exception_cb=retry_on_url_exc) ++ retries=retries, exception_cb=exc_cb, infinite=infinite) + except Exception as e: + # pylint:disable=no-member + if isinstance(e, UrlError) and e.code == 400: +diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py +index d9817d84..c4a8e08d 100644 +--- a/tests/unittests/test_datasource/test_azure.py ++++ b/tests/unittests/test_datasource/test_azure.py +@@ -448,7 +448,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): + "http://169.254.169.254/metadata/instance?api-version=" + "2019-06-01", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, +- timeout=mock.ANY) ++ timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') +@@ -467,7 +467,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): + "http://169.254.169.254/metadata/instance/network?api-version=" + "2019-06-01", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, +- timeout=mock.ANY) ++ timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') +@@ -486,7 +486,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): + "http://169.254.169.254/metadata/instance?api-version=" + "2019-06-01", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, +- timeout=mock.ANY) ++ timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True) +@@ -511,7 +511,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): + m_readurl.assert_called_with( + self.network_md_url, exception_cb=mock.ANY, + headers={'Metadata': 'true'}, retries=2, +- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) ++ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False) + + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch(MOCKPATH + 'net.is_up', autospec=True) +@@ -2694,15 +2694,22 @@ class TestPreprovisioningHotAttachNics(CiTestCase): + + def nic_attach_ret(nl_sock, nics_found): + nonlocal m_attach_call_count +- if m_attach_call_count == 0: +- m_attach_call_count = m_attach_call_count + 1 ++ m_attach_call_count = m_attach_call_count + 1 ++ if m_attach_call_count == 1: + return "eth0" +- return "eth1" ++ elif m_attach_call_count == 2: ++ return "eth1" ++ raise RuntimeError("Must have found primary nic by now.") ++ ++ # Simulate two NICs by adding the same one twice. 
++ md = { ++ "interface": [ ++ IMDS_NETWORK_METADATA['interface'][0], ++ IMDS_NETWORK_METADATA['interface'][0] ++ ] ++ } + +- def network_metadata_ret(ifname, retries, type): +- # Simulate two NICs by adding the same one twice. +- md = IMDS_NETWORK_METADATA +- md['interface'].append(md['interface'][0]) ++ def network_metadata_ret(ifname, retries, type, exc_cb, infinite): + if ifname == "eth0": + return md + raise requests.Timeout('Fake connection timeout') +@@ -2724,6 +2731,72 @@ class TestPreprovisioningHotAttachNics(CiTestCase): + self.assertEqual(1, m_imds.call_count) + self.assertEqual(2, m_link_up.call_count) + ++ @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') ++ @mock.patch(MOCKPATH + 'EphemeralDHCPv4') ++ def test_check_if_nic_is_primary_retries_on_failures( ++ self, m_dhcpv4, m_imds): ++ """Retry polling for network metadata on all failures except timeout""" ++ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) ++ lease = { ++ 'interface': 'eth9', 'fixed-address': '192.168.2.9', ++ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', ++ 'unknown-245': '624c3620'} ++ ++ eth0Retries = [] ++ eth1Retries = [] ++ # Simulate two NICs by adding the same one twice. ++ md = { ++ "interface": [ ++ IMDS_NETWORK_METADATA['interface'][0], ++ IMDS_NETWORK_METADATA['interface'][0] ++ ] ++ } ++ ++ def network_metadata_ret(ifname, retries, type, exc_cb, infinite): ++ nonlocal eth0Retries, eth1Retries ++ ++ # Simulate readurl functionality with retries and ++ # exception callbacks so that the callback logic can be ++ # validated. ++ if ifname == "eth0": ++ cause = requests.HTTPError() ++ for _ in range(0, 15): ++ error = url_helper.UrlError(cause=cause, code=410) ++ eth0Retries.append(exc_cb("No goal state.", error)) ++ else: ++ cause = requests.Timeout('Fake connection timeout') ++ for _ in range(0, 10): ++ error = url_helper.UrlError(cause=cause) ++ eth1Retries.append(exc_cb("Connection timeout", error)) ++ # Should stop retrying after 10 retries ++ eth1Retries.append(exc_cb("Connection timeout", error)) ++ raise cause ++ return md ++ ++ m_imds.side_effect = network_metadata_ret ++ ++ dhcp_ctx = mock.MagicMock(lease=lease) ++ dhcp_ctx.obtain_lease.return_value = lease ++ m_dhcpv4.return_value = dhcp_ctx ++ ++ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0") ++ self.assertEqual(True, is_primary) ++ self.assertEqual(2, expected_nic_count) ++ ++ # All Eth0 errors are non-timeout errors. So we should have been ++ # retrying indefinitely until success. ++ for i in eth0Retries: ++ self.assertTrue(i) ++ ++ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1") ++ self.assertEqual(False, is_primary) ++ ++ # All Eth1 errors are timeout errors. Retry happens for a max of 10 and ++ # then we should have moved on assuming it is not the primary nic. 
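Stripped of telemetry, the callback policy being tested here is "retry indefinitely on non-timeout errors, give up after ten timeouts". A self-contained sketch of that policy, where TimeoutError stands in for requests.Timeout and the counter names are illustrative:

    def make_exc_cb(max_timeouts=10):
        # Non-timeout failures (410/500/503 ...) mean IMDS is reachable but not
        # ready, so keep retrying; repeated timeouts suggest a non-primary NIC.
        timeouts = 0

        def exc_cb(msg, exc):
            nonlocal timeouts
            if isinstance(exc, TimeoutError):
                timeouts += 1
                return timeouts <= max_timeouts
            return True

        return exc_cb

    cb = make_exc_cb()
    assert all(cb('poll', ValueError('410')) for _ in range(50))         # never gives up
    assert [cb('poll', TimeoutError()) for _ in range(11)][-1] is False  # stops at the 11th timeout
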
++ for i in range(0, 10): ++ self.assertTrue(eth1Retries[i]) ++ self.assertFalse(eth1Retries[10]) ++ + @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up') + def test_wait_for_link_up_returns_if_already_up( + self, m_is_link_up): +-- +2.27.0 + diff --git a/ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch b/ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch new file mode 100644 index 0000000..d4e7e37 --- /dev/null +++ b/ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch @@ -0,0 +1,129 @@ +From c0df7233fa99d4191b5d4142e209e7465d8db5f6 Mon Sep 17 00:00:00 2001 +From: Anh Vo +Date: Tue, 27 Apr 2021 13:40:59 -0400 +Subject: [PATCH 7/7] Azure: adding support for consuming userdata from IMDS + (#884) + +RH-Author: Eduardo Otubo +RH-MergeRequest: 45: Add support for userdata on Azure from IMDS +RH-Commit: [7/7] 32f840412da1a0f49b9ab5ba1d6f1bcb1bfacc16 +RH-Bugzilla: 2023940 +RH-Acked-by: Emanuele Giuseppe Esposito +RH-Acked-by: Mohamed Gamal Morsy +--- + cloudinit/sources/DataSourceAzure.py | 23 ++++++++- + tests/unittests/test_datasource/test_azure.py | 50 +++++++++++++++++++ + 2 files changed, 72 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index d0be6d84..a66f023d 100755 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -83,7 +83,7 @@ AGENT_SEED_DIR = '/var/lib/waagent' + IMDS_TIMEOUT_IN_SECONDS = 2 + IMDS_URL = "http://169.254.169.254/metadata" + IMDS_VER_MIN = "2019-06-01" +-IMDS_VER_WANT = "2020-10-01" ++IMDS_VER_WANT = "2021-01-01" + + + # This holds SSH key data including if the source was +@@ -539,6 +539,20 @@ class DataSourceAzure(sources.DataSource): + imds_disable_password + ) + crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501 ++ ++ # only use userdata from imds if OVF did not provide custom data ++ # userdata provided by IMDS is always base64 encoded ++ if not userdata_raw: ++ imds_userdata = _userdata_from_imds(imds_md) ++ if imds_userdata: ++ LOG.debug("Retrieved userdata from IMDS") ++ try: ++ crawled_data['userdata_raw'] = base64.b64decode( ++ ''.join(imds_userdata.split())) ++ except Exception: ++ report_diagnostic_event( ++ "Bad userdata in IMDS", ++ logger_func=LOG.warning) + found = cdev + + report_diagnostic_event( +@@ -1512,6 +1526,13 @@ def _username_from_imds(imds_data): + return None + + ++def _userdata_from_imds(imds_data): ++ try: ++ return imds_data['compute']['userData'] ++ except KeyError: ++ return None ++ ++ + def _hostname_from_imds(imds_data): + try: + return imds_data['compute']['osProfile']['computerName'] +diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py +index c4a8e08d..f8433690 100644 +--- a/tests/unittests/test_datasource/test_azure.py ++++ b/tests/unittests/test_datasource/test_azure.py +@@ -1899,6 +1899,56 @@ scbus-1 on xpt0 bus 0 + dsrc.get_data() + self.assertTrue(dsrc.metadata["disable_password"]) + ++ @mock.patch(MOCKPATH + 'get_metadata_from_imds') ++ def test_userdata_from_imds(self, m_get_metadata_from_imds): ++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} ++ odata = {'HostName': "myhost", 'UserName': "myuser"} ++ data = { ++ 'ovfcontent': construct_valid_ovf_env(data=odata), ++ 'sys_cfg': sys_cfg ++ } ++ userdata = "userdataImds" ++ imds_data = copy.deepcopy(NETWORK_METADATA) ++ imds_data["compute"]["osProfile"] = dict( ++ adminUsername="username1", ++ 
computerName="hostname1", ++ disablePasswordAuthentication="true", ++ ) ++ imds_data["compute"]["userData"] = b64e(userdata) ++ m_get_metadata_from_imds.return_value = imds_data ++ dsrc = self._get_ds(data) ++ ret = dsrc.get_data() ++ self.assertTrue(ret) ++ self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8')) ++ ++ @mock.patch(MOCKPATH + 'get_metadata_from_imds') ++ def test_userdata_from_imds_with_customdata_from_OVF( ++ self, m_get_metadata_from_imds): ++ userdataOVF = "userdataOVF" ++ odata = { ++ 'HostName': "myhost", 'UserName': "myuser", ++ 'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'} ++ } ++ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} ++ data = { ++ 'ovfcontent': construct_valid_ovf_env(data=odata), ++ 'sys_cfg': sys_cfg ++ } ++ ++ userdataImds = "userdataImds" ++ imds_data = copy.deepcopy(NETWORK_METADATA) ++ imds_data["compute"]["osProfile"] = dict( ++ adminUsername="username1", ++ computerName="hostname1", ++ disablePasswordAuthentication="true", ++ ) ++ imds_data["compute"]["userData"] = b64e(userdataImds) ++ m_get_metadata_from_imds.return_value = imds_data ++ dsrc = self._get_ds(data) ++ ret = dsrc.get_data() ++ self.assertTrue(ret) ++ self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) ++ + + class TestAzureBounce(CiTestCase): + +-- +2.27.0 + diff --git a/ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch b/ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch new file mode 100644 index 0000000..6f6c109 --- /dev/null +++ b/ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch @@ -0,0 +1,177 @@ +From 01489fb91f64f6137ddf88c39feabe4296f3a156 Mon Sep 17 00:00:00 2001 +From: Anh Vo +Date: Fri, 23 Apr 2021 10:18:05 -0400 +Subject: [PATCH 4/7] Azure: eject the provisioning iso before reporting ready + (#861) + +RH-Author: Eduardo Otubo +RH-MergeRequest: 45: Add support for userdata on Azure from IMDS +RH-Commit: [4/7] ba830546a62ac5bea33b91d133d364a897b9f6c0 +RH-Bugzilla: 2023940 +RH-Acked-by: Emanuele Giuseppe Esposito +RH-Acked-by: Mohamed Gamal Morsy + +Due to hyper-v implementations, iso ejection is more efficient if performed +from within the guest. The code will attempt to perform a best-effort ejection. +Failure during ejection will not prevent reporting ready from happening. If iso +ejection is successful, later iso ejection from the platform will be a no-op. +In the event the iso ejection from the guest fails, iso ejection will still happen at +the platform level. 
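The in-guest ejection described above is a single best-effort command whose failure is logged but never fatal. Roughly, and only as a sketch (the device path is just an example; the real code uses cloudinit's subp helper):

    import subprocess

    def eject_iso_best_effort(iso_dev='/dev/sr0'):
        # Best-effort: a failed eject must not block reporting ready,
        # since the platform will still detach the ISO on its side.
        try:
            subprocess.run(['eject', iso_dev], check=True)
            return True
        except Exception as exc:
            print('Failed ejecting the provisioning iso: %s' % exc)
            return False
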
+--- + cloudinit/sources/DataSourceAzure.py | 22 +++++++++++++++--- + cloudinit/sources/helpers/azure.py | 23 ++++++++++++++++--- + .../test_datasource/test_azure_helper.py | 13 +++++++++-- + 3 files changed, 50 insertions(+), 8 deletions(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index 020b7006..39e67c4f 100755 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -332,6 +332,7 @@ class DataSourceAzure(sources.DataSource): + dsname = 'Azure' + _negotiated = False + _metadata_imds = sources.UNSET ++ _ci_pkl_version = 1 + + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) +@@ -346,8 +347,13 @@ class DataSourceAzure(sources.DataSource): + # Regenerate network config new_instance boot and every boot + self.update_events['network'].add(EventType.BOOT) + self._ephemeral_dhcp_ctx = None +- + self.failed_desired_api_version = False ++ self.iso_dev = None ++ ++ def _unpickle(self, ci_pkl_version: int) -> None: ++ super()._unpickle(ci_pkl_version) ++ if "iso_dev" not in self.__dict__: ++ self.iso_dev = None + + def __str__(self): + root = sources.DataSource.__str__(self) +@@ -459,6 +465,13 @@ class DataSourceAzure(sources.DataSource): + '%s was not mountable' % cdev, logger_func=LOG.warning) + continue + ++ report_diagnostic_event("Found provisioning metadata in %s" % cdev, ++ logger_func=LOG.debug) ++ ++ # save the iso device for ejection before reporting ready ++ if cdev.startswith("/dev"): ++ self.iso_dev = cdev ++ + perform_reprovision = reprovision or self._should_reprovision(ret) + perform_reprovision_after_nic_attach = ( + reprovision_after_nic_attach or +@@ -1226,7 +1239,9 @@ class DataSourceAzure(sources.DataSource): + @return: The success status of sending the ready signal. + """ + try: +- get_metadata_from_fabric(None, lease['unknown-245']) ++ get_metadata_from_fabric(fallback_lease_file=None, ++ dhcp_opts=lease['unknown-245'], ++ iso_dev=self.iso_dev) + return True + except Exception as e: + report_diagnostic_event( +@@ -1332,7 +1347,8 @@ class DataSourceAzure(sources.DataSource): + metadata_func = partial(get_metadata_from_fabric, + fallback_lease_file=self. 
+ dhclient_lease_file, +- pubkey_info=pubkey_info) ++ pubkey_info=pubkey_info, ++ iso_dev=self.iso_dev) + + LOG.debug("negotiating with fabric via agent command %s", + self.ds_cfg['agent_command']) +diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py +index 03e7156b..ad476076 100755 +--- a/cloudinit/sources/helpers/azure.py ++++ b/cloudinit/sources/helpers/azure.py +@@ -865,7 +865,19 @@ class WALinuxAgentShim: + return endpoint_ip_address + + @azure_ds_telemetry_reporter +- def register_with_azure_and_fetch_data(self, pubkey_info=None) -> dict: ++ def eject_iso(self, iso_dev) -> None: ++ try: ++ LOG.debug("Ejecting the provisioning iso") ++ subp.subp(['eject', iso_dev]) ++ except Exception as e: ++ report_diagnostic_event( ++ "Failed ejecting the provisioning iso: %s" % e, ++ logger_func=LOG.debug) ++ ++ @azure_ds_telemetry_reporter ++ def register_with_azure_and_fetch_data(self, ++ pubkey_info=None, ++ iso_dev=None) -> dict: + """Gets the VM's GoalState from Azure, uses the GoalState information + to report ready/send the ready signal/provisioning complete signal to + Azure, and then uses pubkey_info to filter and obtain the user's +@@ -891,6 +903,10 @@ class WALinuxAgentShim: + ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info) + health_reporter = GoalStateHealthReporter( + goal_state, self.azure_endpoint_client, self.endpoint) ++ ++ if iso_dev is not None: ++ self.eject_iso(iso_dev) ++ + health_reporter.send_ready_signal() + return {'public-keys': ssh_keys} + +@@ -1046,11 +1062,12 @@ class WALinuxAgentShim: + + @azure_ds_telemetry_reporter + def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, +- pubkey_info=None): ++ pubkey_info=None, iso_dev=None): + shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, + dhcp_options=dhcp_opts) + try: +- return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) ++ return shim.register_with_azure_and_fetch_data( ++ pubkey_info=pubkey_info, iso_dev=iso_dev) + finally: + shim.clean_up() + +diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py +index 63482c6c..552c7905 100644 +--- a/tests/unittests/test_datasource/test_azure_helper.py ++++ b/tests/unittests/test_datasource/test_azure_helper.py +@@ -1009,6 +1009,14 @@ class TestWALinuxAgentShim(CiTestCase): + self.GoalState.return_value.container_id = self.test_container_id + self.GoalState.return_value.instance_id = self.test_instance_id + ++ def test_eject_iso_is_called(self): ++ shim = wa_shim() ++ with mock.patch.object( ++ shim, 'eject_iso', autospec=True ++ ) as m_eject_iso: ++ shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0") ++ m_eject_iso.assert_called_once_with("/dev/sr0") ++ + def test_http_client_does_not_use_certificate_for_report_ready(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() +@@ -1283,13 +1291,14 @@ class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase): + + def test_calls_shim_register_with_azure_and_fetch_data(self): + m_pubkey_info = mock.MagicMock() +- azure_helper.get_metadata_from_fabric(pubkey_info=m_pubkey_info) ++ azure_helper.get_metadata_from_fabric( ++ pubkey_info=m_pubkey_info, iso_dev="/dev/sr0") + self.assertEqual( + 1, + self.m_shim.return_value + .register_with_azure_and_fetch_data.call_count) + self.assertEqual( +- mock.call(pubkey_info=m_pubkey_info), ++ mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info), + self.m_shim.return_value + 
.register_with_azure_and_fetch_data.call_args) + +-- +2.27.0 + diff --git a/ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch b/ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch new file mode 100644 index 0000000..627fd2b --- /dev/null +++ b/ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch @@ -0,0 +1,90 @@ +From f11bbe7f04a48eebcb446e283820d7592f76cf86 Mon Sep 17 00:00:00 2001 +From: Johnson Shi +Date: Thu, 25 Mar 2021 07:20:10 -0700 +Subject: [PATCH 2/7] Azure helper: Ensure Azure http handler sleeps between + retries (#842) + +RH-Author: Eduardo Otubo +RH-MergeRequest: 45: Add support for userdata on Azure from IMDS +RH-Commit: [2/7] e8f8bb658b629a8444bd2ba19f109952acf33311 +RH-Bugzilla: 2023940 +RH-Acked-by: Emanuele Giuseppe Esposito +RH-Acked-by: Mohamed Gamal Morsy + +Ensure that the Azure helper's http handler sleeps a fixed duration +between retry failure attempts. The http handler will sleep a fixed +duration between failed attempts regardless of whether the attempt +failed due to (1) request timing out or (2) instant failure (no +timeout). + +Due to certain platform issues, the http request to the Azure endpoint +may instantly fail without reaching the http timeout duration. Without +sleeping a fixed duration in between retry attempts, the http handler +will loop through the max retry attempts quickly. This causes the +communication between cloud-init and the Azure platform to be less +resilient due to the short total duration if there is no sleep in +between retries. +--- + cloudinit/sources/helpers/azure.py | 2 ++ + tests/unittests/test_datasource/test_azure_helper.py | 11 +++++++++-- + 2 files changed, 11 insertions(+), 2 deletions(-) + +diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py +index d3055d08..03e7156b 100755 +--- a/cloudinit/sources/helpers/azure.py ++++ b/cloudinit/sources/helpers/azure.py +@@ -303,6 +303,7 @@ def http_with_retries(url, **kwargs) -> str: + + max_readurl_attempts = 240 + default_readurl_timeout = 5 ++ sleep_duration_between_retries = 5 + periodic_logging_attempts = 12 + + if 'timeout' not in kwargs: +@@ -338,6 +339,7 @@ def http_with_retries(url, **kwargs) -> str: + 'attempt %d with exception: %s' % + (url, attempt, e), + logger_func=LOG.debug) ++ time.sleep(sleep_duration_between_retries) + + raise exc + +diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py +index b8899807..63482c6c 100644 +--- a/tests/unittests/test_datasource/test_azure_helper.py ++++ b/tests/unittests/test_datasource/test_azure_helper.py +@@ -384,6 +384,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase): + + max_readurl_attempts = 240 + default_readurl_timeout = 5 ++ sleep_duration_between_retries = 5 + periodic_logging_attempts = 12 + + def setUp(self): +@@ -394,8 +395,8 @@ class TestAzureHelperHttpWithRetries(CiTestCase): + self.m_readurl = patches.enter_context( + mock.patch.object( + azure_helper.url_helper, 'readurl', mock.MagicMock())) +- patches.enter_context( +- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) ++ self.m_sleep = patches.enter_context( ++ mock.patch.object(azure_helper.time, 'sleep', autospec=True)) + + def test_http_with_retries(self): + self.m_readurl.return_value = 'TestResp' +@@ -438,6 +439,12 @@ class TestAzureHelperHttpWithRetries(CiTestCase): + self.m_readurl.call_count, + self.periodic_logging_attempts + 1) + ++ # Ensure that cloud-init did sleep between each failed request ++ 
self.assertEqual( ++ self.m_sleep.call_count, ++ self.periodic_logging_attempts) ++ self.m_sleep.assert_called_with(self.sleep_duration_between_retries) ++ + def test_http_with_retries_long_delay_logs_periodic_failure_msg(self): + self.m_readurl.side_effect = \ + [SentinelException] * self.periodic_logging_attempts + \ +-- +2.27.0 + diff --git a/ci-Change-netifaces-dependency-to-0.10.4-965.patch b/ci-Change-netifaces-dependency-to-0.10.4-965.patch new file mode 100644 index 0000000..32fe4ac --- /dev/null +++ b/ci-Change-netifaces-dependency-to-0.10.4-965.patch @@ -0,0 +1,47 @@ +From c3d41dc6b18df0d74f569b1a0ba43c8118437948 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Fri, 14 Jan 2022 16:40:24 +0100 +Subject: [PATCH 3/6] Change netifaces dependency to 0.10.4 (#965) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 44: Datasource for VMware +RH-Commit: [3/6] d25d68427ab8b86ee1521c66483e9300e8fcc735 +RH-Bugzilla: 2026587 +RH-Acked-by: Mohamed Gamal Morsy +RH-Acked-by: Eduardo Otubo + +commit b9d308b4d61d22bacc05bcae59819755975631f8 +Author: Andrew Kutz <101085+akutz@users.noreply.github.com> +Date: Tue Aug 10 15:10:44 2021 -0500 + + Change netifaces dependency to 0.10.4 (#965) + + Change netifaces dependency to 0.10.4 + + Currently versions Ubuntu <=20.10 use netifaces 0.10.4 By requiring + netifaces 0.10.9, the VMware datasource omitted itself from cloud-init + on Ubuntu <=20.10. + + This patch changes the netifaces dependency to 0.10.4. While it is true + there are patches to netifaces post 0.10.4 that are desirable, testing + against the most common network configuration was performed to verify + the VMware datasource will still function with netifaces 0.10.4. + +Signed-off-by: Emanuele Giuseppe Esposito +--- + requirements.txt | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/requirements.txt b/requirements.txt +index 41d01d62..c4adc455 100644 +--- a/requirements.txt ++++ b/requirements.txt +@@ -40,4 +40,4 @@ jsonschema + # and still participate in instance-data by gathering the network in detail at + # runtime and merge that information into the metadata and repersist that to + # disk. +-netifaces>=0.10.9 ++netifaces>=0.10.4 +-- +2.27.0 + diff --git a/ci-Datasource-for-VMware-953.patch b/ci-Datasource-for-VMware-953.patch new file mode 100644 index 0000000..137ee07 --- /dev/null +++ b/ci-Datasource-for-VMware-953.patch @@ -0,0 +1,2198 @@ +From 1917af220242840ec1b21f82f80532cf6548cc00 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Fri, 14 Jan 2022 16:34:49 +0100 +Subject: [PATCH 2/6] Datasource for VMware (#953) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 44: Datasource for VMware +RH-Commit: [2/6] bb6e58dfeaf8b64d2801ddb4cb73868cf31de3ef +RH-Bugzilla: 2026587 +RH-Acked-by: Mohamed Gamal Morsy +RH-Acked-by: Eduardo Otubo + +commit 8b4a9bc7b81e61943af873bad92e2133f8275b0b +Author: Andrew Kutz <101085+akutz@users.noreply.github.com> +Date: Mon Aug 9 21:24:07 2021 -0500 + + Datasource for VMware (#953) + + This patch finally introduces the Cloud-Init Datasource for VMware + GuestInfo as a part of cloud-init proper. This datasource has existed + since 2018, and rapidly became the de facto datasource for developers + working with Packer, Terraform, for projects like kube-image-builder, + and the de jure datasource for Photon OS. + + The major change to the datasource from its previous incarnation is + the name. 
Now named DatasourceVMware, this new version of the + datasource will allow multiple transport types in addition to + GuestInfo keys. + + This datasource includes several unique features developed to address + real-world situations: + + * Support for reading any key (metadata, userdata, vendordata) both + from the guestinfo table when running on a VM in vSphere as well as + from an environment variable when running inside of a container, + useful for rapid dev/test. + + * Allows booting with DHCP while still providing full participation + in Cloud-Init instance data and Jinja queries. The netifaces library + provides the ability to inspect the network after it is online, + and the runtime network configuration is then merged into the + existing metadata and persisted to disk. + + * Advertises the local_ipv4 and local_ipv6 addresses via guestinfo + as well. This is useful as Guest Tools is not always able to + identify what would be considered the local address. + + The primary author and current steward of this datasource spoke at + Cloud-Init Con 2020 where there was interest in contributing this datasource + to the Cloud-Init codebase. + + The datasource currently lives in its own GitHub repository at + https://github.com/vmware/cloud-init-vmware-guestinfo. Once the datasource + is merged into Cloud-Init, the old repository will be deprecated. + +Signed-off-by: Emanuele Giuseppe Esposito +--- + README.md | 2 +- + cloudinit/settings.py | 1 + + cloudinit/sources/DataSourceVMware.py | 871 ++++++++++++++++++ + doc/rtd/topics/availability.rst | 1 + + doc/rtd/topics/datasources.rst | 2 +- + doc/rtd/topics/datasources/vmware.rst | 359 ++++++++ + requirements.txt | 9 + + .../unittests/test_datasource/test_common.py | 3 + + .../unittests/test_datasource/test_vmware.py | 377 ++++++++ + tests/unittests/test_ds_identify.py | 279 +++++- + tools/.github-cla-signers | 1 + + tools/ds-identify | 76 +- + 12 files changed, 1977 insertions(+), 4 deletions(-) + create mode 100644 cloudinit/sources/DataSourceVMware.py + create mode 100644 doc/rtd/topics/datasources/vmware.rst + create mode 100644 tests/unittests/test_datasource/test_vmware.py + +diff --git a/README.md b/README.md +index 435405da..aa4fad63 100644 +--- a/README.md ++++ b/README.md +@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! + + | Supported OSes | Supported Public Clouds | Supported Private Clouds | + | --- | --- | --- | +-| Alpine Linux
ArchLinux
Debian
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)















| ++| Alpine Linux
ArchLinux
Debian
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)
VMware















| + + ## To start developing cloud-init + +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index 2acf2615..d5f32dbb 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -42,6 +42,7 @@ CFG_BUILTIN = { + 'Exoscale', + 'RbxCloud', + 'UpCloud', ++ 'VMware', + # At the end to act as a 'catch' when none of the above work... + 'None', + ], +diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py +new file mode 100644 +index 00000000..22ca63de +--- /dev/null ++++ b/cloudinit/sources/DataSourceVMware.py +@@ -0,0 +1,871 @@ ++# Cloud-Init DataSource for VMware ++# ++# Copyright (c) 2018-2021 VMware, Inc. All Rights Reserved. ++# ++# Authors: Anish Swaminathan ++# Andrew Kutz ++# ++# This file is part of cloud-init. See LICENSE file for license information. ++ ++"""Cloud-Init DataSource for VMware ++ ++This module provides a cloud-init datasource for VMware systems and supports ++multiple transports types, including: ++ ++ * EnvVars ++ * GuestInfo ++ ++Netifaces (https://github.com/al45tair/netifaces) ++ ++ Please note this module relies on the netifaces project to introspect the ++ runtime, network configuration of the host on which this datasource is ++ running. This is in contrast to the rest of cloud-init which uses the ++ cloudinit/netinfo module. ++ ++ The reasons for using netifaces include: ++ ++ * Netifaces is built in C and is more portable across multiple systems ++ and more deterministic than shell exec'ing local network commands and ++ parsing their output. ++ ++ * Netifaces provides a stable way to determine the view of the host's ++ network after DHCP has brought the network online. Unlike most other ++ datasources, this datasource still provides support for JINJA queries ++ based on networking information even when the network is based on a ++ DHCP lease. While this does not tie this datasource directly to ++ netifaces, it does mean the ability to consistently obtain the ++ correct information is paramount. ++ ++ * It is currently possible to execute this datasource on macOS ++ (which many developers use today) to print the output of the ++ get_host_info function. This function calls netifaces to obtain ++ the same runtime network configuration that the datasource would ++ persist to the local system's instance data. ++ ++ However, the netinfo module fails on macOS. The result is either a ++ hung operation that requires a SIGINT to return control to the user, ++ or, if brew is used to install iproute2mac, the ip commands are used ++ but produce output the netinfo module is unable to parse. ++ ++ While macOS is not a target of cloud-init, this feature is quite ++ useful when working on this datasource. ++ ++ For more information about this behavior, please see the following ++ PR comment, https://bit.ly/3fG7OVh. ++ ++ The authors of this datasource are not opposed to moving away from ++ netifaces. The goal may be to eventually do just that. This proviso was ++ added to the top of this module as a way to remind future-us and others ++ why netifaces was used in the first place in order to either smooth the ++ transition away from netifaces or embrace it further up the cloud-init ++ stack. 
++""" ++ ++import collections ++import copy ++from distutils.spawn import find_executable ++import ipaddress ++import json ++import os ++import socket ++import time ++ ++from cloudinit import dmi, log as logging ++from cloudinit import sources ++from cloudinit import util ++from cloudinit.subp import subp, ProcessExecutionError ++ ++import netifaces ++ ++ ++PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid" ++ ++LOG = logging.getLogger(__name__) ++NOVAL = "No value found" ++ ++DATA_ACCESS_METHOD_ENVVAR = "envvar" ++DATA_ACCESS_METHOD_GUESTINFO = "guestinfo" ++ ++VMWARE_RPCTOOL = find_executable("vmware-rpctool") ++REDACT = "redact" ++CLEANUP_GUESTINFO = "cleanup-guestinfo" ++VMX_GUESTINFO = "VMX_GUESTINFO" ++GUESTINFO_EMPTY_YAML_VAL = "---" ++ ++LOCAL_IPV4 = "local-ipv4" ++LOCAL_IPV6 = "local-ipv6" ++WAIT_ON_NETWORK = "wait-on-network" ++WAIT_ON_NETWORK_IPV4 = "ipv4" ++WAIT_ON_NETWORK_IPV6 = "ipv6" ++ ++ ++class DataSourceVMware(sources.DataSource): ++ """ ++ Setting the hostname: ++ The hostname is set by way of the metadata key "local-hostname". ++ ++ Setting the instance ID: ++ The instance ID may be set by way of the metadata key "instance-id". ++ However, if this value is absent then the instance ID is read ++ from the file /sys/class/dmi/id/product_uuid. ++ ++ Configuring the network: ++ The network is configured by setting the metadata key "network" ++ with a value consistent with Network Config Versions 1 or 2, ++ depending on the Linux distro's version of cloud-init: ++ ++ Network Config Version 1 - http://bit.ly/cloudinit-net-conf-v1 ++ Network Config Version 2 - http://bit.ly/cloudinit-net-conf-v2 ++ ++ For example, CentOS 7's official cloud-init package is version ++ 0.7.9 and does not support Network Config Version 2. However, ++ this datasource still supports supplying Network Config Version 2 ++ data as long as the Linux distro's cloud-init package is new ++ enough to parse the data. ++ ++ The metadata key "network.encoding" may be used to indicate the ++ format of the metadata key "network". Valid encodings are base64 ++ and gzip+base64. ++ """ ++ ++ dsname = "VMware" ++ ++ def __init__(self, sys_cfg, distro, paths, ud_proc=None): ++ sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) ++ ++ self.data_access_method = None ++ self.vmware_rpctool = VMWARE_RPCTOOL ++ ++ def _get_data(self): ++ """ ++ _get_data loads the metadata, userdata, and vendordata from one of ++ the following locations in the given order: ++ ++ * envvars ++ * guestinfo ++ ++ Please note when updating this function with support for new data ++ transports, the order should match the order in the dscheck_VMware ++ function from the file ds-identify. ++ """ ++ ++ # Initialize the locally scoped metadata, userdata, and vendordata ++ # variables. They are assigned below depending on the detected data ++ # access method. ++ md, ud, vd = None, None, None ++ ++ # First check to see if there is data via env vars. ++ if os.environ.get(VMX_GUESTINFO, ""): ++ md = guestinfo_envvar("metadata") ++ ud = guestinfo_envvar("userdata") ++ vd = guestinfo_envvar("vendordata") ++ ++ if md or ud or vd: ++ self.data_access_method = DATA_ACCESS_METHOD_ENVVAR ++ ++ # At this point, all additional data transports are valid only on ++ # a VMware platform. 
++ if not self.data_access_method: ++ system_type = dmi.read_dmi_data("system-product-name") ++ if system_type is None: ++ LOG.debug("No system-product-name found") ++ return False ++ if "vmware" not in system_type.lower(): ++ LOG.debug("Not a VMware platform") ++ return False ++ ++ # If no data was detected, check the guestinfo transport next. ++ if not self.data_access_method: ++ if self.vmware_rpctool: ++ md = guestinfo("metadata", self.vmware_rpctool) ++ ud = guestinfo("userdata", self.vmware_rpctool) ++ vd = guestinfo("vendordata", self.vmware_rpctool) ++ ++ if md or ud or vd: ++ self.data_access_method = DATA_ACCESS_METHOD_GUESTINFO ++ ++ if not self.data_access_method: ++ LOG.error("failed to find a valid data access method") ++ return False ++ ++ LOG.info("using data access method %s", self._get_subplatform()) ++ ++ # Get the metadata. ++ self.metadata = process_metadata(load_json_or_yaml(md)) ++ ++ # Get the user data. ++ self.userdata_raw = ud ++ ++ # Get the vendor data. ++ self.vendordata_raw = vd ++ ++ # Redact any sensitive information. ++ self.redact_keys() ++ ++ # get_data returns true if there is any available metadata, ++ # userdata, or vendordata. ++ if self.metadata or self.userdata_raw or self.vendordata_raw: ++ return True ++ else: ++ return False ++ ++ def setup(self, is_new_instance): ++ """setup(is_new_instance) ++ ++ This is called before user-data and vendor-data have been processed. ++ ++ Unless the datasource has set mode to 'local', then networking ++ per 'fallback' or per 'network_config' will have been written and ++ brought up the OS at this point. ++ """ ++ ++ host_info = wait_on_network(self.metadata) ++ LOG.info("got host-info: %s", host_info) ++ ++ # Reflect any possible local IPv4 or IPv6 addresses in the guest ++ # info. ++ advertise_local_ip_addrs(host_info) ++ ++ # Ensure the metadata gets updated with information about the ++ # host, including the network interfaces, default IP addresses, ++ # etc. ++ self.metadata = util.mergemanydict([self.metadata, host_info]) ++ ++ # Persist the instance data for versions of cloud-init that support ++ # doing so. This occurs here rather than in the get_data call in ++ # order to ensure that the network interfaces are up and can be ++ # persisted with the metadata. ++ self.persist_instance_data() ++ ++ def _get_subplatform(self): ++ get_key_name_fn = None ++ if self.data_access_method == DATA_ACCESS_METHOD_ENVVAR: ++ get_key_name_fn = get_guestinfo_envvar_key_name ++ elif self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO: ++ get_key_name_fn = get_guestinfo_key_name ++ else: ++ return sources.METADATA_UNKNOWN ++ ++ return "%s (%s)" % ( ++ self.data_access_method, ++ get_key_name_fn("metadata"), ++ ) ++ ++ @property ++ def network_config(self): ++ if "network" in self.metadata: ++ LOG.debug("using metadata network config") ++ else: ++ LOG.debug("using fallback network config") ++ self.metadata["network"] = { ++ "config": self.distro.generate_fallback_config(), ++ } ++ return self.metadata["network"]["config"] ++ ++ def get_instance_id(self): ++ # Pull the instance ID out of the metadata if present. Otherwise ++ # read the file /sys/class/dmi/id/product_uuid for the instance ID. 
++ if self.metadata and "instance-id" in self.metadata: ++ return self.metadata["instance-id"] ++ with open(PRODUCT_UUID_FILE_PATH, "r") as id_file: ++ self.metadata["instance-id"] = str(id_file.read()).rstrip().lower() ++ return self.metadata["instance-id"] ++ ++ def get_public_ssh_keys(self): ++ for key_name in ( ++ "public-keys-data", ++ "public_keys_data", ++ "public-keys", ++ "public_keys", ++ ): ++ if key_name in self.metadata: ++ return sources.normalize_pubkey_data(self.metadata[key_name]) ++ return [] ++ ++ def redact_keys(self): ++ # Determine if there are any keys to redact. ++ keys_to_redact = None ++ if REDACT in self.metadata: ++ keys_to_redact = self.metadata[REDACT] ++ elif CLEANUP_GUESTINFO in self.metadata: ++ # This is for backwards compatibility. ++ keys_to_redact = self.metadata[CLEANUP_GUESTINFO] ++ ++ if self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO: ++ guestinfo_redact_keys(keys_to_redact, self.vmware_rpctool) ++ ++ ++def decode(key, enc_type, data): ++ """ ++ decode returns the decoded string value of data ++ key is a string used to identify the data being decoded in log messages ++ """ ++ LOG.debug("Getting encoded data for key=%s, enc=%s", key, enc_type) ++ ++ raw_data = None ++ if enc_type in ["gzip+base64", "gz+b64"]: ++ LOG.debug("Decoding %s format %s", enc_type, key) ++ raw_data = util.decomp_gzip(util.b64d(data)) ++ elif enc_type in ["base64", "b64"]: ++ LOG.debug("Decoding %s format %s", enc_type, key) ++ raw_data = util.b64d(data) ++ else: ++ LOG.debug("Plain-text data %s", key) ++ raw_data = data ++ ++ return util.decode_binary(raw_data) ++ ++ ++def get_none_if_empty_val(val): ++ """ ++ get_none_if_empty_val returns None if the provided value, once stripped ++ of its trailing whitespace, is empty or equal to GUESTINFO_EMPTY_YAML_VAL. ++ ++ The return value is always a string, regardless of whether the input is ++ a bytes class or a string. ++ """ ++ ++ # If the provided value is a bytes class, convert it to a string to ++ # simplify the rest of this function's logic. ++ val = util.decode_binary(val) ++ val = val.rstrip() ++ if len(val) == 0 or val == GUESTINFO_EMPTY_YAML_VAL: ++ return None ++ return val ++ ++ ++def advertise_local_ip_addrs(host_info): ++ """ ++ advertise_local_ip_addrs gets the local IP address information from ++ the provided host_info map and sets the addresses in the guestinfo ++ namespace ++ """ ++ if not host_info: ++ return ++ ++ # Reflect any possible local IPv4 or IPv6 addresses in the guest ++ # info. ++ local_ipv4 = host_info.get(LOCAL_IPV4) ++ if local_ipv4: ++ guestinfo_set_value(LOCAL_IPV4, local_ipv4) ++ LOG.info("advertised local ipv4 address %s in guestinfo", local_ipv4) ++ ++ local_ipv6 = host_info.get(LOCAL_IPV6) ++ if local_ipv6: ++ guestinfo_set_value(LOCAL_IPV6, local_ipv6) ++ LOG.info("advertised local ipv6 address %s in guestinfo", local_ipv6) ++ ++ ++def handle_returned_guestinfo_val(key, val): ++ """ ++ handle_returned_guestinfo_val returns the provided value if it is ++ not empty or set to GUESTINFO_EMPTY_YAML_VAL, otherwise None is ++ returned ++ """ ++ val = get_none_if_empty_val(val) ++ if val: ++ return val ++ LOG.debug("No value found for key %s", key) ++ return None ++ ++ ++def get_guestinfo_key_name(key): ++ return "guestinfo." + key ++ ++ ++def get_guestinfo_envvar_key_name(key): ++ return ("vmx." 
+ get_guestinfo_key_name(key)).upper().replace(".", "_", -1) ++ ++ ++def guestinfo_envvar(key): ++ val = guestinfo_envvar_get_value(key) ++ if not val: ++ return None ++ enc_type = guestinfo_envvar_get_value(key + ".encoding") ++ return decode(get_guestinfo_envvar_key_name(key), enc_type, val) ++ ++ ++def guestinfo_envvar_get_value(key): ++ env_key = get_guestinfo_envvar_key_name(key) ++ return handle_returned_guestinfo_val(key, os.environ.get(env_key, "")) ++ ++ ++def guestinfo(key, vmware_rpctool=VMWARE_RPCTOOL): ++ """ ++ guestinfo returns the guestinfo value for the provided key, decoding ++ the value when required ++ """ ++ val = guestinfo_get_value(key, vmware_rpctool) ++ if not val: ++ return None ++ enc_type = guestinfo_get_value(key + ".encoding", vmware_rpctool) ++ return decode(get_guestinfo_key_name(key), enc_type, val) ++ ++ ++def guestinfo_get_value(key, vmware_rpctool=VMWARE_RPCTOOL): ++ """ ++ Returns a guestinfo value for the specified key. ++ """ ++ LOG.debug("Getting guestinfo value for key %s", key) ++ ++ try: ++ (stdout, stderr) = subp( ++ [ ++ vmware_rpctool, ++ "info-get " + get_guestinfo_key_name(key), ++ ] ++ ) ++ if stderr == NOVAL: ++ LOG.debug("No value found for key %s", key) ++ elif not stdout: ++ LOG.error("Failed to get guestinfo value for key %s", key) ++ return handle_returned_guestinfo_val(key, stdout) ++ except ProcessExecutionError as error: ++ if error.stderr == NOVAL: ++ LOG.debug("No value found for key %s", key) ++ else: ++ util.logexc( ++ LOG, ++ "Failed to get guestinfo value for key %s: %s", ++ key, ++ error, ++ ) ++ except Exception: ++ util.logexc( ++ LOG, ++ "Unexpected error while trying to get " ++ + "guestinfo value for key %s", ++ key, ++ ) ++ ++ return None ++ ++ ++def guestinfo_set_value(key, value, vmware_rpctool=VMWARE_RPCTOOL): ++ """ ++ Sets a guestinfo value for the specified key. Set value to an empty string ++ to clear an existing guestinfo key. ++ """ ++ ++ # If value is an empty string then set it to a single space as it is not ++ # possible to set a guestinfo key to an empty string. Setting a guestinfo ++ # key to a single space is as close as it gets to clearing an existing ++ # guestinfo key. ++ if value == "": ++ value = " " ++ ++ LOG.debug("Setting guestinfo key=%s to value=%s", key, value) ++ ++ try: ++ subp( ++ [ ++ vmware_rpctool, ++ ("info-set %s %s" % (get_guestinfo_key_name(key), value)), ++ ] ++ ) ++ return True ++ except ProcessExecutionError as error: ++ util.logexc( ++ LOG, ++ "Failed to set guestinfo key=%s to value=%s: %s", ++ key, ++ value, ++ error, ++ ) ++ except Exception: ++ util.logexc( ++ LOG, ++ "Unexpected error while trying to set " ++ + "guestinfo key=%s to value=%s", ++ key, ++ value, ++ ) ++ ++ return None ++ ++ ++def guestinfo_redact_keys(keys, vmware_rpctool=VMWARE_RPCTOOL): ++ """ ++ guestinfo_redact_keys redacts guestinfo of all of the keys in the given ++ list. each key will have its value set to "---". Since the value is valid ++ YAML, cloud-init can still read it if it tries. 
++ """ ++ if not keys: ++ return ++ if not type(keys) in (list, tuple): ++ keys = [keys] ++ for key in keys: ++ key_name = get_guestinfo_key_name(key) ++ LOG.info("clearing %s", key_name) ++ if not guestinfo_set_value( ++ key, GUESTINFO_EMPTY_YAML_VAL, vmware_rpctool ++ ): ++ LOG.error("failed to clear %s", key_name) ++ LOG.info("clearing %s.encoding", key_name) ++ if not guestinfo_set_value(key + ".encoding", "", vmware_rpctool): ++ LOG.error("failed to clear %s.encoding", key_name) ++ ++ ++def load_json_or_yaml(data): ++ """ ++ load first attempts to unmarshal the provided data as JSON, and if ++ that fails then attempts to unmarshal the data as YAML. If data is ++ None then a new dictionary is returned. ++ """ ++ if not data: ++ return {} ++ try: ++ return util.load_json(data) ++ except (json.JSONDecodeError, TypeError): ++ return util.load_yaml(data) ++ ++ ++def process_metadata(data): ++ """ ++ process_metadata processes metadata and loads the optional network ++ configuration. ++ """ ++ network = None ++ if "network" in data: ++ network = data["network"] ++ del data["network"] ++ ++ network_enc = None ++ if "network.encoding" in data: ++ network_enc = data["network.encoding"] ++ del data["network.encoding"] ++ ++ if network: ++ if isinstance(network, collections.abc.Mapping): ++ LOG.debug("network data copied to 'config' key") ++ network = {"config": copy.deepcopy(network)} ++ else: ++ LOG.debug("network data to be decoded %s", network) ++ dec_net = decode("metadata.network", network_enc, network) ++ network = { ++ "config": load_json_or_yaml(dec_net), ++ } ++ ++ LOG.debug("network data %s", network) ++ data["network"] = network ++ ++ return data ++ ++ ++# Used to match classes to dependencies ++datasources = [ ++ (DataSourceVMware, (sources.DEP_FILESYSTEM,)), # Run at init-local ++ (DataSourceVMware, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ++] ++ ++ ++def get_datasource_list(depends): ++ """ ++ Return a list of data sources that match this set of dependencies ++ """ ++ return sources.list_from_depends(depends, datasources) ++ ++ ++def get_default_ip_addrs(): ++ """ ++ Returns the default IPv4 and IPv6 addresses based on the device(s) used for ++ the default route. Please note that None may be returned for either address ++ family if that family has no default route or if there are multiple ++ addresses associated with the device used by the default route for a given ++ address. ++ """ ++ # TODO(promote and use netifaces in cloudinit.net* modules) ++ gateways = netifaces.gateways() ++ if "default" not in gateways: ++ return None, None ++ ++ default_gw = gateways["default"] ++ if ( ++ netifaces.AF_INET not in default_gw ++ and netifaces.AF_INET6 not in default_gw ++ ): ++ return None, None ++ ++ ipv4 = None ++ ipv6 = None ++ ++ gw4 = default_gw.get(netifaces.AF_INET) ++ if gw4: ++ _, dev4 = gw4 ++ addr4_fams = netifaces.ifaddresses(dev4) ++ if addr4_fams: ++ af_inet4 = addr4_fams.get(netifaces.AF_INET) ++ if af_inet4: ++ if len(af_inet4) > 1: ++ LOG.warning( ++ "device %s has more than one ipv4 address: %s", ++ dev4, ++ af_inet4, ++ ) ++ elif "addr" in af_inet4[0]: ++ ipv4 = af_inet4[0]["addr"] ++ ++ # Try to get the default IPv6 address by first seeing if there is a default ++ # IPv6 route. 
++ gw6 = default_gw.get(netifaces.AF_INET6) ++ if gw6: ++ _, dev6 = gw6 ++ addr6_fams = netifaces.ifaddresses(dev6) ++ if addr6_fams: ++ af_inet6 = addr6_fams.get(netifaces.AF_INET6) ++ if af_inet6: ++ if len(af_inet6) > 1: ++ LOG.warning( ++ "device %s has more than one ipv6 address: %s", ++ dev6, ++ af_inet6, ++ ) ++ elif "addr" in af_inet6[0]: ++ ipv6 = af_inet6[0]["addr"] ++ ++ # If there is a default IPv4 address but not IPv6, then see if there is a ++ # single IPv6 address associated with the same device associated with the ++ # default IPv4 address. ++ if ipv4 and not ipv6: ++ af_inet6 = addr4_fams.get(netifaces.AF_INET6) ++ if af_inet6: ++ if len(af_inet6) > 1: ++ LOG.warning( ++ "device %s has more than one ipv6 address: %s", ++ dev4, ++ af_inet6, ++ ) ++ elif "addr" in af_inet6[0]: ++ ipv6 = af_inet6[0]["addr"] ++ ++ # If there is a default IPv6 address but not IPv4, then see if there is a ++ # single IPv4 address associated with the same device associated with the ++ # default IPv6 address. ++ if not ipv4 and ipv6: ++ af_inet4 = addr6_fams.get(netifaces.AF_INET) ++ if af_inet4: ++ if len(af_inet4) > 1: ++ LOG.warning( ++ "device %s has more than one ipv4 address: %s", ++ dev6, ++ af_inet4, ++ ) ++ elif "addr" in af_inet4[0]: ++ ipv4 = af_inet4[0]["addr"] ++ ++ return ipv4, ipv6 ++ ++ ++# patched socket.getfqdn() - see https://bugs.python.org/issue5004 ++ ++ ++def getfqdn(name=""): ++ """Get fully qualified domain name from name. ++ An empty argument is interpreted as meaning the local host. ++ """ ++ # TODO(may want to promote this function to util.getfqdn) ++ # TODO(may want to extend util.get_hostname to accept fqdn=True param) ++ name = name.strip() ++ if not name or name == "0.0.0.0": ++ name = util.get_hostname() ++ try: ++ addrs = socket.getaddrinfo( ++ name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME ++ ) ++ except socket.error: ++ pass ++ else: ++ for addr in addrs: ++ if addr[3]: ++ name = addr[3] ++ break ++ return name ++ ++ ++def is_valid_ip_addr(val): ++ """ ++ Returns false if the address is loopback, link local or unspecified; ++ otherwise true is returned. ++ """ ++ # TODO(extend cloudinit.net.is_ip_addr exclude link_local/loopback etc) ++ # TODO(migrate to use cloudinit.net.is_ip_addr)# ++ ++ addr = None ++ try: ++ addr = ipaddress.ip_address(val) ++ except ipaddress.AddressValueError: ++ addr = ipaddress.ip_address(str(val)) ++ except Exception: ++ return None ++ ++ if addr.is_link_local or addr.is_loopback or addr.is_unspecified: ++ return False ++ return True ++ ++ ++def get_host_info(): ++ """ ++ Returns host information such as the host name and network interfaces. 
++ """ ++ # TODO(look to promote netifices use up in cloud-init netinfo funcs) ++ host_info = { ++ "network": { ++ "interfaces": { ++ "by-mac": collections.OrderedDict(), ++ "by-ipv4": collections.OrderedDict(), ++ "by-ipv6": collections.OrderedDict(), ++ }, ++ }, ++ } ++ hostname = getfqdn(util.get_hostname()) ++ if hostname: ++ host_info["hostname"] = hostname ++ host_info["local-hostname"] = hostname ++ host_info["local_hostname"] = hostname ++ ++ default_ipv4, default_ipv6 = get_default_ip_addrs() ++ if default_ipv4: ++ host_info[LOCAL_IPV4] = default_ipv4 ++ if default_ipv6: ++ host_info[LOCAL_IPV6] = default_ipv6 ++ ++ by_mac = host_info["network"]["interfaces"]["by-mac"] ++ by_ipv4 = host_info["network"]["interfaces"]["by-ipv4"] ++ by_ipv6 = host_info["network"]["interfaces"]["by-ipv6"] ++ ++ ifaces = netifaces.interfaces() ++ for dev_name in ifaces: ++ addr_fams = netifaces.ifaddresses(dev_name) ++ af_link = addr_fams.get(netifaces.AF_LINK) ++ af_inet4 = addr_fams.get(netifaces.AF_INET) ++ af_inet6 = addr_fams.get(netifaces.AF_INET6) ++ ++ mac = None ++ if af_link and "addr" in af_link[0]: ++ mac = af_link[0]["addr"] ++ ++ # Do not bother recording localhost ++ if mac == "00:00:00:00:00:00": ++ continue ++ ++ if mac and (af_inet4 or af_inet6): ++ key = mac ++ val = {} ++ if af_inet4: ++ af_inet4_vals = [] ++ for ip_info in af_inet4: ++ if not is_valid_ip_addr(ip_info["addr"]): ++ continue ++ af_inet4_vals.append(ip_info) ++ val["ipv4"] = af_inet4_vals ++ if af_inet6: ++ af_inet6_vals = [] ++ for ip_info in af_inet6: ++ if not is_valid_ip_addr(ip_info["addr"]): ++ continue ++ af_inet6_vals.append(ip_info) ++ val["ipv6"] = af_inet6_vals ++ by_mac[key] = val ++ ++ if af_inet4: ++ for ip_info in af_inet4: ++ key = ip_info["addr"] ++ if not is_valid_ip_addr(key): ++ continue ++ val = copy.deepcopy(ip_info) ++ del val["addr"] ++ if mac: ++ val["mac"] = mac ++ by_ipv4[key] = val ++ ++ if af_inet6: ++ for ip_info in af_inet6: ++ key = ip_info["addr"] ++ if not is_valid_ip_addr(key): ++ continue ++ val = copy.deepcopy(ip_info) ++ del val["addr"] ++ if mac: ++ val["mac"] = mac ++ by_ipv6[key] = val ++ ++ return host_info ++ ++ ++def wait_on_network(metadata): ++ # Determine whether we need to wait on the network coming online. ++ wait_on_ipv4 = False ++ wait_on_ipv6 = False ++ if WAIT_ON_NETWORK in metadata: ++ wait_on_network = metadata[WAIT_ON_NETWORK] ++ if WAIT_ON_NETWORK_IPV4 in wait_on_network: ++ wait_on_ipv4_val = wait_on_network[WAIT_ON_NETWORK_IPV4] ++ if isinstance(wait_on_ipv4_val, bool): ++ wait_on_ipv4 = wait_on_ipv4_val ++ else: ++ wait_on_ipv4 = util.translate_bool(wait_on_ipv4_val) ++ if WAIT_ON_NETWORK_IPV6 in wait_on_network: ++ wait_on_ipv6_val = wait_on_network[WAIT_ON_NETWORK_IPV6] ++ if isinstance(wait_on_ipv6_val, bool): ++ wait_on_ipv6 = wait_on_ipv6_val ++ else: ++ wait_on_ipv6 = util.translate_bool(wait_on_ipv6_val) ++ ++ # Get information about the host. ++ host_info = None ++ while host_info is None: ++ # This loop + sleep results in two logs every second while waiting ++ # for either ipv4 or ipv6 up. Do we really need to log each iteration ++ # or can we log once and log on successful exit? 
++ host_info = get_host_info() ++ ++ network = host_info.get("network") or {} ++ interfaces = network.get("interfaces") or {} ++ by_ipv4 = interfaces.get("by-ipv4") or {} ++ by_ipv6 = interfaces.get("by-ipv6") or {} ++ ++ if wait_on_ipv4: ++ ipv4_ready = len(by_ipv4) > 0 if by_ipv4 else False ++ if not ipv4_ready: ++ host_info = None ++ ++ if wait_on_ipv6: ++ ipv6_ready = len(by_ipv6) > 0 if by_ipv6 else False ++ if not ipv6_ready: ++ host_info = None ++ ++ if host_info is None: ++ LOG.debug( ++ "waiting on network: wait4=%s, ready4=%s, wait6=%s, ready6=%s", ++ wait_on_ipv4, ++ ipv4_ready, ++ wait_on_ipv6, ++ ipv6_ready, ++ ) ++ time.sleep(1) ++ ++ LOG.debug("waiting on network complete") ++ return host_info ++ ++ ++def main(): ++ """ ++ Executed when this file is used as a program. ++ """ ++ try: ++ logging.setupBasicLogging() ++ except Exception: ++ pass ++ metadata = { ++ "wait-on-network": {"ipv4": True, "ipv6": "false"}, ++ "network": {"config": {"dhcp": True}}, ++ } ++ host_info = wait_on_network(metadata) ++ metadata = util.mergemanydict([metadata, host_info]) ++ print(util.json_dumps(metadata)) ++ ++ ++if __name__ == "__main__": ++ main() ++ ++# vi: ts=4 expandtab +diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst +index f58b2b38..6606367c 100644 +--- a/doc/rtd/topics/availability.rst ++++ b/doc/rtd/topics/availability.rst +@@ -64,5 +64,6 @@ Additionally, cloud-init is supported on these private clouds: + - LXD + - KVM + - Metal-as-a-Service (MAAS) ++- VMware + + .. vi: textwidth=79 +diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst +index 228173d2..8afed470 100644 +--- a/doc/rtd/topics/datasources.rst ++++ b/doc/rtd/topics/datasources.rst +@@ -49,7 +49,7 @@ The following is a list of documents for each supported datasource: + datasources/smartos.rst + datasources/upcloud.rst + datasources/zstack.rst +- ++ datasources/vmware.rst + + Creation + ======== +diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst +new file mode 100644 +index 00000000..996eb61f +--- /dev/null ++++ b/doc/rtd/topics/datasources/vmware.rst +@@ -0,0 +1,359 @@ ++.. _datasource_vmware: ++ ++VMware ++====== ++ ++This datasource is for use with systems running on a VMware platform such as ++vSphere and currently supports the following data transports: ++ ++ ++* `GuestInfo `_ keys ++ ++Configuration ++------------- ++ ++The configuration method is dependent upon the transport: ++ ++GuestInfo Keys ++^^^^^^^^^^^^^^ ++ ++One method of providing meta, user, and vendor data is by setting the following ++key/value pairs on a VM's ``extraConfig`` `property `_ : ++ ++.. list-table:: ++ :header-rows: 1 ++ ++ * - Property ++ - Description ++ * - ``guestinfo.metadata`` ++ - A YAML or JSON document containing the cloud-init metadata. ++ * - ``guestinfo.metadata.encoding`` ++ - The encoding type for ``guestinfo.metadata``. ++ * - ``guestinfo.userdata`` ++ - A YAML document containing the cloud-init user data. ++ * - ``guestinfo.userdata.encoding`` ++ - The encoding type for ``guestinfo.userdata``. ++ * - ``guestinfo.vendordata`` ++ - A YAML document containing the cloud-init vendor data. ++ * - ``guestinfo.vendordata.encoding`` ++ - The encoding type for ``guestinfo.vendordata``. ++ ++ ++All ``guestinfo.*.encoding`` values may be set to ``base64`` or ++``gzip+base64``. ++ ++Features ++-------- ++ ++This section reviews several features available in this datasource, regardless ++of how the meta, user, and vendor data was discovered. 
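The ``gzip+base64`` encoding described for the ``guestinfo.*.encoding`` keys above can be sketched in a few lines of Python. This is an illustrative round trip only (the metadata string is a made-up example), not code taken from the datasource itself:

.. code-block:: python

    import base64
    import gzip

    # Hypothetical metadata document; any YAML the datasource accepts works here.
    metadata_yaml = "instance-id: cloud-vm\nlocal-hostname: cloud-vm\n"

    # Encode the way a guestinfo.metadata.encoding value of "gzip+base64" expects.
    encoded = base64.b64encode(gzip.compress(metadata_yaml.encode("utf-8")))

    # Reading the key back reverses the same two steps.
    decoded = gzip.decompress(base64.b64decode(encoded)).decode("utf-8")
    assert decoded == metadata_yaml

The plain ``base64`` encoding is the same sketch without the gzip step.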
++ ++Instance data and lazy networks ++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++ ++One of the hallmarks of cloud-init is `its use of instance-data and JINJA ++queries <../instancedata.html#using-instance-data>`_ ++-- the ability to write queries in user and vendor data that reference runtime ++information present in ``/run/cloud-init/instance-data.json``. This works well ++when the metadata provides all of the information up front, such as the network ++configuration. For systems that rely on DHCP, however, this information may not ++be available when the metadata is persisted to disk. ++ ++This datasource ensures that even if the instance is using DHCP to configure ++networking, the same details about the configured network are available in ++``/run/cloud-init/instance-data.json`` as if static networking was used. This ++information collected at runtime is easy to demonstrate by executing the ++datasource on the command line. From the root of this repository, run the ++following command: ++ ++.. code-block:: bash ++ ++ PYTHONPATH="$(pwd)" python3 cloudinit/sources/DataSourceVMware.py ++ ++The above command will result in output similar to the below JSON: ++ ++.. code-block:: json ++ ++ { ++ "hostname": "akutz.localhost", ++ "local-hostname": "akutz.localhost", ++ "local-ipv4": "192.168.0.188", ++ "local_hostname": "akutz.localhost", ++ "network": { ++ "config": { ++ "dhcp": true ++ }, ++ "interfaces": { ++ "by-ipv4": { ++ "172.0.0.2": { ++ "netmask": "255.255.255.255", ++ "peer": "172.0.0.2" ++ }, ++ "192.168.0.188": { ++ "broadcast": "192.168.0.255", ++ "mac": "64:4b:f0:18:9a:21", ++ "netmask": "255.255.255.0" ++ } ++ }, ++ "by-ipv6": { ++ "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2": { ++ "flags": 208, ++ "mac": "64:4b:f0:18:9a:21", ++ "netmask": "ffff:ffff:ffff:ffff::/64" ++ } ++ }, ++ "by-mac": { ++ "64:4b:f0:18:9a:21": { ++ "ipv4": [ ++ { ++ "addr": "192.168.0.188", ++ "broadcast": "192.168.0.255", ++ "netmask": "255.255.255.0" ++ } ++ ], ++ "ipv6": [ ++ { ++ "addr": "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2", ++ "flags": 208, ++ "netmask": "ffff:ffff:ffff:ffff::/64" ++ } ++ ] ++ }, ++ "ac:de:48:00:11:22": { ++ "ipv6": [] ++ } ++ } ++ } ++ }, ++ "wait-on-network": { ++ "ipv4": true, ++ "ipv6": "false" ++ } ++ } ++ ++ ++Redacting sensitive information ++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++ ++Sometimes the cloud-init userdata might contain sensitive information, and it ++may be desirable to have the ``guestinfo.userdata`` key (or other guestinfo ++keys) redacted as soon as its data is read by the datasource. This is possible ++by adding the following to the metadata: ++ ++.. code-block:: yaml ++ ++ redact: # formerly named cleanup-guestinfo, which will also work ++ - userdata ++ - vendordata ++ ++When the above snippet is added to the metadata, the datasource will iterate ++over the elements in the ``redact`` array and clear each of the keys. For ++example, when the guestinfo transport is used, the above snippet will cause ++the following commands to be executed: ++ ++.. code-block:: shell ++ ++ vmware-rpctool "info-set guestinfo.userdata ---" ++ vmware-rpctool "info-set guestinfo.userdata.encoding " ++ vmware-rpctool "info-set guestinfo.vendordata ---" ++ vmware-rpctool "info-set guestinfo.vendordata.encoding " ++ ++Please note that keys are set to the valid YAML string ``---`` as it is not ++possible remove an existing key from the guestinfo key-space. 
A key's analogous ++encoding property will be set to a single white-space character, causing the ++datasource to treat the actual key value as plain-text, thereby loading it as ++an empty YAML doc (hence the aforementioned ``---``\ ). ++ ++Reading the local IP addresses ++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++ ++This datasource automatically discovers the local IPv4 and IPv6 addresses for ++a guest operating system based on the default routes. However, when inspecting ++a VM externally, it's not possible to know what the *default* IP address is for ++the guest OS. That's why this datasource sets the discovered, local IPv4 and ++IPv6 addresses back in the guestinfo namespace as the following keys: ++ ++ ++* ``guestinfo.local-ipv4`` ++* ``guestinfo.local-ipv6`` ++ ++It is possible that a host may not have any default, local IP addresses. It's ++also possible the reported, local addresses are link-local addresses. But these ++two keys may be used to discover what this datasource determined were the local ++IPv4 and IPv6 addresses for a host. ++ ++Waiting on the network ++^^^^^^^^^^^^^^^^^^^^^^ ++ ++Sometimes cloud-init may bring up the network, but it will not finish coming ++online before the datasource's ``setup`` function is called, resulting in an ++``/var/run/cloud-init/instance-data.json`` file that does not have the correct ++network information. It is possible to instruct the datasource to wait until an ++IPv4 or IPv6 address is available before writing the instance data with the ++following metadata properties: ++ ++.. code-block:: yaml ++ ++ wait-on-network: ++ ipv4: true ++ ipv6: true ++ ++If either of the above values are true, then the datasource will sleep for a ++second, check the network status, and repeat until one or both addresses from ++the specified families are available. ++ ++Walkthrough ++----------- ++ ++The following series of steps is a demonstration on how to configure a VM with ++this datasource: ++ ++ ++#. Create the metadata file for the VM. Save the following YAML to a file named ++ ``metadata.yaml``\ : ++ ++ .. code-block:: yaml ++ ++ instance-id: cloud-vm ++ local-hostname: cloud-vm ++ network: ++ version: 2 ++ ethernets: ++ nics: ++ match: ++ name: ens* ++ dhcp4: yes ++ ++#. Create the userdata file ``userdata.yaml``\ : ++ ++ .. code-block:: yaml ++ ++ #cloud-config ++ ++ users: ++ - default ++ - name: akutz ++ primary_group: akutz ++ sudo: ALL=(ALL) NOPASSWD:ALL ++ groups: sudo, wheel ++ ssh_import_id: None ++ lock_passwd: true ++ ssh_authorized_keys: ++ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com ++ ++#. Please note this step requires that the VM be powered off. All of the ++ commands below use the VMware CLI tool, `govc `_. ++ ++ Go ahead and assign the path to the VM to the environment variable ``VM``\ : ++ ++ .. 
code-block:: shell ++ ++ export VM="/inventory/path/to/the/vm" ++ ++#. Power off the VM: ++ ++ .. raw:: html ++ ++
++ ++ ⚠️ First Boot Mode ++ ++ To ensure the next power-on operation results in a first-boot scenario for ++ cloud-init, it may be necessary to run the following command just before ++ powering off the VM: ++ ++ .. code-block:: bash ++ ++ cloud-init clean ++ ++ Otherwise cloud-init may not run in first-boot mode. For more information ++ on how the boot mode is determined, please see the ++ `First Boot Documentation <../boot.html#first-boot-determination>`_. ++ ++ .. raw:: html ++ ++
++ ++ .. code-block:: shell ++ ++ govc vm.power -off "${VM}" ++ ++#. ++ Export the environment variables that contain the cloud-init metadata and ++ userdata: ++ ++ .. code-block:: shell ++ ++ export METADATA=$(gzip -c9 /dev/null || base64; }) \ ++ USERDATA=$(gzip -c9 /dev/null || base64; }) ++ ++#. ++ Assign the metadata and userdata to the VM: ++ ++ .. code-block:: shell ++ ++ govc vm.change -vm "${VM}" \ ++ -e guestinfo.metadata="${METADATA}" \ ++ -e guestinfo.metadata.encoding="gzip+base64" \ ++ -e guestinfo.userdata="${USERDATA}" \ ++ -e guestinfo.userdata.encoding="gzip+base64" ++ ++ Please note the above commands include specifying the encoding for the ++ properties. This is important as it informs the datasource how to decode ++ the data for cloud-init. Valid values for ``metadata.encoding`` and ++ ``userdata.encoding`` include: ++ ++ ++ * ``base64`` ++ * ``gzip+base64`` ++ ++#. ++ Power on the VM: ++ ++ .. code-block:: shell ++ ++ govc vm.power -vm "${VM}" -on ++ ++If all went according to plan, the CentOS box is: ++ ++* Locked down, allowing SSH access only for the user in the userdata ++* Configured for a dynamic IP address via DHCP ++* Has a hostname of ``cloud-vm`` ++ ++Examples ++-------- ++ ++This section reviews common configurations: ++ ++Setting the hostname ++^^^^^^^^^^^^^^^^^^^^ ++ ++The hostname is set by way of the metadata key ``local-hostname``. ++ ++Setting the instance ID ++^^^^^^^^^^^^^^^^^^^^^^^ ++ ++The instance ID may be set by way of the metadata key ``instance-id``. However, ++if this value is absent then then the instance ID is read from the file ++``/sys/class/dmi/id/product_uuid``. ++ ++Providing public SSH keys ++^^^^^^^^^^^^^^^^^^^^^^^^^ ++ ++The public SSH keys may be set by way of the metadata key ``public-keys-data``. ++Each newline-terminated string will be interpreted as a separate SSH public ++key, which will be placed in distro's default user's ++``~/.ssh/authorized_keys``. If the value is empty or absent, then nothing will ++be written to ``~/.ssh/authorized_keys``. ++ ++Configuring the network ++^^^^^^^^^^^^^^^^^^^^^^^ ++ ++The network is configured by setting the metadata key ``network`` with a value ++consistent with Network Config Versions ++`1 <../network-config-format-v1.html>`_ or ++`2 <../network-config-format-v2.html>`_\ , depending on the Linux ++distro's version of cloud-init. ++ ++The metadata key ``network.encoding`` may be used to indicate the format of ++the metadata key "network". Valid encodings are ``base64`` and ``gzip+base64``. +diff --git a/requirements.txt b/requirements.txt +index 5817da3b..41d01d62 100644 +--- a/requirements.txt ++++ b/requirements.txt +@@ -32,3 +32,12 @@ jsonpatch + + # For validating cloud-config sections per schema definitions + jsonschema ++ ++# Used by DataSourceVMware to inspect the host's network configuration during ++# the "setup()" function. ++# ++# This allows a host that uses DHCP to bring up the network during BootLocal ++# and still participate in instance-data by gathering the network in detail at ++# runtime and merge that information into the metadata and repersist that to ++# disk. 
++netifaces>=0.10.9 +diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py +index 5912f7ee..475a2cf8 100644 +--- a/tests/unittests/test_datasource/test_common.py ++++ b/tests/unittests/test_datasource/test_common.py +@@ -28,6 +28,7 @@ from cloudinit.sources import ( + DataSourceScaleway as Scaleway, + DataSourceSmartOS as SmartOS, + DataSourceUpCloud as UpCloud, ++ DataSourceVMware as VMware, + ) + from cloudinit.sources import DataSourceNone as DSNone + +@@ -50,6 +51,7 @@ DEFAULT_LOCAL = [ + RbxCloud.DataSourceRbxCloud, + Scaleway.DataSourceScaleway, + UpCloud.DataSourceUpCloudLocal, ++ VMware.DataSourceVMware, + ] + + DEFAULT_NETWORK = [ +@@ -66,6 +68,7 @@ DEFAULT_NETWORK = [ + OpenStack.DataSourceOpenStack, + OVF.DataSourceOVFNet, + UpCloud.DataSourceUpCloud, ++ VMware.DataSourceVMware, + ] + + +diff --git a/tests/unittests/test_datasource/test_vmware.py b/tests/unittests/test_datasource/test_vmware.py +new file mode 100644 +index 00000000..597db7c8 +--- /dev/null ++++ b/tests/unittests/test_datasource/test_vmware.py +@@ -0,0 +1,377 @@ ++# Copyright (c) 2021 VMware, Inc. All Rights Reserved. ++# ++# Authors: Andrew Kutz ++# ++# This file is part of cloud-init. See LICENSE file for license information. ++ ++import base64 ++import gzip ++from cloudinit import dmi, helpers, safeyaml ++from cloudinit import settings ++from cloudinit.sources import DataSourceVMware ++from cloudinit.tests.helpers import ( ++ mock, ++ CiTestCase, ++ FilesystemMockingTestCase, ++ populate_dir, ++) ++ ++import os ++ ++PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" ++PRODUCT_NAME = "VMware7,1" ++PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" ++REROOT_FILES = { ++ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, ++ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, ++} ++ ++VMW_MULTIPLE_KEYS = [ ++ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com", ++ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com", ++] ++VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com" ++ ++VMW_METADATA_YAML = """instance-id: cloud-vm ++local-hostname: cloud-vm ++network: ++ version: 2 ++ ethernets: ++ nics: ++ match: ++ name: ens* ++ dhcp4: yes ++""" ++ ++VMW_USERDATA_YAML = """## template: jinja ++#cloud-config ++users: ++- default ++""" ++ ++VMW_VENDORDATA_YAML = """## template: jinja ++#cloud-config ++runcmd: ++- echo "Hello, world." ++""" ++ ++ ++class TestDataSourceVMware(CiTestCase): ++ """ ++ Test common functionality that is not transport specific. ++ """ ++ ++ def setUp(self): ++ super(TestDataSourceVMware, self).setUp() ++ self.tmp = self.tmp_dir() ++ ++ def test_no_data_access_method(self): ++ ds = get_ds(self.tmp) ++ ds.vmware_rpctool = None ++ ret = ds.get_data() ++ self.assertFalse(ret) ++ ++ def test_get_host_info(self): ++ host_info = DataSourceVMware.get_host_info() ++ self.assertTrue(host_info) ++ self.assertTrue(host_info["hostname"]) ++ self.assertTrue(host_info["local-hostname"]) ++ self.assertTrue(host_info["local_hostname"]) ++ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4]) ++ ++ ++class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase): ++ """ ++ Test the envvar transport. 
++ """ ++ ++ def setUp(self): ++ super(TestDataSourceVMwareEnvVars, self).setUp() ++ self.tmp = self.tmp_dir() ++ os.environ[DataSourceVMware.VMX_GUESTINFO] = "1" ++ self.create_system_files() ++ ++ def tearDown(self): ++ del os.environ[DataSourceVMware.VMX_GUESTINFO] ++ return super(TestDataSourceVMwareEnvVars, self).tearDown() ++ ++ def create_system_files(self): ++ rootd = self.tmp_dir() ++ populate_dir( ++ rootd, ++ { ++ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, ++ }, ++ ) ++ self.assertTrue(self.reRoot(rootd)) ++ ++ def assert_get_data_ok(self, m_fn, m_fn_call_count=6): ++ ds = get_ds(self.tmp) ++ ds.vmware_rpctool = None ++ ret = ds.get_data() ++ self.assertTrue(ret) ++ self.assertEqual(m_fn_call_count, m_fn.call_count) ++ self.assertEqual( ++ ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR ++ ) ++ return ds ++ ++ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): ++ ds = self.assert_get_data_ok(m_fn, m_fn_call_count) ++ assert_metadata(self, ds, metadata) ++ ++ @mock.patch( ++ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" ++ ) ++ def test_get_subplatform(self, m_fn): ++ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] ++ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ self.assertEqual( ++ ds.subplatform, ++ "%s (%s)" ++ % ( ++ DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR, ++ DataSourceVMware.get_guestinfo_envvar_key_name("metadata"), ++ ), ++ ) ++ ++ @mock.patch( ++ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" ++ ) ++ def test_get_data_metadata_only(self, m_fn): ++ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch( ++ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" ++ ) ++ def test_get_data_userdata_only(self, m_fn): ++ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch( ++ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" ++ ) ++ def test_get_data_vendordata_only(self, m_fn): ++ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch( ++ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" ++ ) ++ def test_get_data_metadata_base64(self, m_fn): ++ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) ++ m_fn.side_effect = [data, "base64", "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch( ++ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" ++ ) ++ def test_get_data_metadata_b64(self, m_fn): ++ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) ++ m_fn.side_effect = [data, "b64", "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch( ++ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" ++ ) ++ def test_get_data_metadata_gzip_base64(self, m_fn): ++ data = VMW_METADATA_YAML.encode("utf-8") ++ data = gzip.compress(data) ++ data = base64.b64encode(data) ++ m_fn.side_effect = [data, "gzip+base64", "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch( ++ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" ++ ) ++ def test_get_data_metadata_gz_b64(self, m_fn): ++ data = VMW_METADATA_YAML.encode("utf-8") ++ data = gzip.compress(data) ++ data = base64.b64encode(data) ++ m_fn.side_effect = [data, "gz+b64", "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch( ++ 
"cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" ++ ) ++ def test_metadata_single_ssh_key(self, m_fn): ++ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) ++ metadata["public_keys"] = VMW_SINGLE_KEY ++ metadata_yaml = safeyaml.dumps(metadata) ++ m_fn.side_effect = [metadata_yaml, "", "", ""] ++ self.assert_metadata(metadata, m_fn, m_fn_call_count=4) ++ ++ @mock.patch( ++ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" ++ ) ++ def test_metadata_multiple_ssh_keys(self, m_fn): ++ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) ++ metadata["public_keys"] = VMW_MULTIPLE_KEYS ++ metadata_yaml = safeyaml.dumps(metadata) ++ m_fn.side_effect = [metadata_yaml, "", "", ""] ++ self.assert_metadata(metadata, m_fn, m_fn_call_count=4) ++ ++ ++class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase): ++ """ ++ Test the guestinfo transport on a VMware platform. ++ """ ++ ++ def setUp(self): ++ super(TestDataSourceVMwareGuestInfo, self).setUp() ++ self.tmp = self.tmp_dir() ++ self.create_system_files() ++ ++ def create_system_files(self): ++ rootd = self.tmp_dir() ++ populate_dir( ++ rootd, ++ { ++ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, ++ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, ++ }, ++ ) ++ self.assertTrue(self.reRoot(rootd)) ++ ++ def assert_get_data_ok(self, m_fn, m_fn_call_count=6): ++ ds = get_ds(self.tmp) ++ ds.vmware_rpctool = "vmware-rpctool" ++ ret = ds.get_data() ++ self.assertTrue(ret) ++ self.assertEqual(m_fn_call_count, m_fn.call_count) ++ self.assertEqual( ++ ds.data_access_method, ++ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, ++ ) ++ return ds ++ ++ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): ++ ds = self.assert_get_data_ok(m_fn, m_fn_call_count) ++ assert_metadata(self, ds, metadata) ++ ++ def test_ds_valid_on_vmware_platform(self): ++ system_type = dmi.read_dmi_data("system-product-name") ++ self.assertEqual(system_type, PRODUCT_NAME) ++ ++ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") ++ def test_get_subplatform(self, m_fn): ++ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] ++ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ self.assertEqual( ++ ds.subplatform, ++ "%s (%s)" ++ % ( ++ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, ++ DataSourceVMware.get_guestinfo_key_name("metadata"), ++ ), ++ ) ++ ++ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") ++ def test_get_data_userdata_only(self, m_fn): ++ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") ++ def test_get_data_vendordata_only(self, m_fn): ++ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") ++ def test_metadata_single_ssh_key(self, m_fn): ++ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) ++ metadata["public_keys"] = VMW_SINGLE_KEY ++ metadata_yaml = safeyaml.dumps(metadata) ++ m_fn.side_effect = [metadata_yaml, "", "", ""] ++ self.assert_metadata(metadata, m_fn, m_fn_call_count=4) ++ ++ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") ++ def test_metadata_multiple_ssh_keys(self, m_fn): ++ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) ++ metadata["public_keys"] = VMW_MULTIPLE_KEYS ++ metadata_yaml = safeyaml.dumps(metadata) ++ m_fn.side_effect = 
[metadata_yaml, "", "", ""] ++ self.assert_metadata(metadata, m_fn, m_fn_call_count=4) ++ ++ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") ++ def test_get_data_metadata_base64(self, m_fn): ++ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) ++ m_fn.side_effect = [data, "base64", "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") ++ def test_get_data_metadata_b64(self, m_fn): ++ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) ++ m_fn.side_effect = [data, "b64", "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") ++ def test_get_data_metadata_gzip_base64(self, m_fn): ++ data = VMW_METADATA_YAML.encode("utf-8") ++ data = gzip.compress(data) ++ data = base64.b64encode(data) ++ m_fn.side_effect = [data, "gzip+base64", "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") ++ def test_get_data_metadata_gz_b64(self, m_fn): ++ data = VMW_METADATA_YAML.encode("utf-8") ++ data = gzip.compress(data) ++ data = base64.b64encode(data) ++ m_fn.side_effect = [data, "gz+b64", "", ""] ++ self.assert_get_data_ok(m_fn, m_fn_call_count=4) ++ ++ ++class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase): ++ """ ++ Test the guestinfo transport on a non-VMware platform. ++ """ ++ ++ def setUp(self): ++ super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp() ++ self.tmp = self.tmp_dir() ++ self.create_system_files() ++ ++ def create_system_files(self): ++ rootd = self.tmp_dir() ++ populate_dir( ++ rootd, ++ { ++ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, ++ }, ++ ) ++ self.assertTrue(self.reRoot(rootd)) ++ ++ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") ++ def test_ds_invalid_on_non_vmware_platform(self, m_fn): ++ system_type = dmi.read_dmi_data("system-product-name") ++ self.assertEqual(system_type, None) ++ ++ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] ++ ds = get_ds(self.tmp) ++ ds.vmware_rpctool = "vmware-rpctool" ++ ret = ds.get_data() ++ self.assertFalse(ret) ++ ++ ++def assert_metadata(test_obj, ds, metadata): ++ test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id()) ++ test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname()) ++ ++ expected_public_keys = metadata.get("public_keys") ++ if not isinstance(expected_public_keys, list): ++ expected_public_keys = [expected_public_keys] ++ ++ test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys()) ++ test_obj.assertIsInstance(ds.get_public_ssh_keys(), list) ++ ++ ++def get_ds(temp_dir): ++ ds = DataSourceVMware.DataSourceVMware( ++ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir}) ++ ) ++ ds.vmware_rpctool = "vmware-rpctool" ++ return ds ++ ++ ++# vi: ts=4 expandtab +diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py +index 1d8aaf18..8617d7bd 100644 +--- a/tests/unittests/test_ds_identify.py ++++ b/tests/unittests/test_ds_identify.py +@@ -649,6 +649,50 @@ class TestDsIdentify(DsIdentifyBase): + """EC2: bobrightbox.com in product_serial is not brightbox'""" + self._test_ds_not_found('Ec2-E24Cloud-negative') + ++ def test_vmware_no_valid_transports(self): ++ """VMware: no valid transports""" ++ self._test_ds_not_found('VMware-NoValidTransports') ++ ++ def test_vmware_envvar_no_data(self): ++ 
"""VMware: envvar transport no data""" ++ self._test_ds_not_found('VMware-EnvVar-NoData') ++ ++ def test_vmware_envvar_no_virt_id(self): ++ """VMware: envvar transport success if no virt id""" ++ self._test_ds_found('VMware-EnvVar-NoVirtID') ++ ++ def test_vmware_envvar_activated_by_metadata(self): ++ """VMware: envvar transport activated by metadata""" ++ self._test_ds_found('VMware-EnvVar-Metadata') ++ ++ def test_vmware_envvar_activated_by_userdata(self): ++ """VMware: envvar transport activated by userdata""" ++ self._test_ds_found('VMware-EnvVar-Userdata') ++ ++ def test_vmware_envvar_activated_by_vendordata(self): ++ """VMware: envvar transport activated by vendordata""" ++ self._test_ds_found('VMware-EnvVar-Vendordata') ++ ++ def test_vmware_guestinfo_no_data(self): ++ """VMware: guestinfo transport no data""" ++ self._test_ds_not_found('VMware-GuestInfo-NoData') ++ ++ def test_vmware_guestinfo_no_virt_id(self): ++ """VMware: guestinfo transport fails if no virt id""" ++ self._test_ds_not_found('VMware-GuestInfo-NoVirtID') ++ ++ def test_vmware_guestinfo_activated_by_metadata(self): ++ """VMware: guestinfo transport activated by metadata""" ++ self._test_ds_found('VMware-GuestInfo-Metadata') ++ ++ def test_vmware_guestinfo_activated_by_userdata(self): ++ """VMware: guestinfo transport activated by userdata""" ++ self._test_ds_found('VMware-GuestInfo-Userdata') ++ ++ def test_vmware_guestinfo_activated_by_vendordata(self): ++ """VMware: guestinfo transport activated by vendordata""" ++ self._test_ds_found('VMware-GuestInfo-Vendordata') ++ + + class TestBSDNoSys(DsIdentifyBase): + """Test *BSD code paths +@@ -1136,7 +1180,240 @@ VALID_CFG = { + 'Ec2-E24Cloud-negative': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloudyday\n'}, +- } ++ }, ++ 'VMware-NoValidTransports': { ++ 'ds': 'VMware', ++ 'mocks': [ ++ MOCK_VIRT_IS_VMWARE, ++ ], ++ }, ++ 'VMware-EnvVar-NoData': { ++ 'ds': 'VMware', ++ 'mocks': [ ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo', ++ 'ret': 0, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', ++ 'ret': 1, ++ }, ++ MOCK_VIRT_IS_VMWARE, ++ ], ++ }, ++ 'VMware-EnvVar-NoVirtID': { ++ 'ds': 'VMware', ++ 'mocks': [ ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo', ++ 'ret': 0, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', ++ 'ret': 0, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', ++ 'ret': 1, ++ }, ++ ], ++ }, ++ 'VMware-EnvVar-Metadata': { ++ 'ds': 'VMware', ++ 'mocks': [ ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo', ++ 'ret': 0, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', ++ 'ret': 0, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', ++ 'ret': 1, ++ }, ++ MOCK_VIRT_IS_VMWARE, ++ ], ++ }, ++ 'VMware-EnvVar-Userdata': { ++ 'ds': 'VMware', ++ 'mocks': [ ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo', ++ 'ret': 0, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', ++ 'ret': 0, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', ++ 'ret': 1, ++ }, ++ MOCK_VIRT_IS_VMWARE, ++ ], ++ }, ++ 'VMware-EnvVar-Vendordata': { ++ 'ds': 'VMware', ++ 'mocks': [ 
++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo', ++ 'ret': 0, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', ++ 'ret': 0, ++ }, ++ MOCK_VIRT_IS_VMWARE, ++ ], ++ }, ++ 'VMware-GuestInfo-NoData': { ++ 'ds': 'VMware', ++ 'mocks': [ ++ { ++ 'name': 'vmware_has_rpctool', ++ 'ret': 0, ++ 'out': '/usr/bin/vmware-rpctool', ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_metadata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_userdata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_vendordata', ++ 'ret': 1, ++ }, ++ MOCK_VIRT_IS_VMWARE, ++ ], ++ }, ++ 'VMware-GuestInfo-NoVirtID': { ++ 'ds': 'VMware', ++ 'mocks': [ ++ { ++ 'name': 'vmware_has_rpctool', ++ 'ret': 0, ++ 'out': '/usr/bin/vmware-rpctool', ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_metadata', ++ 'ret': 0, ++ 'out': '---', ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_userdata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_vendordata', ++ 'ret': 1, ++ }, ++ ], ++ }, ++ 'VMware-GuestInfo-Metadata': { ++ 'ds': 'VMware', ++ 'mocks': [ ++ { ++ 'name': 'vmware_has_rpctool', ++ 'ret': 0, ++ 'out': '/usr/bin/vmware-rpctool', ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_metadata', ++ 'ret': 0, ++ 'out': '---', ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_userdata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_vendordata', ++ 'ret': 1, ++ }, ++ MOCK_VIRT_IS_VMWARE, ++ ], ++ }, ++ 'VMware-GuestInfo-Userdata': { ++ 'ds': 'VMware', ++ 'mocks': [ ++ { ++ 'name': 'vmware_has_rpctool', ++ 'ret': 0, ++ 'out': '/usr/bin/vmware-rpctool', ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_metadata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_userdata', ++ 'ret': 0, ++ 'out': '---', ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_vendordata', ++ 'ret': 1, ++ }, ++ MOCK_VIRT_IS_VMWARE, ++ ], ++ }, ++ 'VMware-GuestInfo-Vendordata': { ++ 'ds': 'VMware', ++ 'mocks': [ ++ { ++ 'name': 'vmware_has_rpctool', ++ 'ret': 0, ++ 'out': '/usr/bin/vmware-rpctool', ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_metadata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_userdata', ++ 'ret': 1, ++ }, ++ { ++ 'name': 'vmware_rpctool_guestinfo_vendordata', ++ 'ret': 0, ++ 'out': '---', ++ }, ++ MOCK_VIRT_IS_VMWARE, ++ ], ++ }, + } + + # vi: ts=4 expandtab +diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers +index 689d7902..cbfa883c 100644 +--- a/tools/.github-cla-signers ++++ b/tools/.github-cla-signers +@@ -1,5 +1,6 @@ + ader1990 + ajmyyra ++akutz + AlexBaranowski + Aman306 + andrewbogott +diff --git a/tools/ds-identify b/tools/ds-identify +index 2f2486f7..c01eae3d 100755 +--- a/tools/ds-identify ++++ b/tools/ds-identify +@@ -125,7 +125,7 @@ DI_DSNAME="" + # be searched if there is no setting found in config. 
+ DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ + CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ +-OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud" ++OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware" + DI_DSLIST="" + DI_MODE="" + DI_ON_FOUND="" +@@ -1350,6 +1350,80 @@ dscheck_IBMCloud() { + return ${DS_NOT_FOUND} + } + ++vmware_has_envvar_vmx_guestinfo() { ++ [ -n "${VMX_GUESTINFO:-}" ] ++} ++ ++vmware_has_envvar_vmx_guestinfo_metadata() { ++ [ -n "${VMX_GUESTINFO_METADATA:-}" ] ++} ++ ++vmware_has_envvar_vmx_guestinfo_userdata() { ++ [ -n "${VMX_GUESTINFO_USERDATA:-}" ] ++} ++ ++vmware_has_envvar_vmx_guestinfo_vendordata() { ++ [ -n "${VMX_GUESTINFO_VENDORDATA:-}" ] ++} ++ ++vmware_has_rpctool() { ++ command -v vmware-rpctool >/dev/null 2>&1 ++} ++ ++vmware_rpctool_guestinfo_metadata() { ++ vmware-rpctool "info-get guestinfo.metadata" ++} ++ ++vmware_rpctool_guestinfo_userdata() { ++ vmware-rpctool "info-get guestinfo.userdata" ++} ++ ++vmware_rpctool_guestinfo_vendordata() { ++ vmware-rpctool "info-get guestinfo.vendordata" ++} ++ ++dscheck_VMware() { ++ # Checks to see if there is valid data for the VMware datasource. ++ # The data transports are checked in the following order: ++ # ++ # * envvars ++ # * guestinfo ++ # ++ # Please note when updating this function with support for new data ++ # transports, the order should match the order in the _get_data ++ # function from the file DataSourceVMware.py. ++ ++ # Check to see if running in a container and the VMware ++ # datasource is configured via environment variables. ++ if vmware_has_envvar_vmx_guestinfo; then ++ if vmware_has_envvar_vmx_guestinfo_metadata || \ ++ vmware_has_envvar_vmx_guestinfo_userdata || \ ++ vmware_has_envvar_vmx_guestinfo_vendordata; then ++ return "${DS_FOUND}" ++ fi ++ fi ++ ++ # Do not proceed unless the detected platform is VMware. ++ if [ ! "${DI_VIRT}" = "vmware" ]; then ++ return "${DS_NOT_FOUND}" ++ fi ++ ++ # Do not proceed if the vmware-rpctool command is not present. ++ if ! vmware_has_rpctool; then ++ return "${DS_NOT_FOUND}" ++ fi ++ ++ # Activate the VMware datasource only if any of the fields used ++ # by the datasource are present in the guestinfo table. 
++ if { vmware_rpctool_guestinfo_metadata || \ ++ vmware_rpctool_guestinfo_userdata || \ ++ vmware_rpctool_guestinfo_vendordata; } >/dev/null 2>&1; then ++ return "${DS_FOUND}" ++ fi ++ ++ return "${DS_NOT_FOUND}" ++} ++ + collect_info() { + read_uname_info + read_virt +-- +2.27.0 + diff --git a/ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch b/ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch new file mode 100644 index 0000000..a691f26 --- /dev/null +++ b/ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch @@ -0,0 +1,180 @@ +From b226448134b5182ba685702e7b7a486db772d956 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Fri, 4 Mar 2022 11:21:16 +0100 +Subject: [PATCH 1/2] - Detect a Python version change and clear the cache + (#857) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 54: - Detect a Python version change and clear the cache (#857) +RH-Commit: [1/2] c562cd802eabae9dc14079de0b26d471d2229ca8 +RH-Bugzilla: 1935826 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohamed Gamal Morsy + +commit 78e89b03ecb29e7df3181b1219a0b5f44b9d7532 +Author: Robert Schweikert +Date: Thu Jul 1 12:35:40 2021 -0400 + + - Detect a Python version change and clear the cache (#857) + + summary: Clear cache when a Python version change is detected + + When a distribution gets updated it is possible that the Python version + changes. Python makes no guarantee that pickle is consistent across + versions as such we need to purge the cache and start over. + + Co-authored-by: James Falcon +Conflicts: + tests/integration_tests/util.py: test is not present downstream + +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/cmd/main.py | 30 ++++++++++ + cloudinit/cmd/tests/test_main.py | 2 + + .../assets/test_version_change.pkl | Bin 0 -> 21 bytes + .../modules/test_ssh_auth_key_fingerprints.py | 2 +- + .../modules/test_version_change.py | 56 ++++++++++++++++++ + 5 files changed, 89 insertions(+), 1 deletion(-) + create mode 100644 tests/integration_tests/assets/test_version_change.pkl + create mode 100644 tests/integration_tests/modules/test_version_change.py + +diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py +index baf1381f..21213a4a 100644 +--- a/cloudinit/cmd/main.py ++++ b/cloudinit/cmd/main.py +@@ -210,6 +210,35 @@ def attempt_cmdline_url(path, network=True, cmdline=None): + (cmdline_name, url, path)) + + ++def purge_cache_on_python_version_change(init): ++ """Purge the cache if python version changed on us. ++ ++ There could be changes not represented in our cache (obj.pkl) after we ++ upgrade to a new version of python, so at that point clear the cache ++ """ ++ current_python_version = '%d.%d' % ( ++ sys.version_info.major, sys.version_info.minor ++ ) ++ python_version_path = os.path.join( ++ init.paths.get_cpath('data'), 'python-version' ++ ) ++ if os.path.exists(python_version_path): ++ cached_python_version = open(python_version_path).read() ++ # The Python version has changed out from under us, anything that was ++ # pickled previously is likely useless due to API changes. ++ if cached_python_version != current_python_version: ++ LOG.debug('Python version change detected. Purging cache') ++ init.purge_cache(True) ++ util.write_file(python_version_path, current_python_version) ++ else: ++ if os.path.exists(init.paths.get_ipath_cur('obj_pkl')): ++ LOG.info( ++ 'Writing python-version file. ' ++ 'Cache compatibility status is currently unknown.' 
++ ) ++ util.write_file(python_version_path, current_python_version) ++ ++ + def main_init(name, args): + deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] + if args.local: +@@ -276,6 +305,7 @@ def main_init(name, args): + util.logexc(LOG, "Failed to initialize, likely bad things to come!") + # Stage 4 + path_helper = init.paths ++ purge_cache_on_python_version_change(init) + mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK + + if mode == sources.DSMODE_NETWORK: +diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py +index 78b27441..1f5975b0 100644 +--- a/cloudinit/cmd/tests/test_main.py ++++ b/cloudinit/cmd/tests/test_main.py +@@ -17,6 +17,8 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') + + + class TestMain(FilesystemMockingTestCase): ++ with_logs = True ++ allowed_subp = False + + def setUp(self): + super(TestMain, self).setUp() +diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py +index b9b0d85e..e1946cb1 100644 +--- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py ++++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py +@@ -18,7 +18,7 @@ USER_DATA_SSH_AUTHKEY_DISABLE = """\ + no_ssh_fingerprints: true + """ + +-USER_DATA_SSH_AUTHKEY_ENABLE="""\ ++USER_DATA_SSH_AUTHKEY_ENABLE = """\ + #cloud-config + ssh_genkeytypes: + - ecdsa +diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py +new file mode 100644 +index 00000000..4e9ab63f +--- /dev/null ++++ b/tests/integration_tests/modules/test_version_change.py +@@ -0,0 +1,56 @@ ++from pathlib import Path ++ ++from tests.integration_tests.instances import IntegrationInstance ++from tests.integration_tests.util import ASSETS_DIR ++ ++ ++PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') ++TEST_PICKLE = ASSETS_DIR / 'test_version_change.pkl' ++ ++ ++def _assert_no_pickle_problems(log): ++ assert 'Failed loading pickled blob' not in log ++ assert 'Traceback' not in log ++ assert 'WARN' not in log ++ ++ ++def test_reboot_without_version_change(client: IntegrationInstance): ++ log = client.read_from_file('/var/log/cloud-init.log') ++ assert 'Python version change detected' not in log ++ assert 'Cache compatibility status is currently unknown.' not in log ++ _assert_no_pickle_problems(log) ++ ++ client.restart() ++ log = client.read_from_file('/var/log/cloud-init.log') ++ assert 'Python version change detected' not in log ++ assert 'Could not determine Python version used to write cache' not in log ++ _assert_no_pickle_problems(log) ++ ++ # Now ensure that loading a bad pickle gives us problems ++ client.push_file(TEST_PICKLE, PICKLE_PATH) ++ client.restart() ++ log = client.read_from_file('/var/log/cloud-init.log') ++ assert 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log ++ ++ ++def test_cache_purged_on_version_change(client: IntegrationInstance): ++ # Start by pushing the invalid pickle so we'll hit an error if the ++ # cache didn't actually get purged ++ client.push_file(TEST_PICKLE, PICKLE_PATH) ++ client.execute("echo '1.0' > /var/lib/cloud/data/python-version") ++ client.restart() ++ log = client.read_from_file('/var/log/cloud-init.log') ++ assert 'Python version change detected. 
Purging cache' in log ++ _assert_no_pickle_problems(log) ++ ++ ++def test_log_message_on_missing_version_file(client: IntegrationInstance): ++ # Start by pushing a pickle so we can see the log message ++ client.push_file(TEST_PICKLE, PICKLE_PATH) ++ client.execute("rm /var/lib/cloud/data/python-version") ++ client.restart() ++ log = client.read_from_file('/var/log/cloud-init.log') ++ assert ( ++ 'Writing python-version file. ' ++ 'Cache compatibility status is currently unknown.' ++ ) in log +-- +2.31.1 + diff --git a/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch b/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch new file mode 100644 index 0000000..d4ec623 --- /dev/null +++ b/ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch @@ -0,0 +1,474 @@ +From 7bd016008429f0a18393a070d88e669f3ed89caa Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Fri, 11 Feb 2022 14:37:46 +0100 +Subject: [PATCH] Fix IPv6 netmask format for sysconfig (#1215) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 48: Fix IPv6 netmask format for sysconfig (#1215) +RH-Commit: [1/1] 4c940bbcf85dba1fce9f4acb9fc7820c0d7777f6 +RH-Bugzilla: 2046540 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Vitaly Kuznetsov + +commit b97a30f0a05c1dea918c46ca9c05c869d15fe2d5 +Author: Harald +Date: Tue Feb 8 15:49:00 2022 +0100 + + Fix IPv6 netmask format for sysconfig (#1215) + + This change converts the IPv6 netmask from the network_data.json[1] + format to the CIDR style, /. + + Using an IPv6 address like ffff:ffff:ffff:ffff:: does not work with + NetworkManager, nor networkscripts. + + NetworkManager will ignore the route, logging: + ifcfg-rh: ignoring invalid route at \ + "::/:: via fd00:fd00:fd00:2::fffe dev $DEV" \ + (/etc/sysconfig/network-scripts/route6-$DEV:3): \ + Argument for "::/::" is not ADDR/PREFIX format + + Similarly if using networkscripts, ip route fail with error: + Error: inet6 prefix is expected rather than \ + "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::". + + Also a bit of refactoring ... + + cloudinit.net.sysconfig.Route.to_string: + * Move a couple of lines around to reduce repeated code. + * if "ADDRESS" not in key -> continute, so that the + code block following it can be de-indented. + cloudinit.net.network_state: + * Refactors the ipv4_mask_to_net_prefix, ipv6_mask_to_net_prefix + removes mask_to_net_prefix methods. Utilize ipaddress library to + do some of the heavy lifting. 
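
    As a rough illustration of the conversion described above (standalone Python using only the
    stdlib ipaddress module; this is an editorial sketch, not code taken from the patch itself):

        import ipaddress

        # IPv4: ip_network() accepts either a prefix length or a dotted netmask,
        # so both spellings collapse to the same prefix.
        assert ipaddress.ip_network("0.0.0.0/255.255.255.0").prefixlen == 24
        assert ipaddress.ip_network("0.0.0.0/24").prefixlen == 24

        # IPv6: only a plain prefix length is accepted this way ...
        assert ipaddress.ip_network("::/48").prefixlen == 48
        # ... an address-style mask such as "ffff:ffff:ffff::" raises ValueError,
        # which is why the patched ipv6_mask_to_net_prefix() counts the leading
        # one-bits itself and maps "ffff:ffff:ffff::" to 48.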
+ + LP: #1959148 + +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/net/__init__.py | 7 +- + cloudinit/net/network_state.py | 103 +++++++----------- + cloudinit/net/sysconfig.py | 91 ++++++++++------ + cloudinit/sources/DataSourceOpenNebula.py | 2 +- + .../sources/helpers/vmware/imc/config_nic.py | 4 +- + tests/unittests/test_net.py | 78 ++++++++++++- + 6 files changed, 176 insertions(+), 109 deletions(-) + +diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py +index 003efa2a..12bf64de 100644 +--- a/cloudinit/net/__init__.py ++++ b/cloudinit/net/__init__.py +@@ -14,7 +14,7 @@ import re + + from cloudinit import subp + from cloudinit import util +-from cloudinit.net.network_state import mask_to_net_prefix ++from cloudinit.net.network_state import ipv4_mask_to_net_prefix + from cloudinit.url_helper import UrlError, readurl + + LOG = logging.getLogger(__name__) +@@ -1048,10 +1048,11 @@ class EphemeralIPv4Network(object): + 'Cannot init network on {0} with {1}/{2} and bcast {3}'.format( + interface, ip, prefix_or_mask, broadcast)) + try: +- self.prefix = mask_to_net_prefix(prefix_or_mask) ++ self.prefix = ipv4_mask_to_net_prefix(prefix_or_mask) + except ValueError as e: + raise ValueError( +- 'Cannot setup network: {0}'.format(e) ++ "Cannot setup network, invalid prefix or " ++ "netmask: {0}".format(e) + ) from e + + self.connectivity_url = connectivity_url +diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py +index e8bf9e39..2768ef94 100644 +--- a/cloudinit/net/network_state.py ++++ b/cloudinit/net/network_state.py +@@ -6,6 +6,7 @@ + + import copy + import functools ++import ipaddress + import logging + import socket + import struct +@@ -872,12 +873,18 @@ def _normalize_net_keys(network, address_keys=()): + try: + prefix = int(maybe_prefix) + except ValueError: +- # this supports input of
/255.255.255.0 +- prefix = mask_to_net_prefix(maybe_prefix) +- elif netmask: +- prefix = mask_to_net_prefix(netmask) +- elif 'prefix' in net: +- prefix = int(net['prefix']) ++ if ipv6: ++ # this supports input of ffff:ffff:ffff:: ++ prefix = ipv6_mask_to_net_prefix(maybe_prefix) ++ else: ++ # this supports input of 255.255.255.0 ++ prefix = ipv4_mask_to_net_prefix(maybe_prefix) ++ elif netmask and not ipv6: ++ prefix = ipv4_mask_to_net_prefix(netmask) ++ elif netmask and ipv6: ++ prefix = ipv6_mask_to_net_prefix(netmask) ++ elif "prefix" in net: ++ prefix = int(net["prefix"]) + else: + prefix = 64 if ipv6 else 24 + +@@ -972,72 +979,42 @@ def ipv4_mask_to_net_prefix(mask): + str(24) => 24 + "24" => 24 + """ +- if isinstance(mask, int): +- return mask +- if isinstance(mask, str): +- try: +- return int(mask) +- except ValueError: +- pass +- else: +- raise TypeError("mask '%s' is not a string or int") +- +- if '.' not in mask: +- raise ValueError("netmask '%s' does not contain a '.'" % mask) +- +- toks = mask.split(".") +- if len(toks) != 4: +- raise ValueError("netmask '%s' had only %d parts" % (mask, len(toks))) +- +- return sum([bin(int(x)).count('1') for x in toks]) ++ return ipaddress.ip_network(f"0.0.0.0/{mask}").prefixlen + + + def ipv6_mask_to_net_prefix(mask): + """Convert an ipv6 netmask (very uncommon) or prefix (64) to prefix. + +- If 'mask' is an integer or string representation of one then +- int(mask) will be returned. ++ If the input is already an integer or a string representation of ++ an integer, then int(mask) will be returned. ++ "ffff:ffff:ffff::" => 48 ++ "48" => 48 + """ +- +- if isinstance(mask, int): +- return mask +- if isinstance(mask, str): +- try: +- return int(mask) +- except ValueError: +- pass +- else: +- raise TypeError("mask '%s' is not a string or int") +- +- if ':' not in mask: +- raise ValueError("mask '%s' does not have a ':'") +- +- bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00, +- 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc, +- 0xfffe, 0xffff] +- prefix = 0 +- for word in mask.split(':'): +- if not word or int(word, 16) == 0: +- break +- prefix += bitCount.index(int(word, 16)) +- +- return prefix +- +- +-def mask_to_net_prefix(mask): +- """Return the network prefix for the netmask provided. +- +- Supports ipv4 or ipv6 netmasks.""" + try: +- # if 'mask' is a prefix that is an integer. +- # then just return it. +- return int(mask) ++ # In the case the mask is already a prefix ++ prefixlen = ipaddress.ip_network(f"::/{mask}").prefixlen ++ return prefixlen + except ValueError: ++ # ValueError means mask is an IPv6 address representation and need ++ # conversion. 
+ pass +- if is_ipv6_addr(mask): +- return ipv6_mask_to_net_prefix(mask) +- else: +- return ipv4_mask_to_net_prefix(mask) ++ ++ netmask = ipaddress.ip_address(mask) ++ mask_int = int(netmask) ++ # If the mask is all zeroes, just return it ++ if mask_int == 0: ++ return mask_int ++ ++ trailing_zeroes = min( ++ ipaddress.IPV6LENGTH, (~mask_int & (mask_int - 1)).bit_length() ++ ) ++ leading_ones = mask_int >> trailing_zeroes ++ prefixlen = ipaddress.IPV6LENGTH - trailing_zeroes ++ all_ones = (1 << prefixlen) - 1 ++ if leading_ones != all_ones: ++ raise ValueError("Invalid network mask '%s'" % mask) ++ ++ return prefixlen + + + def mask_and_ipv4_to_bcast_addr(mask, ip): +diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py +index d5440998..7ecbe1c3 100644 +--- a/cloudinit/net/sysconfig.py ++++ b/cloudinit/net/sysconfig.py +@@ -12,6 +12,7 @@ from cloudinit import util + from cloudinit import subp + from cloudinit.distros.parsers import networkmanager_conf + from cloudinit.distros.parsers import resolv_conf ++from cloudinit.net import network_state + + from . import renderer + from .network_state import ( +@@ -171,43 +172,61 @@ class Route(ConfigMap): + # (because Route can contain a mix of IPv4 and IPv6) + reindex = -1 + for key in sorted(self._conf.keys()): +- if 'ADDRESS' in key: +- index = key.replace('ADDRESS', '') +- address_value = str(self._conf[key]) +- # only accept combinations: +- # if proto ipv6 only display ipv6 routes +- # if proto ipv4 only display ipv4 routes +- # do not add ipv6 routes if proto is ipv4 +- # do not add ipv4 routes if proto is ipv6 +- # (this array will contain a mix of ipv4 and ipv6) +- if proto == "ipv4" and not self.is_ipv6_route(address_value): +- netmask_value = str(self._conf['NETMASK' + index]) +- gateway_value = str(self._conf['GATEWAY' + index]) +- # increase IPv4 index +- reindex = reindex + 1 +- buf.write("%s=%s\n" % ('ADDRESS' + str(reindex), +- _quote_value(address_value))) +- buf.write("%s=%s\n" % ('GATEWAY' + str(reindex), +- _quote_value(gateway_value))) +- buf.write("%s=%s\n" % ('NETMASK' + str(reindex), +- _quote_value(netmask_value))) +- metric_key = 'METRIC' + index +- if metric_key in self._conf: +- metric_value = str(self._conf['METRIC' + index]) +- buf.write("%s=%s\n" % ('METRIC' + str(reindex), +- _quote_value(metric_value))) +- elif proto == "ipv6" and self.is_ipv6_route(address_value): +- netmask_value = str(self._conf['NETMASK' + index]) +- gateway_value = str(self._conf['GATEWAY' + index]) +- metric_value = ( +- 'metric ' + str(self._conf['METRIC' + index]) +- if 'METRIC' + index in self._conf else '') ++ if "ADDRESS" not in key: ++ continue ++ ++ index = key.replace("ADDRESS", "") ++ address_value = str(self._conf[key]) ++ netmask_value = str(self._conf["NETMASK" + index]) ++ gateway_value = str(self._conf["GATEWAY" + index]) ++ ++ # only accept combinations: ++ # if proto ipv6 only display ipv6 routes ++ # if proto ipv4 only display ipv4 routes ++ # do not add ipv6 routes if proto is ipv4 ++ # do not add ipv4 routes if proto is ipv6 ++ # (this array will contain a mix of ipv4 and ipv6) ++ if proto == "ipv4" and not self.is_ipv6_route(address_value): ++ # increase IPv4 index ++ reindex = reindex + 1 ++ buf.write( ++ "%s=%s\n" ++ % ("ADDRESS" + str(reindex), _quote_value(address_value)) ++ ) ++ buf.write( ++ "%s=%s\n" ++ % ("GATEWAY" + str(reindex), _quote_value(gateway_value)) ++ ) ++ buf.write( ++ "%s=%s\n" ++ % ("NETMASK" + str(reindex), _quote_value(netmask_value)) ++ ) ++ metric_key = "METRIC" + index ++ if 
metric_key in self._conf: ++ metric_value = str(self._conf["METRIC" + index]) + buf.write( +- "%s/%s via %s %s dev %s\n" % (address_value, +- netmask_value, +- gateway_value, +- metric_value, +- self._route_name)) ++ "%s=%s\n" ++ % ("METRIC" + str(reindex), _quote_value(metric_value)) ++ ) ++ elif proto == "ipv6" and self.is_ipv6_route(address_value): ++ prefix_value = network_state.ipv6_mask_to_net_prefix( ++ netmask_value ++ ) ++ metric_value = ( ++ "metric " + str(self._conf["METRIC" + index]) ++ if "METRIC" + index in self._conf ++ else "" ++ ) ++ buf.write( ++ "%s/%s via %s %s dev %s\n" ++ % ( ++ address_value, ++ prefix_value, ++ gateway_value, ++ metric_value, ++ self._route_name, ++ ) ++ ) + + return buf.getvalue() + +diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py +index 730ec586..e7980ab1 100644 +--- a/cloudinit/sources/DataSourceOpenNebula.py ++++ b/cloudinit/sources/DataSourceOpenNebula.py +@@ -233,7 +233,7 @@ class OpenNebulaNetwork(object): + # Set IPv4 address + devconf['addresses'] = [] + mask = self.get_mask(c_dev) +- prefix = str(net.mask_to_net_prefix(mask)) ++ prefix = str(net.ipv4_mask_to_net_prefix(mask)) + devconf['addresses'].append( + self.get_ip(c_dev, mac) + '/' + prefix) + +diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py +index 9cd2c0c0..3a45c67e 100644 +--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py ++++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py +@@ -9,7 +9,7 @@ import logging + import os + import re + +-from cloudinit.net.network_state import mask_to_net_prefix ++from cloudinit.net.network_state import ipv4_mask_to_net_prefix + from cloudinit import subp + from cloudinit import util + +@@ -180,7 +180,7 @@ class NicConfigurator(object): + """ + route_list = [] + +- cidr = mask_to_net_prefix(netmask) ++ cidr = ipv4_mask_to_net_prefix(netmask) + + for gateway in gateways: + destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr) +diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py +index 14d3462f..a7f6a1f7 100644 +--- a/tests/unittests/test_net.py ++++ b/tests/unittests/test_net.py +@@ -2025,10 +2025,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true + routes: + - gateway: 2001:67c:1562:1 + network: 2001:67c:1 +- netmask: ffff:ffff:0 ++ netmask: "ffff:ffff::" + - gateway: 3001:67c:1562:1 + network: 3001:67c:1 +- netmask: ffff:ffff:0 ++ netmask: "ffff:ffff::" + metric: 10000 + """), + 'expected_netplan': textwrap.dedent(""" +@@ -2295,8 +2295,8 @@ iface bond0 inet6 static + 'route6-bond0': textwrap.dedent("""\ + # Created by cloud-init on instance boot automatically, do not edit. 
+ # +- 2001:67c:1/ffff:ffff:0 via 2001:67c:1562:1 dev bond0 +- 3001:67c:1/ffff:ffff:0 via 3001:67c:1562:1 metric 10000 dev bond0 ++ 2001:67c:1/32 via 2001:67c:1562:1 dev bond0 ++ 3001:67c:1/32 via 3001:67c:1562:1 metric 10000 dev bond0 + """), + 'route-bond0': textwrap.dedent("""\ + ADDRESS0=10.1.3.0 +@@ -3088,6 +3088,76 @@ USERCTL=no + renderer.render_network_state(ns, target=render_dir) + self.assertEqual([], os.listdir(render_dir)) + ++ def test_invalid_network_mask_ipv6(self): ++ net_json = { ++ "services": [{"type": "dns", "address": "172.19.0.12"}], ++ "networks": [ ++ { ++ "network_id": "public-ipv6", ++ "type": "ipv6", ++ "netmask": "", ++ "link": "tap1a81968a-79", ++ "routes": [ ++ { ++ "gateway": "2001:DB8::1", ++ "netmask": "ff:ff:ff:ff::", ++ "network": "2001:DB8:1::1", ++ }, ++ ], ++ "ip_address": "2001:DB8::10", ++ "id": "network1", ++ } ++ ], ++ "links": [ ++ { ++ "ethernet_mac_address": "fa:16:3e:ed:9a:59", ++ "mtu": None, ++ "type": "bridge", ++ "id": "tap1a81968a-79", ++ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", ++ }, ++ ], ++ } ++ macs = {"fa:16:3e:ed:9a:59": "eth0"} ++ network_cfg = openstack.convert_net_json(net_json, known_macs=macs) ++ with self.assertRaises(ValueError): ++ network_state.parse_net_config_data(network_cfg, skip_broken=False) ++ ++ def test_invalid_network_mask_ipv4(self): ++ net_json = { ++ "services": [{"type": "dns", "address": "172.19.0.12"}], ++ "networks": [ ++ { ++ "network_id": "public-ipv4", ++ "type": "ipv4", ++ "netmask": "", ++ "link": "tap1a81968a-79", ++ "routes": [ ++ { ++ "gateway": "172.20.0.1", ++ "netmask": "255.234.255.0", ++ "network": "172.19.0.0", ++ }, ++ ], ++ "ip_address": "172.20.0.10", ++ "id": "network1", ++ } ++ ], ++ "links": [ ++ { ++ "ethernet_mac_address": "fa:16:3e:ed:9a:59", ++ "mtu": None, ++ "type": "bridge", ++ "id": "tap1a81968a-79", ++ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", ++ }, ++ ], ++ } ++ macs = {"fa:16:3e:ed:9a:59": "eth0"} ++ network_cfg = openstack.convert_net_json(net_json, known_macs=macs) ++ with self.assertRaises(ValueError): ++ network_state.parse_net_config_data(network_cfg, skip_broken=False) ++ + def test_openstack_rendering_samples(self): + for os_sample in OS_SAMPLES: + render_dir = self.tmp_dir() +-- +2.27.0 + diff --git a/ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch b/ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch new file mode 100644 index 0000000..889b8db --- /dev/null +++ b/ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch @@ -0,0 +1,705 @@ +From 04a4cc7b8da04ba4103118cf9d975d8e9548e0dc Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Fri, 4 Mar 2022 11:23:22 +0100 +Subject: [PATCH 2/2] Fix MIME policy failure on python version upgrade (#934) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 54: - Detect a Python version change and clear the cache (#857) +RH-Commit: [2/2] 05fc8c52a39b5ad464ad146488703467e39d73b1 +RH-Bugzilla: 1935826 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohamed Gamal Morsy + +commit eacb0353803263934aa2ac827c37e461c87cb107 +Author: James Falcon +Date: Thu Jul 15 17:52:21 2021 -0500 + + Fix MIME policy failure on python version upgrade (#934) + + Python 3.6 added a new `policy` attribute to `MIMEMultipart`. + MIMEMultipart may be part of the cached object pickle of a datasource. + Upgrading from an old version of python to 3.6+ will cause the + datasource to be invalid after pickle load. 
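
    A minimal sketch of the resulting guard, for orientation only (simplified and not the code in
    this patch; the actual change, described next, routes the failure through
    DatasourceUnpickleUserDataError and handles it in _pkl_load):

        import pickle

        def load_cached_datasource(blob):
            try:
                ds = pickle.loads(blob)
                # str() on MIME-bearing userdata touches attributes (e.g. 'policy')
                # that only exist when the pickle was written by a newer Python.
                str(ds.userdata)
                return ds
            except AttributeError:
                # Stale pickle from an older interpreter: discard the cache and
                # let cloud-init rediscover the datasource from scratch.
                return None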
+ + This commit uses the upgrade framework to attempt to access the mime + message and fail early (thus discarding the cache) if we cannot. + Commit 78e89b03 should fix this issue more generally. + +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/sources/__init__.py | 18 + + cloudinit/stages.py | 2 + + .../assets/trusty_with_mime.pkl | 572 ++++++++++++++++++ + .../modules/test_persistence.py | 30 + + 4 files changed, 622 insertions(+) + create mode 100644 tests/integration_tests/assets/trusty_with_mime.pkl + create mode 100644 tests/integration_tests/modules/test_persistence.py + +diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py +index 7d74f8d9..338861e6 100644 +--- a/cloudinit/sources/__init__.py ++++ b/cloudinit/sources/__init__.py +@@ -74,6 +74,10 @@ NetworkConfigSource = namedtuple('NetworkConfigSource', + _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES) + + ++class DatasourceUnpickleUserDataError(Exception): ++ """Raised when userdata is unable to be unpickled due to python upgrades""" ++ ++ + class DataSourceNotFoundException(Exception): + pass + +@@ -227,6 +231,20 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): + self.vendordata2 = None + if not hasattr(self, 'vendordata2_raw'): + self.vendordata2_raw = None ++ if hasattr(self, 'userdata') and self.userdata is not None: ++ # If userdata stores MIME data, on < python3.6 it will be ++ # missing the 'policy' attribute that exists on >=python3.6. ++ # Calling str() on the userdata will attempt to access this ++ # policy attribute. This will raise an exception, causing ++ # the pickle load to fail, so cloud-init will discard the cache ++ try: ++ str(self.userdata) ++ except AttributeError as e: ++ LOG.debug( ++ "Unable to unpickle datasource: %s." 
++ " Ignoring current cache.", e ++ ) ++ raise DatasourceUnpickleUserDataError() from e + + def __str__(self): + return type_utils.obj_name(self) +diff --git a/cloudinit/stages.py b/cloudinit/stages.py +index 83e25dd1..e709a5cf 100644 +--- a/cloudinit/stages.py ++++ b/cloudinit/stages.py +@@ -980,6 +980,8 @@ def _pkl_load(fname): + return None + try: + return pickle.loads(pickle_contents) ++ except sources.DatasourceUnpickleUserDataError: ++ return None + except Exception: + util.logexc(LOG, "Failed loading pickled blob from %s", fname) + return None +diff --git a/tests/integration_tests/assets/trusty_with_mime.pkl b/tests/integration_tests/assets/trusty_with_mime.pkl +new file mode 100644 +index 00000000..a4089ecf +--- /dev/null ++++ b/tests/integration_tests/assets/trusty_with_mime.pkl +@@ -0,0 +1,572 @@ ++ccopy_reg ++_reconstructor ++p1 ++(ccloudinit.sources.DataSourceNoCloud ++DataSourceNoCloudNet ++p2 ++c__builtin__ ++object ++p3 ++NtRp4 ++(dp5 ++S'paths' ++p6 ++g1 ++(ccloudinit.helpers ++Paths ++p7 ++g3 ++NtRp8 ++(dp9 ++S'lookups' ++p10 ++(dp11 ++S'cloud_config' ++p12 ++S'cloud-config.txt' ++p13 ++sS'userdata' ++p14 ++S'user-data.txt.i' ++p15 ++sS'vendordata' ++p16 ++S'vendor-data.txt.i' ++p17 ++sS'userdata_raw' ++p18 ++S'user-data.txt' ++p19 ++sS'boothooks' ++p20 ++g20 ++sS'scripts' ++p21 ++g21 ++sS'sem' ++p22 ++g22 ++sS'data' ++p23 ++g23 ++sS'vendor_scripts' ++p24 ++S'scripts/vendor' ++p25 ++sS'handlers' ++p26 ++g26 ++sS'obj_pkl' ++p27 ++S'obj.pkl' ++p28 ++sS'vendordata_raw' ++p29 ++S'vendor-data.txt' ++p30 ++sS'vendor_cloud_config' ++p31 ++S'vendor-cloud-config.txt' ++p32 ++ssS'template_tpl' ++p33 ++S'/etc/cloud/templates/%s.tmpl' ++p34 ++sS'cfgs' ++p35 ++(dp36 ++S'cloud_dir' ++p37 ++S'/var/lib/cloud/' ++p38 ++sS'templates_dir' ++p39 ++S'/etc/cloud/templates/' ++p40 ++sS'upstart_dir' ++p41 ++S'/etc/init/' ++p42 ++ssS'cloud_dir' ++p43 ++g38 ++sS'datasource' ++p44 ++NsS'upstart_conf_d' ++p45 ++g42 ++sS'boot_finished' ++p46 ++S'/var/lib/cloud/instance/boot-finished' ++p47 ++sS'instance_link' ++p48 ++S'/var/lib/cloud/instance' ++p49 ++sS'seed_dir' ++p50 ++S'/var/lib/cloud/seed' ++p51 ++sbsS'supported_seed_starts' ++p52 ++(S'http://' ++p53 ++S'https://' ++p54 ++S'ftp://' ++p55 ++tp56 ++sS'sys_cfg' ++p57 ++(dp58 ++S'output' ++p59 ++(dp60 ++S'all' ++p61 ++S'| tee -a /var/log/cloud-init-output.log' ++p62 ++ssS'users' ++p63 ++(lp64 ++S'default' ++p65 ++asS'def_log_file' ++p66 ++S'/var/log/cloud-init.log' ++p67 ++sS'cloud_final_modules' ++p68 ++(lp69 ++S'rightscale_userdata' ++p70 ++aS'scripts-vendor' ++p71 ++aS'scripts-per-once' ++p72 ++aS'scripts-per-boot' ++p73 ++aS'scripts-per-instance' ++p74 ++aS'scripts-user' ++p75 ++aS'ssh-authkey-fingerprints' ++p76 ++aS'keys-to-console' ++p77 ++aS'phone-home' ++p78 ++aS'final-message' ++p79 ++aS'power-state-change' ++p80 ++asS'disable_root' ++p81 ++I01 ++sS'syslog_fix_perms' ++p82 ++S'syslog:adm' ++p83 ++sS'log_cfgs' ++p84 ++(lp85 ++(lp86 ++S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n' ++p87 
++aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n' ++p88 ++aa(lp89 ++g87 ++aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n" ++p90 ++aasS'cloud_init_modules' ++p91 ++(lp92 ++S'migrator' ++p93 ++aS'seed_random' ++p94 ++aS'bootcmd' ++p95 ++aS'write-files' ++p96 ++aS'growpart' ++p97 ++aS'resizefs' ++p98 ++aS'set_hostname' ++p99 ++aS'update_hostname' ++p100 ++aS'update_etc_hosts' ++p101 ++aS'ca-certs' ++p102 ++aS'rsyslog' ++p103 ++aS'users-groups' ++p104 ++aS'ssh' ++p105 ++asS'preserve_hostname' ++p106 ++I00 ++sS'_log' ++p107 ++(lp108 ++g87 ++ag90 ++ag88 ++asS'datasource_list' ++p109 ++(lp110 ++S'NoCloud' ++p111 ++aS'ConfigDrive' ++p112 ++aS'OpenNebula' ++p113 ++aS'Azure' ++p114 ++aS'AltCloud' ++p115 ++aS'OVF' ++p116 ++aS'MAAS' ++p117 ++aS'GCE' ++p118 ++aS'OpenStack' ++p119 ++aS'CloudSigma' ++p120 ++aS'Ec2' ++p121 ++aS'CloudStack' ++p122 ++aS'SmartOS' ++p123 ++aS'None' ++p124 ++asS'vendor_data' ++p125 ++(dp126 ++S'prefix' ++p127 ++(lp128 ++sS'enabled' ++p129 ++I01 ++ssS'cloud_config_modules' ++p130 ++(lp131 ++S'emit_upstart' ++p132 ++aS'disk_setup' ++p133 ++aS'mounts' ++p134 ++aS'ssh-import-id' ++p135 ++aS'locale' ++p136 ++aS'set-passwords' ++p137 ++aS'grub-dpkg' ++p138 ++aS'apt-pipelining' ++p139 ++aS'apt-configure' ++p140 ++aS'package-update-upgrade-install' ++p141 ++aS'landscape' ++p142 ++aS'timezone' ++p143 ++aS'puppet' ++p144 ++aS'chef' ++p145 ++aS'salt-minion' ++p146 ++aS'mcollective' ++p147 ++aS'disable-ec2-metadata' ++p148 ++aS'runcmd' ++p149 ++aS'byobu' ++p150 ++assg14 ++(iemail.mime.multipart ++MIMEMultipart ++p151 ++(dp152 ++S'_headers' ++p153 ++(lp154 ++(S'Content-Type' ++p155 ++S'multipart/mixed; boundary="===============4291038100093149247=="' ++tp156 ++a(S'MIME-Version' ++p157 ++S'1.0' ++p158 ++tp159 ++a(S'Number-Attachments' ++p160 ++S'1' ++tp161 ++asS'_payload' ++p162 ++(lp163 ++(iemail.mime.base ++MIMEBase ++p164 ++(dp165 ++g153 ++(lp166 ++(g157 ++g158 ++tp167 ++a(S'Content-Type' ++p168 ++S'text/x-not-multipart' ++tp169 ++a(S'Content-Disposition' ++p170 ++S'attachment; filename="part-001"' ++tp171 ++asg162 ++S'' ++sS'_charset' ++p172 ++NsS'_default_type' ++p173 ++S'text/plain' ++p174 ++sS'preamble' ++p175 ++NsS'defects' ++p176 ++(lp177 ++sS'_unixfrom' ++p178 ++NsS'epilogue' ++p179 ++Nsbasg172 ++Nsg173 ++g174 ++sg175 ++Nsg176 ++(lp180 ++sg178 ++Nsg179 ++Nsbsg16 ++S'#cloud-config\n{}\n\n' ++p181 ++sg18 ++S'Content-Type: multipart/mixed; boundary="===============1378281702283945349=="\nMIME-Version: 1.0\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script1.sh"\n\nIyEvYmluL3NoCgplY2hvICdoaScgPiAvdmFyL3RtcC9oaQo=\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script2.sh"\n\nIyEvYmluL2Jhc2gKCmVjaG8gJ2hpMicgPiAvdmFyL3RtcC9oaTIK\n\n--===============1378281702283945349==--\n\n#cloud-config\n# final_message: |\n# This is my final message!\n# $version\n# $timestamp\n# $datasource\n# $uptime\n# updates:\n# network:\n# when: [\'hotplug\']\n' ++p182 ++sg29 ++NsS'dsmode' ++p183 ++S'net' ++p184 ++sS'seed' ++p185 ++S'/var/lib/cloud/seed/nocloud-net' ++p186 ++sS'cmdline_id' ++p187 ++S'ds=nocloud-net' ++p188 ++sS'ud_proc' ++p189 ++g1 
++(ccloudinit.user_data ++UserDataProcessor ++p190 ++g3 ++NtRp191 ++(dp192 ++g6 ++g8 ++sS'ssl_details' ++p193 ++(dp194 ++sbsg50 ++g186 ++sS'ds_cfg' ++p195 ++(dp196 ++sS'distro' ++p197 ++g1 ++(ccloudinit.distros.ubuntu ++Distro ++p198 ++g3 ++NtRp199 ++(dp200 ++S'osfamily' ++p201 ++S'debian' ++p202 ++sS'_paths' ++p203 ++g8 ++sS'name' ++p204 ++S'ubuntu' ++p205 ++sS'_runner' ++p206 ++g1 ++(ccloudinit.helpers ++Runners ++p207 ++g3 ++NtRp208 ++(dp209 ++g6 ++g8 ++sS'sems' ++p210 ++(dp211 ++sbsS'_cfg' ++p212 ++(dp213 ++S'paths' ++p214 ++(dp215 ++g37 ++g38 ++sg39 ++g40 ++sg41 ++g42 ++ssS'default_user' ++p216 ++(dp217 ++S'shell' ++p218 ++S'/bin/bash' ++p219 ++sS'name' ++p220 ++S'ubuntu' ++p221 ++sS'sudo' ++p222 ++(lp223 ++S'ALL=(ALL) NOPASSWD:ALL' ++p224 ++asS'lock_passwd' ++p225 ++I01 ++sS'gecos' ++p226 ++S'Ubuntu' ++p227 ++sS'groups' ++p228 ++(lp229 ++S'adm' ++p230 ++aS'audio' ++p231 ++aS'cdrom' ++p232 ++aS'dialout' ++p233 ++aS'dip' ++p234 ++aS'floppy' ++p235 ++aS'netdev' ++p236 ++aS'plugdev' ++p237 ++aS'sudo' ++p238 ++aS'video' ++p239 ++assS'package_mirrors' ++p240 ++(lp241 ++(dp242 ++S'arches' ++p243 ++(lp244 ++S'i386' ++p245 ++aS'amd64' ++p246 ++asS'failsafe' ++p247 ++(dp248 ++S'security' ++p249 ++S'http://security.ubuntu.com/ubuntu' ++p250 ++sS'primary' ++p251 ++S'http://archive.ubuntu.com/ubuntu' ++p252 ++ssS'search' ++p253 ++(dp254 ++S'security' ++p255 ++(lp256 ++sS'primary' ++p257 ++(lp258 ++S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/' ++p259 ++aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/' ++p260 ++aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/' ++p261 ++assa(dp262 ++S'arches' ++p263 ++(lp264 ++S'armhf' ++p265 ++aS'armel' ++p266 ++aS'default' ++p267 ++asS'failsafe' ++p268 ++(dp269 ++S'security' ++p270 ++S'http://ports.ubuntu.com/ubuntu-ports' ++p271 ++sS'primary' ++p272 ++S'http://ports.ubuntu.com/ubuntu-ports' ++p273 ++ssasS'ssh_svcname' ++p274 ++S'ssh' ++p275 ++ssbsS'metadata' ++p276 ++(dp277 ++g183 ++g184 ++sS'local-hostname' ++p278 ++S'me' ++p279 ++sS'instance-id' ++p280 ++S'me' ++p281 ++ssb. +\ No newline at end of file +diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py +new file mode 100644 +index 00000000..00fdeaea +--- /dev/null ++++ b/tests/integration_tests/modules/test_persistence.py +@@ -0,0 +1,30 @@ ++# This file is part of cloud-init. See LICENSE file for license information. ++"""Test the behavior of loading/discarding pickle data""" ++from pathlib import Path ++ ++import pytest ++ ++from tests.integration_tests.instances import IntegrationInstance ++from tests.integration_tests.util import ( ++ ASSETS_DIR, ++ verify_ordered_items_in_text, ++) ++ ++ ++PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') ++TEST_PICKLE = ASSETS_DIR / 'trusty_with_mime.pkl' ++ ++ ++@pytest.mark.lxd_container ++def test_log_message_on_missing_version_file(client: IntegrationInstance): ++ client.push_file(TEST_PICKLE, PICKLE_PATH) ++ client.restart() ++ assert client.execute('cloud-init status --wait').ok ++ log = client.read_from_file('/var/log/cloud-init.log') ++ verify_ordered_items_in_text([ ++ "Unable to unpickle datasource: 'MIMEMultipart' object has no " ++ "attribute 'policy'. 
Ignoring current cache.", ++ 'no cache found', ++ 'Searching for local data source', ++ 'SUCCESS: found local data from DataSourceNoCloud' ++ ], log) +-- +2.31.1 + diff --git a/ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch b/ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch new file mode 100644 index 0000000..6a9cfcc --- /dev/null +++ b/ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch @@ -0,0 +1,262 @@ +From 71989367e7a634fdd2af8ef58473975e0ef60464 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Sat, 21 Aug 2021 13:53:27 +0200 +Subject: [PATCH] Fix home permissions modified by ssh module (SC-338) (#984) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 29: Fix home permissions modified by ssh module (SC-338) (#984) +RH-Commit: [1/1] c409f2609b1d7e024eba77b55a196a4cafadd1d7 (eesposit/cloud-init) +RH-Bugzilla: 1995840 +RH-Acked-by: Mohamed Gamal Morsy +RH-Acked-by: Eduardo Otubo + +TESTED: By me and QA +BREW: 39178090 + +Fix home permissions modified by ssh module (SC-338) (#984) + +commit 7d3f5d750f6111c2716143364ea33486df67c927 +Author: James Falcon +Date: Fri Aug 20 17:09:49 2021 -0500 + + Fix home permissions modified by ssh module (SC-338) (#984) + + Fix home permissions modified by ssh module + + In #956, we updated the file and directory permissions for keys not in + the user's home directory. We also unintentionally modified the + permissions within the home directory as well. These should not change, + and this commit changes that back. + + LP: #1940233 + +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/ssh_util.py | 35 ++++- + .../modules/test_ssh_keysfile.py | 132 +++++++++++++++--- + 2 files changed, 146 insertions(+), 21 deletions(-) + +diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py +index b8a3c8f7..9ccadf09 100644 +--- a/cloudinit/ssh_util.py ++++ b/cloudinit/ssh_util.py +@@ -321,23 +321,48 @@ def check_create_path(username, filename, strictmodes): + home_folder = os.path.dirname(user_pwent.pw_dir) + for directory in directories: + parent_folder += "/" + directory +- if home_folder.startswith(parent_folder): ++ ++ # security check, disallow symlinks in the AuthorizedKeysFile path. ++ if os.path.islink(parent_folder): ++ LOG.debug( ++ "Invalid directory. Symlink exists in path: %s", ++ parent_folder) ++ return False ++ ++ if os.path.isfile(parent_folder): ++ LOG.debug( ++ "Invalid directory. File exists in path: %s", ++ parent_folder) ++ return False ++ ++ if (home_folder.startswith(parent_folder) or ++ parent_folder == user_pwent.pw_dir): + continue + +- if not os.path.isdir(parent_folder): ++ if not os.path.exists(parent_folder): + # directory does not exist, and permission so far are good: + # create the directory, and make it accessible by everyone + # but owned by root, as it might be used by many users. 
+ with util.SeLinuxGuard(parent_folder): +- os.makedirs(parent_folder, mode=0o755, exist_ok=True) +- util.chownbyid(parent_folder, root_pwent.pw_uid, +- root_pwent.pw_gid) ++ mode = 0o755 ++ uid = root_pwent.pw_uid ++ gid = root_pwent.pw_gid ++ if parent_folder.startswith(user_pwent.pw_dir): ++ mode = 0o700 ++ uid = user_pwent.pw_uid ++ gid = user_pwent.pw_gid ++ os.makedirs(parent_folder, mode=mode, exist_ok=True) ++ util.chownbyid(parent_folder, uid, gid) + + permissions = check_permissions(username, parent_folder, + filename, False, strictmodes) + if not permissions: + return False + ++ if os.path.islink(filename) or os.path.isdir(filename): ++ LOG.debug("%s is not a file!", filename) ++ return False ++ + # check the file + if not os.path.exists(filename): + # if file does not exist: we need to create it, since the +diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py +index f82d7649..3159feb9 100644 +--- a/tests/integration_tests/modules/test_ssh_keysfile.py ++++ b/tests/integration_tests/modules/test_ssh_keysfile.py +@@ -10,10 +10,10 @@ TEST_USER1_KEYS = get_test_rsa_keypair('test1') + TEST_USER2_KEYS = get_test_rsa_keypair('test2') + TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3') + +-USERDATA = """\ ++_USERDATA = """\ + #cloud-config + bootcmd: +- - sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile /etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' /etc/ssh/sshd_config ++ - {bootcmd} + ssh_authorized_keys: + - {default} + users: +@@ -24,27 +24,17 @@ users: + - name: test_user2 + ssh_authorized_keys: + - {user2} +-""".format( # noqa: E501 ++""".format( ++ bootcmd='{bootcmd}', + default=TEST_DEFAULT_KEYS.public_key, + user1=TEST_USER1_KEYS.public_key, + user2=TEST_USER2_KEYS.public_key, + ) + + +-@pytest.mark.ubuntu +-@pytest.mark.user_data(USERDATA) +-def test_authorized_keys(client: IntegrationInstance): +- expected_keys = [ +- ('test_user1', '/home/test_user1/.ssh/authorized_keys2', +- TEST_USER1_KEYS), +- ('test_user2', '/home/test_user2/.ssh/authorized_keys2', +- TEST_USER2_KEYS), +- ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', +- TEST_DEFAULT_KEYS), +- ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), +- ] +- ++def common_verify(client, expected_keys): + for user, filename, keys in expected_keys: ++ # Ensure key is in the key file + contents = client.read_from_file(filename) + if user in ['ubuntu', 'root']: + # Our personal public key gets added by pycloudlib +@@ -83,3 +73,113 @@ def test_authorized_keys(client: IntegrationInstance): + look_for_keys=False, + allow_agent=False, + ) ++ ++ # Ensure we haven't messed with any /home permissions ++ # See LP: #1940233 ++ home_dir = '/home/{}'.format(user) ++ home_perms = '755' ++ if user == 'root': ++ home_dir = '/root' ++ home_perms = '700' ++ assert '{} {}'.format(user, home_perms) == client.execute( ++ 'stat -c "%U %a" {}'.format(home_dir) ++ ) ++ if client.execute("test -d {}/.ssh".format(home_dir)).ok: ++ assert '{} 700'.format(user) == client.execute( ++ 'stat -c "%U %a" {}/.ssh'.format(home_dir) ++ ) ++ assert '{} 600'.format(user) == client.execute( ++ 'stat -c "%U %a" {}'.format(filename) ++ ) ++ ++ # Also ensure ssh-keygen works as expected ++ client.execute('mkdir {}/.ssh'.format(home_dir)) ++ assert client.execute( ++ "ssh-keygen -b 2048 -t rsa -f {}/.ssh/id_rsa -q -N ''".format( ++ home_dir) ++ ).ok ++ assert client.execute('test -f {}/.ssh/id_rsa'.format(home_dir)) ++ assert client.execute('test -f {}/.ssh/id_rsa.pub'.format(home_dir)) 
++ ++ assert 'root 755' == client.execute('stat -c "%U %a" /home') ++ ++ ++DEFAULT_KEYS_USERDATA = _USERDATA.format(bootcmd='""') ++ ++ ++@pytest.mark.ubuntu ++@pytest.mark.user_data(DEFAULT_KEYS_USERDATA) ++def test_authorized_keys_default(client: IntegrationInstance): ++ expected_keys = [ ++ ('test_user1', '/home/test_user1/.ssh/authorized_keys', ++ TEST_USER1_KEYS), ++ ('test_user2', '/home/test_user2/.ssh/authorized_keys', ++ TEST_USER2_KEYS), ++ ('ubuntu', '/home/ubuntu/.ssh/authorized_keys', ++ TEST_DEFAULT_KEYS), ++ ('root', '/root/.ssh/authorized_keys', TEST_DEFAULT_KEYS), ++ ] ++ common_verify(client, expected_keys) ++ ++ ++AUTHORIZED_KEYS2_USERDATA = _USERDATA.format(bootcmd=( ++ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " ++ "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' " ++ "/etc/ssh/sshd_config")) ++ ++ ++@pytest.mark.ubuntu ++@pytest.mark.user_data(AUTHORIZED_KEYS2_USERDATA) ++def test_authorized_keys2(client: IntegrationInstance): ++ expected_keys = [ ++ ('test_user1', '/home/test_user1/.ssh/authorized_keys2', ++ TEST_USER1_KEYS), ++ ('test_user2', '/home/test_user2/.ssh/authorized_keys2', ++ TEST_USER2_KEYS), ++ ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', ++ TEST_DEFAULT_KEYS), ++ ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), ++ ] ++ common_verify(client, expected_keys) ++ ++ ++NESTED_KEYS_USERDATA = _USERDATA.format(bootcmd=( ++ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " ++ "/etc/ssh/authorized_keys %h/foo/bar/ssh/keys;' " ++ "/etc/ssh/sshd_config")) ++ ++ ++@pytest.mark.ubuntu ++@pytest.mark.user_data(NESTED_KEYS_USERDATA) ++def test_nested_keys(client: IntegrationInstance): ++ expected_keys = [ ++ ('test_user1', '/home/test_user1/foo/bar/ssh/keys', ++ TEST_USER1_KEYS), ++ ('test_user2', '/home/test_user2/foo/bar/ssh/keys', ++ TEST_USER2_KEYS), ++ ('ubuntu', '/home/ubuntu/foo/bar/ssh/keys', ++ TEST_DEFAULT_KEYS), ++ ('root', '/root/foo/bar/ssh/keys', TEST_DEFAULT_KEYS), ++ ] ++ common_verify(client, expected_keys) ++ ++ ++EXTERNAL_KEYS_USERDATA = _USERDATA.format(bootcmd=( ++ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " ++ "/etc/ssh/authorized_keys /etc/ssh/authorized_keys/%u/keys;' " ++ "/etc/ssh/sshd_config")) ++ ++ ++@pytest.mark.ubuntu ++@pytest.mark.user_data(EXTERNAL_KEYS_USERDATA) ++def test_external_keys(client: IntegrationInstance): ++ expected_keys = [ ++ ('test_user1', '/etc/ssh/authorized_keys/test_user1/keys', ++ TEST_USER1_KEYS), ++ ('test_user2', '/etc/ssh/authorized_keys/test_user2/keys', ++ TEST_USER2_KEYS), ++ ('ubuntu', '/etc/ssh/authorized_keys/ubuntu/keys', ++ TEST_DEFAULT_KEYS), ++ ('root', '/etc/ssh/authorized_keys/root/keys', TEST_DEFAULT_KEYS), ++ ] ++ common_verify(client, expected_keys) +-- +2.27.0 + diff --git a/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch b/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch new file mode 100644 index 0000000..c47788f --- /dev/null +++ b/ci-Revert-unnecesary-lcase-in-ds-identify-978.patch @@ -0,0 +1,47 @@ +From 0eeec94882779de76c08b1a7faf862e22f21f242 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Fri, 14 Jan 2022 16:42:46 +0100 +Subject: [PATCH 5/6] Revert unnecesary lcase in ds-identify (#978) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 44: Datasource for VMware +RH-Commit: [5/6] f7385c15cf17a9c4a2fa15b29afd1b8a96b24d1e +RH-Bugzilla: 2026587 +RH-Acked-by: Mohamed Gamal Morsy +RH-Acked-by: Eduardo Otubo + +commit f516a7d37c1654addc02485e681b4358d7e7c0db +Author: Andrew Kutz <101085+akutz@users.noreply.github.com> 
+Date: Fri Aug 13 14:30:55 2021 -0500 + + Revert unnecesary lcase in ds-identify (#978) + + This patch reverts an unnecessary lcase optimization in the + ds-identify script. SystemD documents the values produced by + the systemd-detect-virt command are lower case, and the mapping + table used by the FreeBSD check is also lower-case. + + The optimization added two new forked processes, needlessly + causing overhead. + +Signed-off-by: Emanuele Giuseppe Esposito +--- + tools/ds-identify | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/ds-identify b/tools/ds-identify +index 0e12298f..7b782462 100755 +--- a/tools/ds-identify ++++ b/tools/ds-identify +@@ -449,7 +449,7 @@ detect_virt() { + read_virt() { + cached "$DI_VIRT" && return 0 + detect_virt +- DI_VIRT="$(echo "${_RET}" | tr '[:upper:]' '[:lower:]')" ++ DI_VIRT="${_RET}" + } + + is_container() { +-- +2.27.0 + diff --git a/ci-Stop-copying-ssh-system-keys-and-check-folder-permis.patch b/ci-Stop-copying-ssh-system-keys-and-check-folder-permis.patch new file mode 100644 index 0000000..e46b52b --- /dev/null +++ b/ci-Stop-copying-ssh-system-keys-and-check-folder-permis.patch @@ -0,0 +1,1385 @@ +From 3b68aff3b7b1dc567ef6721a269c2d4e054b729f Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Mon, 9 Aug 2021 23:41:44 +0200 +Subject: [PATCH] Stop copying ssh system keys and check folder permissions + (#956) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 28: Stop copying ssh system keys and check folder permissions (#956) +RH-Commit: [1/1] 7cada613be82f2f525ee56b86ef9f71edf40d2ef (eesposit/cloud-init) +RH-Bugzilla: 1862967 +RH-Acked-by: Miroslav Rezanina +RH-Acked-by: Eduardo Otubo + +TESTED: By me and QA +BREW: 38818284 + +This is a continuation of previous MR 25 and upstream PR #937. +There were still issues when using non-standard file paths like +/etc/ssh/userkeys/%u or /etc/ssh/authorized_keys, and the choice +of storing the keys of all authorized_keys files into a single +one was not ideal. This fix modifies cloudinit to support +all different cases of authorized_keys file locations, and +picks a user-specific file where to copy the new keys that +complies with ssh permissions. + +commit 00dbaf1e9ab0e59d81662f0f3561897bef499a3f +Author: Emanuele Giuseppe Esposito +Date: Mon Aug 9 16:49:56 2021 +0200 + + Stop copying ssh system keys and check folder permissions (#956) + + In /etc/ssh/sshd_config, it is possible to define a custom + authorized_keys file that will contain the keys allowed to access the + machine via the AuthorizedKeysFile option. Cloudinit is able to add + user-specific keys to the existing ones, but we need to be careful on + which of the authorized_keys files listed to pick. + Chosing a file that is shared by all user will cause security + issues, because the owner of that key can then access also other users. + + We therefore pick an authorized_keys file only if it satisfies the + following conditions: + 1. it is not a "global" file, ie it must be defined in + AuthorizedKeysFile with %u, %h or be in /home/. This avoids + security issues. + 2. it must comply with ssh permission requirements, otherwise the ssh + agent won't use that file. + + If it doesn't meet either of those conditions, write to + ~/.ssh/authorized_keys + + We also need to consider the case when the chosen authorized_keys file + does not exist. In this case, the existing behavior of cloud-init is + to create the new file. 
We therefore need to be sure that the file + complies with ssh permissions too, by setting: + - the actual file to permission 600, and owned by the user + - the directories in the path that do not exist must be root owned and + with permission 755. + +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/ssh_util.py | 133 ++++- + cloudinit/util.py | 51 +- + tests/unittests/test_sshutil.py | 952 +++++++++++++++++++++++++------- + 3 files changed, 920 insertions(+), 216 deletions(-) + +diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py +index 89057262..b8a3c8f7 100644 +--- a/cloudinit/ssh_util.py ++++ b/cloudinit/ssh_util.py +@@ -249,6 +249,113 @@ def render_authorizedkeysfile_paths(value, homedir, username): + return rendered + + ++# Inspired from safe_path() in openssh source code (misc.c). ++def check_permissions(username, current_path, full_path, is_file, strictmodes): ++ """Check if the file/folder in @current_path has the right permissions. ++ ++ We need to check that: ++ 1. If StrictMode is enabled, the owner is either root or the user ++ 2. the user can access the file/folder, otherwise ssh won't use it ++ 3. If StrictMode is enabled, no write permission is given to group ++ and world users (022) ++ """ ++ ++ # group/world can only execute the folder (access) ++ minimal_permissions = 0o711 ++ if is_file: ++ # group/world can only read the file ++ minimal_permissions = 0o644 ++ ++ # 1. owner must be either root or the user itself ++ owner = util.get_owner(current_path) ++ if strictmodes and owner != username and owner != "root": ++ LOG.debug("Path %s in %s must be own by user %s or" ++ " by root, but instead is own by %s. Ignoring key.", ++ current_path, full_path, username, owner) ++ return False ++ ++ parent_permission = util.get_permissions(current_path) ++ # 2. the user can access the file/folder, otherwise ssh won't use it ++ if owner == username: ++ # need only the owner permissions ++ minimal_permissions &= 0o700 ++ else: ++ group_owner = util.get_group(current_path) ++ user_groups = util.get_user_groups(username) ++ ++ if group_owner in user_groups: ++ # need only the group permissions ++ minimal_permissions &= 0o070 ++ else: ++ # need only the world permissions ++ minimal_permissions &= 0o007 ++ ++ if parent_permission & minimal_permissions == 0: ++ LOG.debug("Path %s in %s must be accessible by user %s," ++ " check its permissions", ++ current_path, full_path, username) ++ return False ++ ++ # 3. no write permission (w) is given to group and world users (022) ++ # Group and world user can still have +rx. ++ if strictmodes and parent_permission & 0o022 != 0: ++ LOG.debug("Path %s in %s must not give write" ++ "permission to group or world users. 
Ignoring key.", ++ current_path, full_path) ++ return False ++ ++ return True ++ ++ ++def check_create_path(username, filename, strictmodes): ++ user_pwent = users_ssh_info(username)[1] ++ root_pwent = users_ssh_info("root")[1] ++ try: ++ # check the directories first ++ directories = filename.split("/")[1:-1] ++ ++ # scan in order, from root to file name ++ parent_folder = "" ++ # this is to comply also with unit tests, and ++ # strange home directories ++ home_folder = os.path.dirname(user_pwent.pw_dir) ++ for directory in directories: ++ parent_folder += "/" + directory ++ if home_folder.startswith(parent_folder): ++ continue ++ ++ if not os.path.isdir(parent_folder): ++ # directory does not exist, and permission so far are good: ++ # create the directory, and make it accessible by everyone ++ # but owned by root, as it might be used by many users. ++ with util.SeLinuxGuard(parent_folder): ++ os.makedirs(parent_folder, mode=0o755, exist_ok=True) ++ util.chownbyid(parent_folder, root_pwent.pw_uid, ++ root_pwent.pw_gid) ++ ++ permissions = check_permissions(username, parent_folder, ++ filename, False, strictmodes) ++ if not permissions: ++ return False ++ ++ # check the file ++ if not os.path.exists(filename): ++ # if file does not exist: we need to create it, since the ++ # folders at this point exist and have right permissions ++ util.write_file(filename, '', mode=0o600, ensure_dir_exists=True) ++ util.chownbyid(filename, user_pwent.pw_uid, user_pwent.pw_gid) ++ ++ permissions = check_permissions(username, filename, ++ filename, True, strictmodes) ++ if not permissions: ++ return False ++ except (IOError, OSError) as e: ++ util.logexc(LOG, str(e)) ++ return False ++ ++ return True ++ ++ + def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): + (ssh_dir, pw_ent) = users_ssh_info(username) + default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys') +@@ -259,6 +366,7 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): + ssh_cfg = parse_ssh_config_map(sshd_cfg_file) + key_paths = ssh_cfg.get("authorizedkeysfile", + "%h/.ssh/authorized_keys") ++ strictmodes = ssh_cfg.get("strictmodes", "yes") + auth_key_fns = render_authorizedkeysfile_paths( + key_paths, pw_ent.pw_dir, username) + +@@ -269,31 +377,31 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): + "config from %r, using 'AuthorizedKeysFile' file " + "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) + +- # check if one of the keys is the user's one ++ # check if one of the keys is the user's one and has the right permissions + for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns): + if any([ + '%u' in key_path, + '%h' in key_path, + auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir)) + ]): +- user_authorizedkeys_file = auth_key_fn ++ permissions_ok = check_create_path(username, auth_key_fn, ++ strictmodes == "yes") ++ if permissions_ok: ++ user_authorizedkeys_file = auth_key_fn ++ break + + if user_authorizedkeys_file != default_authorizedkeys_file: + LOG.debug( + "AuthorizedKeysFile has an user-specific authorized_keys, " + "using %s", user_authorizedkeys_file) + +- # always store all the keys in the user's private file +- return (user_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) ++ return ( ++ user_authorizedkeys_file, ++ parse_authorized_keys([user_authorizedkeys_file]) ++ ) + + + def setup_user_keys(keys, username, options=None): +- # Make sure the users .ssh dir is setup accordingly +- (ssh_dir, pwent) = users_ssh_info(username) +- if not 
os.path.isdir(ssh_dir): +- util.ensure_dir(ssh_dir, mode=0o700) +- util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid) +- + # Turn the 'update' keys given into actual entries + parser = AuthKeyLineParser() + key_entries = [] +@@ -302,11 +410,10 @@ def setup_user_keys(keys, username, options=None): + + # Extract the old and make the new + (auth_key_fn, auth_key_entries) = extract_authorized_keys(username) ++ ssh_dir = os.path.dirname(auth_key_fn) + with util.SeLinuxGuard(ssh_dir, recursive=True): + content = update_authorized_keys(auth_key_entries, key_entries) +- util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700) +- util.write_file(auth_key_fn, content, mode=0o600) +- util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid) ++ util.write_file(auth_key_fn, content, preserve_mode=True) + + + class SshdConfigLine(object): +diff --git a/cloudinit/util.py b/cloudinit/util.py +index 4e0a72db..343976ad 100644 +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -35,6 +35,7 @@ from base64 import b64decode, b64encode + from errno import ENOENT + from functools import lru_cache + from urllib import parse ++from typing import List + + from cloudinit import importer + from cloudinit import log as logging +@@ -1830,6 +1831,53 @@ def chmod(path, mode): + os.chmod(path, real_mode) + + ++def get_permissions(path: str) -> int: ++ """ ++ Returns the octal permissions of the file/folder pointed by the path, ++ encoded as an int. ++ ++ @param path: The full path of the file/folder. ++ """ ++ ++ return stat.S_IMODE(os.stat(path).st_mode) ++ ++ ++def get_owner(path: str) -> str: ++ """ ++ Returns the owner of the file/folder pointed by the path. ++ ++ @param path: The full path of the file/folder. ++ """ ++ st = os.stat(path) ++ return pwd.getpwuid(st.st_uid).pw_name ++ ++ ++def get_group(path: str) -> str: ++ """ ++ Returns the group of the file/folder pointed by the path. ++ ++ @param path: The full path of the file/folder. ++ """ ++ st = os.stat(path) ++ return grp.getgrgid(st.st_gid).gr_name ++ ++ ++def get_user_groups(username: str) -> List[str]: ++ """ ++ Returns a list of all groups to which the user belongs ++ ++ @param username: the user we want to check ++ """ ++ groups = [] ++ for group in grp.getgrall(): ++ if username in group.gr_mem: ++ groups.append(group.gr_name) ++ ++ gid = pwd.getpwnam(username).pw_gid ++ groups.append(grp.getgrgid(gid).gr_name) ++ return groups ++ ++ + def write_file( + filename, + content, +@@ -1856,8 +1904,7 @@ def write_file( + + if preserve_mode: + try: +- file_stat = os.stat(filename) +- mode = stat.S_IMODE(file_stat.st_mode) ++ mode = get_permissions(filename) + except OSError: + pass + +diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py +index bcb8044f..a66788bf 100644 +--- a/tests/unittests/test_sshutil.py ++++ b/tests/unittests/test_sshutil.py +@@ -1,6 +1,9 @@ + # This file is part of cloud-init. See LICENSE file for license information. 
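[Editorial aside — not part of the patch above.] The new util.get_permissions()/get_owner()/get_group() helpers feed ssh_util.check_permissions(), which intersects a minimal required mask (0o711 for directories, 0o644 for files) with whichever permission slot applies to the user (owner, group, or world) and, under StrictModes, also rejects any group/world write bits. A standalone sketch of that mask arithmetic follows; it is simplified, uses invented names, and omits the root-or-owner ownership test performed by the real function.

# Minimal sketch of the permission-mask arithmetic used by
# check_permissions() above; simplified and standalone.
import os
import stat


def mask_ok(path, username, owner, user_in_group, strictmodes=True):
    mode = stat.S_IMODE(os.stat(path).st_mode)
    needed = 0o644 if os.path.isfile(path) else 0o711
    if owner == username:
        needed &= 0o700          # only the owner bits matter
    elif user_in_group:
        needed &= 0o070          # only the group bits matter
    else:
        needed &= 0o007          # only the world bits matter
    if mode & needed == 0:       # the user cannot reach/read the path at all
        return False
    if strictmodes and mode & 0o022:   # group/world write is rejected
        return False
    return True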
+ ++import os ++ + from collections import namedtuple ++from functools import partial + from unittest.mock import patch + + from cloudinit import ssh_util +@@ -8,13 +11,48 @@ from cloudinit.tests import helpers as test_helpers + from cloudinit import util + + # https://stackoverflow.com/questions/11351032/ +-FakePwEnt = namedtuple( +- 'FakePwEnt', +- ['pw_dir', 'pw_gecos', 'pw_name', 'pw_passwd', 'pw_shell', 'pwd_uid']) ++FakePwEnt = namedtuple('FakePwEnt', [ ++ 'pw_name', ++ 'pw_passwd', ++ 'pw_uid', ++ 'pw_gid', ++ 'pw_gecos', ++ 'pw_dir', ++ 'pw_shell', ++]) + FakePwEnt.__new__.__defaults__ = tuple( + "UNSET_%s" % n for n in FakePwEnt._fields) + + ++def mock_get_owner(updated_permissions, value): ++ try: ++ return updated_permissions[value][0] ++ except ValueError: ++ return util.get_owner(value) ++ ++ ++def mock_get_group(updated_permissions, value): ++ try: ++ return updated_permissions[value][1] ++ except ValueError: ++ return util.get_group(value) ++ ++ ++def mock_get_user_groups(username): ++ return username ++ ++ ++def mock_get_permissions(updated_permissions, value): ++ try: ++ return updated_permissions[value][2] ++ except ValueError: ++ return util.get_permissions(value) ++ ++ ++def mock_getpwnam(users, username): ++ return users[username] ++ ++ + # Do not use these public keys, most of them are fetched from + # the testdata for OpenSSH, and their private keys are available + # https://github.com/openssh/openssh-portable/tree/master/regress/unittests/sshkey/testdata +@@ -552,12 +590,30 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): + ssh_util.render_authorizedkeysfile_paths( + "/opt/%u/keys", "/home/bobby", "bobby")) + ++ def test_user_file(self): ++ self.assertEqual( ++ ["/opt/bobby"], ++ ssh_util.render_authorizedkeysfile_paths( ++ "/opt/%u", "/home/bobby", "bobby")) ++ ++ def test_user_file2(self): ++ self.assertEqual( ++ ["/opt/bobby/bobby"], ++ ssh_util.render_authorizedkeysfile_paths( ++ "/opt/%u/%u", "/home/bobby", "bobby")) ++ + def test_multiple(self): + self.assertEqual( + ["/keys/path1", "/keys/path2"], + ssh_util.render_authorizedkeysfile_paths( + "/keys/path1 /keys/path2", "/home/bobby", "bobby")) + ++ def test_multiple2(self): ++ self.assertEqual( ++ ["/keys/path1", "/keys/bobby"], ++ ssh_util.render_authorizedkeysfile_paths( ++ "/keys/path1 /keys/%u", "/home/bobby", "bobby")) ++ + def test_relative(self): + self.assertEqual( + ["/home/bobby/.secret/keys"], +@@ -581,269 +637,763 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): + + class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): + +- @patch("cloudinit.ssh_util.pwd.getpwnam") +- def test_multiple_authorizedkeys_file_order1(self, m_getpwnam): +- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') +- m_getpwnam.return_value = fpw +- user_ssh_folder = "%s/.ssh" % fpw.pw_dir +- +- # /tmp/home2/bobby/.ssh/authorized_keys = rsa +- authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) +- util.write_file(authorized_keys, VALID_CONTENT['rsa']) +- +- # /tmp/home2/bobby/.ssh/user_keys = dsa +- user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) +- util.write_file(user_keys, VALID_CONTENT['dsa']) +- +- # /tmp/sshd_config ++ def create_fake_users(self, names, mock_permissions, ++ m_get_group, m_get_owner, m_get_permissions, ++ m_getpwnam, users): ++ homes = [] ++ ++ root = '/tmp/root' ++ fpw = FakePwEnt(pw_name="root", pw_dir=root) ++ users["root"] = fpw ++ ++ for name in names: ++ home = '/tmp/home/' + name ++ fpw = FakePwEnt(pw_name=name, 
pw_dir=home) ++ users[name] = fpw ++ homes.append(home) ++ ++ m_get_permissions.side_effect = partial( ++ mock_get_permissions, mock_permissions) ++ m_get_owner.side_effect = partial(mock_get_owner, mock_permissions) ++ m_get_group.side_effect = partial(mock_get_group, mock_permissions) ++ m_getpwnam.side_effect = partial(mock_getpwnam, users) ++ return homes ++ ++ def create_user_authorized_file(self, home, filename, content_key, keys): ++ user_ssh_folder = "%s/.ssh" % home ++ # /tmp/home//.ssh/authorized_keys = content_key ++ authorized_keys = self.tmp_path(filename, dir=user_ssh_folder) ++ util.write_file(authorized_keys, VALID_CONTENT[content_key]) ++ keys[authorized_keys] = content_key ++ return authorized_keys ++ ++ def create_global_authorized_file(self, filename, content_key, keys): ++ authorized_keys = self.tmp_path(filename, dir='/tmp') ++ util.write_file(authorized_keys, VALID_CONTENT[content_key]) ++ keys[authorized_keys] = content_key ++ return authorized_keys ++ ++ def create_sshd_config(self, authorized_keys_files): + sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, +- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) ++ "AuthorizedKeysFile " + authorized_keys_files + ) ++ return sshd_config + ++ def execute_and_check(self, user, sshd_config, solution, keys, ++ delete_keys=True): + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( +- fpw.pw_name, sshd_config) ++ user, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + +- self.assertEqual(user_keys, auth_key_fn) +- self.assertTrue(VALID_CONTENT['rsa'] in content) +- self.assertTrue(VALID_CONTENT['dsa'] in content) ++ self.assertEqual(auth_key_fn, solution) ++ for path, key in keys.items(): ++ if path == solution: ++ self.assertTrue(VALID_CONTENT[key] in content) ++ else: ++ self.assertFalse(VALID_CONTENT[key] in content) ++ ++ if delete_keys and os.path.isdir("/tmp/home/"): ++ util.delete_dir_contents("/tmp/home/") + + @patch("cloudinit.ssh_util.pwd.getpwnam") +- def test_multiple_authorizedkeys_file_order2(self, m_getpwnam): +- fpw = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') +- m_getpwnam.return_value = fpw +- user_ssh_folder = "%s/.ssh" % fpw.pw_dir ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_single_user_two_local_files( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ++ ): ++ user_bobby = 'bobby' ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), ++ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), ++ } ++ ++ homes = self.create_fake_users( ++ [user_bobby], mock_permissions, m_get_group, m_get_owner, ++ m_get_permissions, m_getpwnam, users ++ ) ++ home = homes[0] + +- # /tmp/home/suzie/.ssh/authorized_keys = rsa +- authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) +- util.write_file(authorized_keys, VALID_CONTENT['rsa']) ++ # /tmp/home/bobby/.ssh/authorized_keys = rsa ++ authorized_keys = self.create_user_authorized_file( ++ home, 'authorized_keys', 'rsa', keys ++ ) + +- # /tmp/home/suzie/.ssh/user_keys = dsa +- user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) +- util.write_file(user_keys, VALID_CONTENT['dsa']) ++ # /tmp/home/bobby/.ssh/user_keys = dsa ++ user_keys = self.create_user_authorized_file( 
++ home, 'user_keys', 'dsa', keys ++ ) + + # /tmp/sshd_config +- sshd_config = self.tmp_path('sshd_config', dir="/tmp") +- util.write_file( +- sshd_config, +- "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys) ++ options = "%s %s" % (authorized_keys, user_keys) ++ sshd_config = self.create_sshd_config(options) ++ ++ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys) ++ ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_single_user_two_local_files_inverted( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ++ ): ++ user_bobby = 'bobby' ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), ++ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), ++ } ++ ++ homes = self.create_fake_users( ++ [user_bobby], mock_permissions, m_get_group, m_get_owner, ++ m_get_permissions, m_getpwnam, users + ) ++ home = homes[0] + +- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( +- fpw.pw_name, sshd_config) +- content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ # /tmp/home/bobby/.ssh/authorized_keys = rsa ++ authorized_keys = self.create_user_authorized_file( ++ home, 'authorized_keys', 'rsa', keys ++ ) + +- self.assertEqual(authorized_keys, auth_key_fn) +- self.assertTrue(VALID_CONTENT['rsa'] in content) +- self.assertTrue(VALID_CONTENT['dsa'] in content) ++ # /tmp/home/bobby/.ssh/user_keys = dsa ++ user_keys = self.create_user_authorized_file( ++ home, 'user_keys', 'dsa', keys ++ ) + +- @patch("cloudinit.ssh_util.pwd.getpwnam") +- def test_multiple_authorizedkeys_file_local_global(self, m_getpwnam): +- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') +- m_getpwnam.return_value = fpw +- user_ssh_folder = "%s/.ssh" % fpw.pw_dir ++ # /tmp/sshd_config ++ options = "%s %s" % (user_keys, authorized_keys) ++ sshd_config = self.create_sshd_config(options) + +- # /tmp/home2/bobby/.ssh/authorized_keys = rsa +- authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) +- util.write_file(authorized_keys, VALID_CONTENT['rsa']) ++ self.execute_and_check(user_bobby, sshd_config, user_keys, keys) + +- # /tmp/home2/bobby/.ssh/user_keys = dsa +- user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) +- util.write_file(user_keys, VALID_CONTENT['dsa']) ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_single_user_local_global_files( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ++ ): ++ user_bobby = 'bobby' ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), ++ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), ++ } ++ ++ homes = self.create_fake_users( ++ [user_bobby], mock_permissions, m_get_group, m_get_owner, ++ m_get_permissions, m_getpwnam, users ++ ) ++ home = homes[0] + +- # /tmp/etc/ssh/authorized_keys = ecdsa +- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', +- dir="/tmp") +- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) ++ # 
/tmp/home/bobby/.ssh/authorized_keys = rsa ++ authorized_keys = self.create_user_authorized_file( ++ home, 'authorized_keys', 'rsa', keys ++ ) + +- # /tmp/sshd_config +- sshd_config = self.tmp_path('sshd_config', dir="/tmp") +- util.write_file( +- sshd_config, +- "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, +- user_keys, authorized_keys) ++ # /tmp/home/bobby/.ssh/user_keys = dsa ++ user_keys = self.create_user_authorized_file( ++ home, 'user_keys', 'dsa', keys + ) + +- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( +- fpw.pw_name, sshd_config) +- content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ authorized_keys_global = self.create_global_authorized_file( ++ 'etc/ssh/authorized_keys', 'ecdsa', keys ++ ) + +- self.assertEqual(authorized_keys, auth_key_fn) +- self.assertTrue(VALID_CONTENT['rsa'] in content) +- self.assertTrue(VALID_CONTENT['ecdsa'] in content) +- self.assertTrue(VALID_CONTENT['dsa'] in content) ++ options = "%s %s %s" % (authorized_keys_global, user_keys, ++ authorized_keys) ++ sshd_config = self.create_sshd_config(options) + +- @patch("cloudinit.ssh_util.pwd.getpwnam") +- def test_multiple_authorizedkeys_file_local_global2(self, m_getpwnam): +- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') +- m_getpwnam.return_value = fpw +- user_ssh_folder = "%s/.ssh" % fpw.pw_dir ++ self.execute_and_check(user_bobby, sshd_config, user_keys, keys) + +- # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa +- authorized_keys = self.tmp_path('authorized_keys2', +- dir=user_ssh_folder) +- util.write_file(authorized_keys, VALID_CONTENT['rsa']) ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_single_user_local_global_files_inverted( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ++ ): ++ user_bobby = 'bobby' ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), ++ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), ++ } ++ ++ homes = self.create_fake_users( ++ [user_bobby], mock_permissions, m_get_group, m_get_owner, ++ m_get_permissions, m_getpwnam, users ++ ) ++ home = homes[0] + +- # /tmp/home2/bobby/.ssh/user_keys3 = dsa +- user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) +- util.write_file(user_keys, VALID_CONTENT['dsa']) ++ # /tmp/home/bobby/.ssh/authorized_keys = rsa ++ authorized_keys = self.create_user_authorized_file( ++ home, 'authorized_keys2', 'rsa', keys ++ ) + +- # /tmp/etc/ssh/authorized_keys = ecdsa +- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', +- dir="/tmp") +- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) ++ # /tmp/home/bobby/.ssh/user_keys = dsa ++ user_keys = self.create_user_authorized_file( ++ home, 'user_keys3', 'dsa', keys ++ ) + +- # /tmp/sshd_config +- sshd_config = self.tmp_path('sshd_config', dir="/tmp") +- util.write_file( +- sshd_config, +- "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, +- authorized_keys, user_keys) ++ authorized_keys_global = self.create_global_authorized_file( ++ 'etc/ssh/authorized_keys', 'ecdsa', keys + ) + +- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( +- fpw.pw_name, sshd_config) +- content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ options = "%s %s %s" % 
(authorized_keys_global, authorized_keys, ++ user_keys) ++ sshd_config = self.create_sshd_config(options) + +- self.assertEqual(user_keys, auth_key_fn) +- self.assertTrue(VALID_CONTENT['rsa'] in content) +- self.assertTrue(VALID_CONTENT['ecdsa'] in content) +- self.assertTrue(VALID_CONTENT['dsa'] in content) ++ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys) + + @patch("cloudinit.ssh_util.pwd.getpwnam") +- def test_multiple_authorizedkeys_file_global(self, m_getpwnam): +- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') +- m_getpwnam.return_value = fpw ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_single_user_global_file( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ++ ): ++ user_bobby = 'bobby' ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), ++ } ++ ++ homes = self.create_fake_users( ++ [user_bobby], mock_permissions, m_get_group, m_get_owner, ++ m_get_permissions, m_getpwnam, users ++ ) ++ home = homes[0] + + # /tmp/etc/ssh/authorized_keys = rsa +- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', +- dir="/tmp") +- util.write_file(authorized_keys_global, VALID_CONTENT['rsa']) ++ authorized_keys_global = self.create_global_authorized_file( ++ 'etc/ssh/authorized_keys', 'rsa', keys ++ ) + +- # /tmp/sshd_config +- sshd_config = self.tmp_path('sshd_config') +- util.write_file( +- sshd_config, +- "AuthorizedKeysFile %s" % (authorized_keys_global) ++ options = "%s" % authorized_keys_global ++ sshd_config = self.create_sshd_config(options) ++ ++ default = "%s/.ssh/authorized_keys" % home ++ self.execute_and_check(user_bobby, sshd_config, default, keys) ++ ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_two_users_local_file_standard( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ++ ): ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), ++ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), ++ } ++ ++ user_bobby = 'bobby' ++ user_suzie = 'suzie' ++ homes = self.create_fake_users( ++ [user_bobby, user_suzie], mock_permissions, m_get_group, ++ m_get_owner, m_get_permissions, m_getpwnam, users + ) ++ home_bobby = homes[0] ++ home_suzie = homes[1] + +- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( +- fpw.pw_name, sshd_config) +- content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ # /tmp/home/bobby/.ssh/authorized_keys = rsa ++ authorized_keys = self.create_user_authorized_file( ++ home_bobby, 'authorized_keys', 'rsa', keys ++ ) + +- self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) +- self.assertTrue(VALID_CONTENT['rsa'] in content) ++ # /tmp/home/suzie/.ssh/authorized_keys = rsa ++ authorized_keys2 = self.create_user_authorized_file( ++ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys ++ ) ++ ++ options = ".ssh/authorized_keys" ++ sshd_config = self.create_sshd_config(options) 
++ ++ self.execute_and_check( ++ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False ++ ) ++ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) + + @patch("cloudinit.ssh_util.pwd.getpwnam") +- def test_multiple_authorizedkeys_file_multiuser(self, m_getpwnam): +- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') +- m_getpwnam.return_value = fpw +- user_ssh_folder = "%s/.ssh" % fpw.pw_dir +- # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa +- authorized_keys = self.tmp_path('authorized_keys2', +- dir=user_ssh_folder) +- util.write_file(authorized_keys, VALID_CONTENT['rsa']) +- # /tmp/home2/bobby/.ssh/user_keys3 = dsa +- user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) +- util.write_file(user_keys, VALID_CONTENT['dsa']) +- +- fpw2 = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') +- user_ssh_folder = "%s/.ssh" % fpw2.pw_dir +- # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com +- authorized_keys2 = self.tmp_path('authorized_keys2', +- dir=user_ssh_folder) +- util.write_file(authorized_keys2, +- VALID_CONTENT['ssh-xmss@openssh.com']) ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_two_users_local_file_custom( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ++ ): ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), ++ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600), ++ } ++ ++ user_bobby = 'bobby' ++ user_suzie = 'suzie' ++ homes = self.create_fake_users( ++ [user_bobby, user_suzie], mock_permissions, m_get_group, ++ m_get_owner, m_get_permissions, m_getpwnam, users ++ ) ++ home_bobby = homes[0] ++ home_suzie = homes[1] + +- # /tmp/etc/ssh/authorized_keys = ecdsa +- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', +- dir="/tmp") +- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) ++ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa ++ authorized_keys = self.create_user_authorized_file( ++ home_bobby, 'authorized_keys2', 'rsa', keys ++ ) + +- # /tmp/sshd_config +- sshd_config = self.tmp_path('sshd_config', dir="/tmp") +- util.write_file( +- sshd_config, +- "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s" % +- (authorized_keys_global, user_keys) ++ # /tmp/home/suzie/.ssh/authorized_keys2 = rsa ++ authorized_keys2 = self.create_user_authorized_file( ++ home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys + ) + +- # process first user +- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( +- fpw.pw_name, sshd_config) +- content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ options = ".ssh/authorized_keys2" ++ sshd_config = self.create_sshd_config(options) ++ ++ self.execute_and_check( ++ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False ++ ) ++ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) + +- self.assertEqual(user_keys, auth_key_fn) +- self.assertTrue(VALID_CONTENT['rsa'] in content) +- self.assertTrue(VALID_CONTENT['ecdsa'] in content) +- self.assertTrue(VALID_CONTENT['dsa'] in content) +- self.assertFalse(VALID_CONTENT['ssh-xmss@openssh.com'] in content) ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ 
@patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_two_users_local_global_files( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ++ ): ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), ++ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), ++ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600), ++ '/tmp/home/suzie/.ssh/user_keys3': ('suzie', 'suzie', 0o600), ++ } ++ ++ user_bobby = 'bobby' ++ user_suzie = 'suzie' ++ homes = self.create_fake_users( ++ [user_bobby, user_suzie], mock_permissions, m_get_group, ++ m_get_owner, m_get_permissions, m_getpwnam, users ++ ) ++ home_bobby = homes[0] ++ home_suzie = homes[1] + +- m_getpwnam.return_value = fpw2 +- # process second user +- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( +- fpw2.pw_name, sshd_config) +- content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa ++ self.create_user_authorized_file( ++ home_bobby, 'authorized_keys2', 'rsa', keys ++ ) ++ # /tmp/home/bobby/.ssh/user_keys3 = dsa ++ user_keys = self.create_user_authorized_file( ++ home_bobby, 'user_keys3', 'dsa', keys ++ ) ++ ++ # /tmp/home/suzie/.ssh/authorized_keys2 = rsa ++ authorized_keys2 = self.create_user_authorized_file( ++ home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys ++ ) ++ ++ # /tmp/etc/ssh/authorized_keys = ecdsa ++ authorized_keys_global = self.create_global_authorized_file( ++ 'etc/ssh/authorized_keys2', 'ecdsa', keys ++ ) ++ ++ options = "%s %s %%h/.ssh/authorized_keys2" % \ ++ (authorized_keys_global, user_keys) ++ sshd_config = self.create_sshd_config(options) + +- self.assertEqual(authorized_keys2, auth_key_fn) +- self.assertTrue(VALID_CONTENT['ssh-xmss@openssh.com'] in content) +- self.assertTrue(VALID_CONTENT['ecdsa'] in content) +- self.assertTrue(VALID_CONTENT['dsa'] in content) +- self.assertFalse(VALID_CONTENT['rsa'] in content) ++ self.execute_and_check( ++ user_bobby, sshd_config, user_keys, keys, delete_keys=False ++ ) ++ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) + ++ @patch("cloudinit.util.get_user_groups") + @patch("cloudinit.ssh_util.pwd.getpwnam") +- def test_multiple_authorizedkeys_file_multiuser2(self, m_getpwnam): +- fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home/bobby') +- m_getpwnam.return_value = fpw +- user_ssh_folder = "%s/.ssh" % fpw.pw_dir ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_two_users_local_global_files_badguy( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, ++ m_get_user_groups ++ ): ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), ++ '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), ++ '/tmp/home/badguy': ('root', 'root', 0o755), ++ '/tmp/home/badguy/home': ('root', 'root', 0o755), ++ '/tmp/home/badguy/home/bobby': ('root', 'root', 0o655), ++ } ++ ++ user_bobby = 'bobby' ++ user_badguy = 'badguy' ++ home_bobby, *_ = 
self.create_fake_users( ++ [user_bobby, user_badguy], mock_permissions, m_get_group, ++ m_get_owner, m_get_permissions, m_getpwnam, users ++ ) ++ m_get_user_groups.side_effect = mock_get_user_groups ++ + # /tmp/home/bobby/.ssh/authorized_keys2 = rsa +- authorized_keys = self.tmp_path('authorized_keys2', +- dir=user_ssh_folder) +- util.write_file(authorized_keys, VALID_CONTENT['rsa']) ++ authorized_keys = self.create_user_authorized_file( ++ home_bobby, 'authorized_keys2', 'rsa', keys ++ ) + # /tmp/home/bobby/.ssh/user_keys3 = dsa +- user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) +- util.write_file(user_keys, VALID_CONTENT['dsa']) ++ user_keys = self.create_user_authorized_file( ++ home_bobby, 'user_keys3', 'dsa', keys ++ ) + +- fpw2 = FakePwEnt(pw_name='badguy', pw_dir='/tmp/home/badguy') +- user_ssh_folder = "%s/.ssh" % fpw2.pw_dir + # /tmp/home/badguy/home/bobby = "" + authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy") ++ util.write_file(authorized_keys2, '') + + # /tmp/etc/ssh/authorized_keys = ecdsa +- authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', +- dir="/tmp") +- util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) ++ authorized_keys_global = self.create_global_authorized_file( ++ 'etc/ssh/authorized_keys2', 'ecdsa', keys ++ ) + + # /tmp/sshd_config +- sshd_config = self.tmp_path('sshd_config', dir="/tmp") +- util.write_file( +- sshd_config, +- "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s %s" % +- (authorized_keys_global, user_keys, authorized_keys2) ++ options = "%s %%h/.ssh/authorized_keys2 %s %s" % \ ++ (authorized_keys2, authorized_keys_global, user_keys) ++ sshd_config = self.create_sshd_config(options) ++ ++ self.execute_and_check( ++ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False ++ ) ++ self.execute_and_check( ++ user_badguy, sshd_config, authorized_keys2, keys + ) + +- # process first user +- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( +- fpw.pw_name, sshd_config) +- content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ @patch("cloudinit.util.get_user_groups") ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_two_users_unaccessible_file( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, ++ m_get_user_groups ++ ): ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), ++ ++ '/tmp/etc': ('root', 'root', 0o755), ++ '/tmp/etc/ssh': ('root', 'root', 0o755), ++ '/tmp/etc/ssh/userkeys': ('root', 'root', 0o700), ++ '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600), ++ '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600), ++ ++ '/tmp/home/badguy': ('badguy', 'badguy', 0o700), ++ '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700), ++ '/tmp/home/badguy/.ssh/authorized_keys': ++ ('badguy', 'badguy', 0o600), ++ } ++ ++ user_bobby = 'bobby' ++ user_badguy = 'badguy' ++ homes = self.create_fake_users( ++ [user_bobby, user_badguy], mock_permissions, m_get_group, ++ m_get_owner, m_get_permissions, m_getpwnam, users ++ ) ++ m_get_user_groups.side_effect = mock_get_user_groups ++ home_bobby = homes[0] ++ home_badguy = homes[1] + +- self.assertEqual(user_keys, auth_key_fn) +- self.assertTrue(VALID_CONTENT['rsa'] in content) +- 
self.assertTrue(VALID_CONTENT['ecdsa'] in content) +- self.assertTrue(VALID_CONTENT['dsa'] in content) ++ # /tmp/home/bobby/.ssh/authorized_keys = rsa ++ authorized_keys = self.create_user_authorized_file( ++ home_bobby, 'authorized_keys', 'rsa', keys ++ ) ++ # /tmp/etc/ssh/userkeys/bobby = dsa ++ # assume here that we can bypass userkeys, despite permissions ++ self.create_global_authorized_file( ++ 'etc/ssh/userkeys/bobby', 'dsa', keys ++ ) + +- m_getpwnam.return_value = fpw2 +- # process second user +- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( +- fpw2.pw_name, sshd_config) +- content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com ++ authorized_keys2 = self.create_user_authorized_file( ++ home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys ++ ) + +- # badguy should not take the key from the other user! +- self.assertEqual(authorized_keys2, auth_key_fn) +- self.assertTrue(VALID_CONTENT['ecdsa'] in content) +- self.assertTrue(VALID_CONTENT['dsa'] in content) +- self.assertFalse(VALID_CONTENT['rsa'] in content) ++ # /tmp/etc/ssh/userkeys/badguy = ecdsa ++ self.create_global_authorized_file( ++ 'etc/ssh/userkeys/badguy', 'ecdsa', keys ++ ) ++ ++ # /tmp/sshd_config ++ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys" ++ sshd_config = self.create_sshd_config(options) ++ ++ self.execute_and_check( ++ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False ++ ) ++ self.execute_and_check( ++ user_badguy, sshd_config, authorized_keys2, keys ++ ) ++ ++ @patch("cloudinit.util.get_user_groups") ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_two_users_accessible_file( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, ++ m_get_user_groups ++ ): ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), ++ ++ '/tmp/etc': ('root', 'root', 0o755), ++ '/tmp/etc/ssh': ('root', 'root', 0o755), ++ '/tmp/etc/ssh/userkeys': ('root', 'root', 0o755), ++ '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600), ++ '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600), ++ ++ '/tmp/home/badguy': ('badguy', 'badguy', 0o700), ++ '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700), ++ '/tmp/home/badguy/.ssh/authorized_keys': ++ ('badguy', 'badguy', 0o600), ++ } ++ ++ user_bobby = 'bobby' ++ user_badguy = 'badguy' ++ homes = self.create_fake_users( ++ [user_bobby, user_badguy], mock_permissions, m_get_group, ++ m_get_owner, m_get_permissions, m_getpwnam, users ++ ) ++ m_get_user_groups.side_effect = mock_get_user_groups ++ home_bobby = homes[0] ++ home_badguy = homes[1] ++ ++ # /tmp/home/bobby/.ssh/authorized_keys = rsa ++ self.create_user_authorized_file( ++ home_bobby, 'authorized_keys', 'rsa', keys ++ ) ++ # /tmp/etc/ssh/userkeys/bobby = dsa ++ # assume here that we can bypass userkeys, despite permissions ++ authorized_keys = self.create_global_authorized_file( ++ 'etc/ssh/userkeys/bobby', 'dsa', keys ++ ) ++ ++ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com ++ self.create_user_authorized_file( ++ home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys ++ ) ++ ++ # /tmp/etc/ssh/userkeys/badguy = ecdsa ++ authorized_keys2 = self.create_global_authorized_file( 
++ 'etc/ssh/userkeys/badguy', 'ecdsa', keys ++ ) ++ ++ # /tmp/sshd_config ++ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys" ++ sshd_config = self.create_sshd_config(options) ++ ++ self.execute_and_check( ++ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False ++ ) ++ self.execute_and_check( ++ user_badguy, sshd_config, authorized_keys2, keys ++ ) ++ ++ @patch("cloudinit.util.get_user_groups") ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_two_users_hardcoded_single_user_file( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, ++ m_get_user_groups ++ ): ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), ++ ++ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), ++ } ++ ++ user_bobby = 'bobby' ++ user_suzie = 'suzie' ++ homes = self.create_fake_users( ++ [user_bobby, user_suzie], mock_permissions, m_get_group, ++ m_get_owner, m_get_permissions, m_getpwnam, users ++ ) ++ home_bobby = homes[0] ++ home_suzie = homes[1] ++ m_get_user_groups.side_effect = mock_get_user_groups ++ ++ # /tmp/home/bobby/.ssh/authorized_keys = rsa ++ authorized_keys = self.create_user_authorized_file( ++ home_bobby, 'authorized_keys', 'rsa', keys ++ ) ++ ++ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com ++ self.create_user_authorized_file( ++ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys ++ ) ++ ++ # /tmp/sshd_config ++ options = "%s" % (authorized_keys) ++ sshd_config = self.create_sshd_config(options) ++ ++ self.execute_and_check( ++ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False ++ ) ++ default = "%s/.ssh/authorized_keys" % home_suzie ++ self.execute_and_check(user_suzie, sshd_config, default, keys) ++ ++ @patch("cloudinit.util.get_user_groups") ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_two_users_hardcoded_single_user_file_inverted( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, ++ m_get_user_groups ++ ): ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), ++ ++ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), ++ } ++ ++ user_bobby = 'bobby' ++ user_suzie = 'suzie' ++ homes = self.create_fake_users( ++ [user_bobby, user_suzie], mock_permissions, m_get_group, ++ m_get_owner, m_get_permissions, m_getpwnam, users ++ ) ++ home_bobby = homes[0] ++ home_suzie = homes[1] ++ m_get_user_groups.side_effect = mock_get_user_groups ++ ++ # /tmp/home/bobby/.ssh/authorized_keys = rsa ++ self.create_user_authorized_file( ++ home_bobby, 'authorized_keys', 'rsa', keys ++ ) ++ ++ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com ++ authorized_keys2 = self.create_user_authorized_file( ++ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys ++ ) ++ ++ # /tmp/sshd_config ++ options 
= "%s" % (authorized_keys2) ++ sshd_config = self.create_sshd_config(options) ++ ++ default = "%s/.ssh/authorized_keys" % home_bobby ++ self.execute_and_check( ++ user_bobby, sshd_config, default, keys, delete_keys=False ++ ) ++ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) ++ ++ @patch("cloudinit.util.get_user_groups") ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ @patch("cloudinit.util.get_permissions") ++ @patch("cloudinit.util.get_owner") ++ @patch("cloudinit.util.get_group") ++ def test_two_users_hardcoded_user_files( ++ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, ++ m_get_user_groups ++ ): ++ keys = {} ++ users = {} ++ mock_permissions = { ++ '/tmp/home/bobby': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), ++ '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), ++ ++ '/tmp/home/suzie': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), ++ '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), ++ } ++ ++ user_bobby = 'bobby' ++ user_suzie = 'suzie' ++ homes = self.create_fake_users( ++ [user_bobby, user_suzie], mock_permissions, m_get_group, ++ m_get_owner, m_get_permissions, m_getpwnam, users ++ ) ++ home_bobby = homes[0] ++ home_suzie = homes[1] ++ m_get_user_groups.side_effect = mock_get_user_groups ++ ++ # /tmp/home/bobby/.ssh/authorized_keys = rsa ++ authorized_keys = self.create_user_authorized_file( ++ home_bobby, 'authorized_keys', 'rsa', keys ++ ) ++ ++ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com ++ authorized_keys2 = self.create_user_authorized_file( ++ home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys ++ ) ++ ++ # /tmp/etc/ssh/authorized_keys = ecdsa ++ authorized_keys_global = self.create_global_authorized_file( ++ 'etc/ssh/authorized_keys', 'ecdsa', keys ++ ) ++ ++ # /tmp/sshd_config ++ options = "%s %s %s" % \ ++ (authorized_keys_global, authorized_keys, authorized_keys2) ++ sshd_config = self.create_sshd_config(options) ++ ++ self.execute_and_check( ++ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False ++ ) ++ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) + + # vi: ts=4 expandtab +-- +2.27.0 + diff --git a/ci-Update-dscheck_VMware-s-rpctool-check-970.patch b/ci-Update-dscheck_VMware-s-rpctool-check-970.patch new file mode 100644 index 0000000..07c44fe --- /dev/null +++ b/ci-Update-dscheck_VMware-s-rpctool-check-970.patch @@ -0,0 +1,97 @@ +From ded01bd47c65636e59dc332d06fb8acb982ec677 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Fri, 14 Jan 2022 16:41:52 +0100 +Subject: [PATCH 4/6] Update dscheck_VMware's rpctool check (#970) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 44: Datasource for VMware +RH-Commit: [4/6] 509f68596f2d8f32027677f756b9d81e6a507ff1 +RH-Bugzilla: 2026587 +RH-Acked-by: Mohamed Gamal Morsy +RH-Acked-by: Eduardo Otubo + +commit 7781dec3306e9467f216cfcb36b7e10a8b38547a +Author: Shreenidhi Shedi <53473811+sshedi@users.noreply.github.com> +Date: Fri Aug 13 00:40:39 2021 +0530 + + Update dscheck_VMware's rpctool check (#970) + + This patch updates the dscheck_VMware function's use of "vmware-rpctool". + + When checking to see if a "guestinfo" property is set. + Because a successful exit code can occur even if there is an empty + string returned, it is possible that the VMware datasource will be + loaded as a false-positive. 
This patch ensures that in addition to + validating the exit code, the emitted output is also examined to ensure + a non-empty value is returned by rpctool before returning "${DS_FOUND}" + from "dscheck_VMware()". + +Signed-off-by: Emanuele Giuseppe Esposito +--- + tools/ds-identify | 15 +++++++++------ + 1 file changed, 9 insertions(+), 6 deletions(-) + +diff --git a/tools/ds-identify b/tools/ds-identify +index c01eae3d..0e12298f 100755 +--- a/tools/ds-identify ++++ b/tools/ds-identify +@@ -141,6 +141,7 @@ error() { + debug 0 "$@" + stderr "$@" + } ++ + warn() { + set -- "WARN:" "$@" + debug 0 "$@" +@@ -344,7 +345,6 @@ geom_label_status_as() { + return $ret + } + +- + read_fs_info_freebsd() { + local oifs="$IFS" line="" delim="," + local ret=0 labels="" dev="" label="" ftype="" isodevs="" +@@ -404,7 +404,6 @@ cached() { + [ -n "$1" ] && _RET="$1" && return || return 1 + } + +- + detect_virt() { + local virt="${UNAVAILABLE}" r="" out="" + if [ -d /run/systemd ]; then +@@ -450,7 +449,7 @@ detect_virt() { + read_virt() { + cached "$DI_VIRT" && return 0 + detect_virt +- DI_VIRT=${_RET} ++ DI_VIRT="$(echo "${_RET}" | tr '[:upper:]' '[:lower:]')" + } + + is_container() { +@@ -1370,16 +1369,20 @@ vmware_has_rpctool() { + command -v vmware-rpctool >/dev/null 2>&1 + } + ++vmware_rpctool_guestinfo() { ++ vmware-rpctool "info-get guestinfo.${1}" 2>/dev/null | grep "[[:alnum:]]" ++} ++ + vmware_rpctool_guestinfo_metadata() { +- vmware-rpctool "info-get guestinfo.metadata" ++ vmware_rpctool_guestinfo "metadata" + } + + vmware_rpctool_guestinfo_userdata() { +- vmware-rpctool "info-get guestinfo.userdata" ++ vmware_rpctool_guestinfo "userdata" + } + + vmware_rpctool_guestinfo_vendordata() { +- vmware-rpctool "info-get guestinfo.vendordata" ++ vmware_rpctool_guestinfo "vendordata" + } + + dscheck_VMware() { +-- +2.27.0 + diff --git a/ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch b/ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch new file mode 100644 index 0000000..1ccfec9 --- /dev/null +++ b/ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch @@ -0,0 +1,470 @@ +From 6e79106a09a0d142915da1fb48640575bb4bfe08 Mon Sep 17 00:00:00 2001 +From: Anh Vo +Date: Tue, 13 Apr 2021 17:39:39 -0400 +Subject: [PATCH 3/7] azure: Removing ability to invoke walinuxagent (#799) + +RH-Author: Eduardo Otubo +RH-MergeRequest: 45: Add support for userdata on Azure from IMDS +RH-Commit: [3/7] f5e98665bf2093edeeccfcd95b47df2e44a40536 +RH-Bugzilla: 2023940 +RH-Acked-by: Emanuele Giuseppe Esposito +RH-Acked-by: Mohamed Gamal Morsy + +Invoking walinuxagent from within cloud-init is no longer +supported/necessary +--- + cloudinit/sources/DataSourceAzure.py | 137 ++++-------------- + doc/rtd/topics/datasources/azure.rst | 62 ++------ + tests/unittests/test_datasource/test_azure.py | 97 ------------- + 3 files changed, 35 insertions(+), 261 deletions(-) + +diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py +index de1452ce..020b7006 100755 +--- a/cloudinit/sources/DataSourceAzure.py ++++ b/cloudinit/sources/DataSourceAzure.py +@@ -381,53 +381,6 @@ class DataSourceAzure(sources.DataSource): + util.logexc(LOG, "handling set_hostname failed") + return False + +- @azure_ds_telemetry_reporter +- def get_metadata_from_agent(self): +- temp_hostname = self.metadata.get('local-hostname') +- agent_cmd = self.ds_cfg['agent_command'] +- LOG.debug("Getting metadata via agent. 
hostname=%s cmd=%s", +- temp_hostname, agent_cmd) +- +- self.bounce_network_with_azure_hostname() +- +- try: +- invoke_agent(agent_cmd) +- except subp.ProcessExecutionError: +- # claim the datasource even if the command failed +- util.logexc(LOG, "agent command '%s' failed.", +- self.ds_cfg['agent_command']) +- +- ddir = self.ds_cfg['data_dir'] +- +- fp_files = [] +- key_value = None +- for pk in self.cfg.get('_pubkeys', []): +- if pk.get('value', None): +- key_value = pk['value'] +- LOG.debug("SSH authentication: using value from fabric") +- else: +- bname = str(pk['fingerprint'] + ".crt") +- fp_files += [os.path.join(ddir, bname)] +- LOG.debug("SSH authentication: " +- "using fingerprint from fabric") +- +- with events.ReportEventStack( +- name="waiting-for-ssh-public-key", +- description="wait for agents to retrieve SSH keys", +- parent=azure_ds_reporter): +- # wait very long for public SSH keys to arrive +- # https://bugs.launchpad.net/cloud-init/+bug/1717611 +- missing = util.log_time(logfunc=LOG.debug, +- msg="waiting for SSH public key files", +- func=util.wait_for_files, +- args=(fp_files, 900)) +- if len(missing): +- LOG.warning("Did not find files, but going on: %s", missing) +- +- metadata = {} +- metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) +- return metadata +- + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + if self.seed.startswith('/dev'): +@@ -1354,35 +1307,32 @@ class DataSourceAzure(sources.DataSource): + On failure, returns False. + """ + +- if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: +- self.bounce_network_with_azure_hostname() ++ self.bounce_network_with_azure_hostname() + +- pubkey_info = None +- try: +- raise KeyError( +- "Not using public SSH keys from IMDS" +- ) +- # pylint:disable=unreachable +- public_keys = self.metadata['imds']['compute']['publicKeys'] +- LOG.debug( +- 'Successfully retrieved %s key(s) from IMDS', +- len(public_keys) +- if public_keys is not None +- else 0 +- ) +- except KeyError: +- LOG.debug( +- 'Unable to retrieve SSH keys from IMDS during ' +- 'negotiation, falling back to OVF' +- ) +- pubkey_info = self.cfg.get('_pubkeys', None) +- +- metadata_func = partial(get_metadata_from_fabric, +- fallback_lease_file=self. +- dhclient_lease_file, +- pubkey_info=pubkey_info) +- else: +- metadata_func = self.get_metadata_from_agent ++ pubkey_info = None ++ try: ++ raise KeyError( ++ "Not using public SSH keys from IMDS" ++ ) ++ # pylint:disable=unreachable ++ public_keys = self.metadata['imds']['compute']['publicKeys'] ++ LOG.debug( ++ 'Successfully retrieved %s key(s) from IMDS', ++ len(public_keys) ++ if public_keys is not None ++ else 0 ++ ) ++ except KeyError: ++ LOG.debug( ++ 'Unable to retrieve SSH keys from IMDS during ' ++ 'negotiation, falling back to OVF' ++ ) ++ pubkey_info = self.cfg.get('_pubkeys', None) ++ ++ metadata_func = partial(get_metadata_from_fabric, ++ fallback_lease_file=self. 
++ dhclient_lease_file, ++ pubkey_info=pubkey_info) + + LOG.debug("negotiating with fabric via agent command %s", + self.ds_cfg['agent_command']) +@@ -1617,33 +1567,6 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): + return True + + +-@azure_ds_telemetry_reporter +-def crtfile_to_pubkey(fname, data=None): +- pipeline = ('openssl x509 -noout -pubkey < "$0" |' +- 'ssh-keygen -i -m PKCS8 -f /dev/stdin') +- (out, _err) = subp.subp(['sh', '-c', pipeline, fname], +- capture=True, data=data) +- return out.rstrip() +- +- +-@azure_ds_telemetry_reporter +-def pubkeys_from_crt_files(flist): +- pubkeys = [] +- errors = [] +- for fname in flist: +- try: +- pubkeys.append(crtfile_to_pubkey(fname)) +- except subp.ProcessExecutionError: +- errors.append(fname) +- +- if errors: +- report_diagnostic_event( +- "failed to convert the crt files to pubkey: %s" % errors, +- logger_func=LOG.warning) +- +- return pubkeys +- +- + @azure_ds_telemetry_reporter + def write_files(datadir, files, dirmode=None): + +@@ -1672,16 +1595,6 @@ def write_files(datadir, files, dirmode=None): + util.write_file(filename=fname, content=content, mode=0o600) + + +-@azure_ds_telemetry_reporter +-def invoke_agent(cmd): +- # this is a function itself to simplify patching it for test +- if cmd: +- LOG.debug("invoking agent: %s", cmd) +- subp.subp(cmd, shell=(not isinstance(cmd, list))) +- else: +- LOG.debug("not invoking agent") +- +- + def find_child(node, filter_func): + ret = [] + if not node.hasChildNodes(): +diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst +index e04c3a33..ad9f2236 100644 +--- a/doc/rtd/topics/datasources/azure.rst ++++ b/doc/rtd/topics/datasources/azure.rst +@@ -5,28 +5,6 @@ Azure + + This datasource finds metadata and user-data from the Azure cloud platform. + +-walinuxagent +------------- +-walinuxagent has several functions within images. For cloud-init +-specifically, the relevant functionality it performs is to register the +-instance with the Azure cloud platform at boot so networking will be +-permitted. For more information about the other functionality of +-walinuxagent, see `Azure's documentation +-`_ for more details. +-(Note, however, that only one of walinuxagent's provisioning and cloud-init +-should be used to perform instance customisation.) +- +-If you are configuring walinuxagent yourself, you will want to ensure that you +-have `Provisioning.UseCloudInit +-`_ set to +-``y``. +- +- +-Builtin Agent +-------------- +-An alternative to using walinuxagent to register to the Azure cloud platform +-is to use the ``__builtin__`` agent command. This section contains more +-background on what that code path does, and how to enable it. + + The Azure cloud platform provides initial data to an instance via an attached + CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some +@@ -41,16 +19,6 @@ by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in + 'dhclient_hook' of cloud-init itself. This sub-command will write the client + information in json format to /run/cloud-init/dhclient.hook/.json. + +-In order for cloud-init to leverage this method to find the endpoint, the +-cloud.cfg file must contain: +- +-.. sourcecode:: yaml +- +- datasource: +- Azure: +- set_hostname: False +- agent_command: __builtin__ +- + If those files are not available, the fallback is to check the leases file + for the endpoint server (again option 245). + +@@ -83,9 +51,6 @@ configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``). 
+ + The settings that may be configured are: + +- * **agent_command**: Either __builtin__ (default) or a command to run to getcw +- metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the +- provided command to obtain metadata. + * **apply_network_config**: Boolean set to True to use network configuration + described by Azure's IMDS endpoint instead of fallback network config of + dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is +@@ -121,7 +86,6 @@ An example configuration with the default values is provided below: + + datasource: + Azure: +- agent_command: __builtin__ + apply_network_config: true + data_dir: /var/lib/waagent + dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases +@@ -144,9 +108,7 @@ child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``) + If both ``UserData`` and ``CustomData`` are provided behavior is undefined on + which will be selected. + +-In the example below, user-data provided is 'this is my userdata', and the +-datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``. +-That agent command will take affect as if it were specified in system config. ++In the example below, user-data provided is 'this is my userdata' + + Example: + +@@ -184,20 +146,16 @@ The hostname is provided to the instance in the ovf-env.xml file as + Whatever value the instance provides in its dhcp request will resolve in the + domain returned in the 'search' request. + +-The interesting issue is that a generic image will already have a hostname +-configured. The ubuntu cloud images have 'ubuntu' as the hostname of the +-system, and the initial dhcp request on eth0 is not guaranteed to occur after +-the datasource code has been run. So, on first boot, that initial value will +-be sent in the dhcp request and *that* value will resolve. +- +-In order to make the ``HostName`` provided in the ovf-env.xml resolve, a +-dhcp request must be made with the new value. Walinuxagent (in its current +-version) handles this by polling the state of hostname and bouncing ('``ifdown +-eth0; ifup eth0``' the network interface if it sees that a change has been +-made. ++A generic image will already have a hostname configured. The ubuntu ++cloud images have 'ubuntu' as the hostname of the system, and the ++initial dhcp request on eth0 is not guaranteed to occur after the ++datasource code has been run. So, on first boot, that initial value ++will be sent in the dhcp request and *that* value will resolve. + +-cloud-init handles this by setting the hostname in the DataSource's 'get_data' +-method via '``hostname $HostName``', and then bouncing the interface. This ++In order to make the ``HostName`` provided in the ovf-env.xml resolve, ++a dhcp request must be made with the new value. cloud-init handles ++this by setting the hostname in the DataSource's 'get_data' method via ++'``hostname $HostName``', and then bouncing the interface. This + behavior can be configured or disabled in the datasource config. See + 'Configuration' above. 
+ +diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py +index dedebeb1..320fa857 100644 +--- a/tests/unittests/test_datasource/test_azure.py ++++ b/tests/unittests/test_datasource/test_azure.py +@@ -638,17 +638,10 @@ scbus-1 on xpt0 bus 0 + def dsdevs(): + return data.get('dsdevs', []) + +- def _invoke_agent(cmd): +- data['agent_invoked'] = cmd +- + def _wait_for_files(flist, _maxwait=None, _naplen=None): + data['waited'] = flist + return [] + +- def _pubkeys_from_crt_files(flist): +- data['pubkey_files'] = flist +- return ["pubkey_from: %s" % f for f in flist] +- + if data.get('ovfcontent') is not None: + populate_dir(os.path.join(self.paths.seed_dir, "azure"), + {'ovf-env.xml': data['ovfcontent']}) +@@ -675,8 +668,6 @@ scbus-1 on xpt0 bus 0 + + self.apply_patches([ + (dsaz, 'list_possible_azure_ds_devs', dsdevs), +- (dsaz, 'invoke_agent', _invoke_agent), +- (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), + (dsaz, 'perform_hostname_bounce', mock.MagicMock()), + (dsaz, 'get_hostname', mock.MagicMock()), + (dsaz, 'set_hostname', mock.MagicMock()), +@@ -765,7 +756,6 @@ scbus-1 on xpt0 bus 0 + ret = dsrc.get_data() + self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) + self.assertFalse(ret) +- self.assertNotIn('agent_invoked', data) + # Assert that for non viable platforms, + # there is no communication with the Azure datasource. + self.assertEqual( +@@ -789,7 +779,6 @@ scbus-1 on xpt0 bus 0 + ret = dsrc.get_data() + self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) + self.assertFalse(ret) +- self.assertNotIn('agent_invoked', data) + self.assertEqual( + 1, + m_report_failure.call_count) +@@ -806,7 +795,6 @@ scbus-1 on xpt0 bus 0 + 1, + m_crawl_metadata.call_count) + self.assertFalse(ret) +- self.assertNotIn('agent_invoked', data) + + def test_crawl_metadata_exception_should_report_failure_with_msg(self): + data = {} +@@ -1086,21 +1074,6 @@ scbus-1 on xpt0 bus 0 + self.assertTrue(os.path.isdir(self.waagent_d)) + self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700) + +- def test_user_cfg_set_agent_command_plain(self): +- # set dscfg in via plaintext +- # we must have friendly-to-xml formatted plaintext in yaml_cfg +- # not all plaintext is expected to work. 
+- yaml_cfg = "{agent_command: my_command}\n" +- cfg = yaml.safe_load(yaml_cfg) +- odata = {'HostName': "myhost", 'UserName': "myuser", +- 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} +- data = {'ovfcontent': construct_valid_ovf_env(data=odata)} +- +- dsrc = self._get_ds(data) +- ret = self._get_and_setup(dsrc) +- self.assertTrue(ret) +- self.assertEqual(data['agent_invoked'], cfg['agent_command']) +- + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_network_config_set_from_imds(self, m_driver): +@@ -1205,29 +1178,6 @@ scbus-1 on xpt0 bus 0 + dsrc.get_data() + self.assertEqual('eastus2', dsrc.region) + +- def test_user_cfg_set_agent_command(self): +- # set dscfg in via base64 encoded yaml +- cfg = {'agent_command': "my_command"} +- odata = {'HostName': "myhost", 'UserName': "myuser", +- 'dscfg': {'text': b64e(yaml.dump(cfg)), +- 'encoding': 'base64'}} +- data = {'ovfcontent': construct_valid_ovf_env(data=odata)} +- +- dsrc = self._get_ds(data) +- ret = self._get_and_setup(dsrc) +- self.assertTrue(ret) +- self.assertEqual(data['agent_invoked'], cfg['agent_command']) +- +- def test_sys_cfg_set_agent_command(self): +- sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}} +- data = {'ovfcontent': construct_valid_ovf_env(data={}), +- 'sys_cfg': sys_cfg} +- +- dsrc = self._get_ds(data) +- ret = self._get_and_setup(dsrc) +- self.assertTrue(ret) +- self.assertEqual(data['agent_invoked'], '_COMMAND') +- + def test_sys_cfg_set_never_destroy_ntfs(self): + sys_cfg = {'datasource': {'Azure': { + 'never_destroy_ntfs': 'user-supplied-value'}}} +@@ -1311,51 +1261,6 @@ scbus-1 on xpt0 bus 0 + self.assertTrue(ret) + self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8')) + +- def test_cfg_has_pubkeys_fingerprint(self): +- odata = {'HostName': "myhost", 'UserName': "myuser"} +- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] +- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] +- data = {'ovfcontent': construct_valid_ovf_env(data=odata, +- pubkeys=pubkeys)} +- +- dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) +- ret = self._get_and_setup(dsrc) +- self.assertTrue(ret) +- for mypk in mypklist: +- self.assertIn(mypk, dsrc.cfg['_pubkeys']) +- self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1]) +- +- def test_cfg_has_pubkeys_value(self): +- # make sure that provided key is used over fingerprint +- odata = {'HostName': "myhost", 'UserName': "myuser"} +- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}] +- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] +- data = {'ovfcontent': construct_valid_ovf_env(data=odata, +- pubkeys=pubkeys)} +- +- dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) +- ret = self._get_and_setup(dsrc) +- self.assertTrue(ret) +- +- for mypk in mypklist: +- self.assertIn(mypk, dsrc.cfg['_pubkeys']) +- self.assertIn(mypk['value'], dsrc.metadata['public-keys']) +- +- def test_cfg_has_no_fingerprint_has_value(self): +- # test value is used when fingerprint not provided +- odata = {'HostName': "myhost", 'UserName': "myuser"} +- mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}] +- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] +- data = {'ovfcontent': construct_valid_ovf_env(data=odata, +- pubkeys=pubkeys)} +- +- dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) +- ret = self._get_and_setup(dsrc) +- self.assertTrue(ret) +- +- for mypk in mypklist: 
+- self.assertIn(mypk['value'], dsrc.metadata['public-keys']) +- + def test_default_ephemeral_configs_ephemeral_exists(self): + # make sure the ephemeral configs are correct if disk present + odata = {} +@@ -1919,8 +1824,6 @@ class TestAzureBounce(CiTestCase): + with_logs = True + + def mock_out_azure_moving_parts(self): +- self.patches.enter_context( +- mock.patch.object(dsaz, 'invoke_agent')) + self.patches.enter_context( + mock.patch.object(dsaz.util, 'wait_for_files')) + self.patches.enter_context( +-- +2.27.0 + diff --git a/ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch b/ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch new file mode 100644 index 0000000..44ad400 --- /dev/null +++ b/ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch @@ -0,0 +1,97 @@ +From 478709d7c157a085e3b2fee432e24978a3485234 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Wed, 20 Oct 2021 16:28:42 +0200 +Subject: [PATCH] cc_ssh.py: fix private key group owner and permissions + (#1070) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 32: cc_ssh.py: fix private key group owner and permissions (#1070) +RH-Commit: [1/1] 0382c3f671ae0fa9cab23dfad1f636967b012148 +RH-Bugzilla: 2013644 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohamed Gamal Morsy + +commit ee296ced9c0a61b1484d850b807c601bcd670ec1 +Author: Emanuele Giuseppe Esposito +Date: Tue Oct 19 21:32:10 2021 +0200 + + cc_ssh.py: fix private key group owner and permissions (#1070) + + When default host keys are created by sshd-keygen (/etc/ssh/ssh_host_*_key) + in RHEL/CentOS/Fedora, openssh it performs the following: + + # create new keys + if ! $KEYGEN -q -t $KEYTYPE -f $KEY -C '' -N '' >&/dev/null; then + exit 1 + fi + + # sanitize permissions + /usr/bin/chgrp ssh_keys $KEY + /usr/bin/chmod 640 $KEY + /usr/bin/chmod 644 $KEY.pub + Note that the group ssh_keys exists only in RHEL/CentOS/Fedora. + + Now that we disable sshd-keygen to allow only cloud-init to create + them, we miss the "sanitize permissions" part, where we set the group + owner as ssh_keys and the private key mode to 640. + + According to https://bugzilla.redhat.com/show_bug.cgi?id=2013644#c8, failing + to set group ownership and permissions like openssh does makes the RHEL openscap + tool generate an error. 
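+
+    Sketched in Python (illustrative only; ``keyfile`` stands for the private
+    host key path, and the real change goes through a ``util.get_group_id``
+    helper as shown in the hunks below), the sanitization mirrors the
+    sshd-keygen steps quoted above:
+
+        import grp
+        import os
+
+        try:
+            # group only exists on RHEL/CentOS/Fedora
+            gid = grp.getgrnam("ssh_keys").gr_gid
+        except KeyError:
+            gid = -1
+        if gid != -1:
+            os.chown(keyfile, -1, gid)         # chgrp ssh_keys $KEY
+            os.chmod(keyfile, 0o640)           # chmod 640 $KEY
+            os.chmod(keyfile + ".pub", 0o644)  # chmod 644 $KEY.pub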
+ + Signed-off-by: Emanuele Giuseppe Esposito eesposit@redhat.com + + RHBZ: 2013644 + +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/config/cc_ssh.py | 7 +++++++ + cloudinit/util.py | 14 ++++++++++++++ + 2 files changed, 21 insertions(+) + +diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py +index 05a16dbc..4e986c55 100755 +--- a/cloudinit/config/cc_ssh.py ++++ b/cloudinit/config/cc_ssh.py +@@ -240,6 +240,13 @@ def handle(_name, cfg, cloud, log, _args): + try: + out, err = subp.subp(cmd, capture=True, env=lang_c) + sys.stdout.write(util.decode_binary(out)) ++ ++ gid = util.get_group_id("ssh_keys") ++ if gid != -1: ++ # perform same "sanitize permissions" as sshd-keygen ++ os.chown(keyfile, -1, gid) ++ os.chmod(keyfile, 0o640) ++ os.chmod(keyfile + ".pub", 0o644) + except subp.ProcessExecutionError as e: + err = util.decode_binary(e.stderr).lower() + if (e.exit_code == 1 and +diff --git a/cloudinit/util.py b/cloudinit/util.py +index 343976ad..fe37ae89 100644 +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -1831,6 +1831,20 @@ def chmod(path, mode): + os.chmod(path, real_mode) + + ++def get_group_id(grp_name: str) -> int: ++ """ ++ Returns the group id of a group name, or -1 if no group exists ++ ++ @param grp_name: the name of the group ++ """ ++ gid = -1 ++ try: ++ gid = grp.getgrnam(grp_name).gr_gid ++ except KeyError: ++ LOG.debug("Group %s is not a valid group name", grp_name) ++ return gid ++ ++ + def get_permissions(path: str) -> int: + """ + Returns the octal permissions of the file/folder pointed by the path, +-- +2.27.0 + diff --git a/ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch b/ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch new file mode 100644 index 0000000..9ea95c1 --- /dev/null +++ b/ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch @@ -0,0 +1,87 @@ +From ea83e72b335e652b080fda66a075c0d1322ed6dc Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Tue, 7 Dec 2021 10:00:41 +0100 +Subject: [PATCH] cloudinit/net: handle two different routes for the same ip + (#1124) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 39: cloudinit/net: handle two different routes for the same ip (#1124) +RH-Commit: [1/1] 6810dc29ce786fbca96d2033386aa69c6ab65997 +RH-Bugzilla: 2028028 +RH-Acked-by: Mohamed Gamal Morsy +RH-Acked-by: Eduardo Otubo + +commit 0e25076b34fa995161b83996e866c0974cee431f +Author: Emanuele Giuseppe Esposito +Date: Mon Dec 6 18:34:26 2021 +0100 + + cloudinit/net: handle two different routes for the same ip (#1124) + + If we set a dhcp server side like this: + $ cat /var/tmp/cloud-init/cloud-init-dhcp-f0rie5tm/dhcp.leases + lease { + ... + option classless-static-routes 31.169.254.169.254 0.0.0.0,31.169.254.169.254 + 10.112.143.127,22.10.112.140 0.0.0.0,0 10.112.140.1; + ... + } + cloud-init fails to configure the routes via 'ip route add' because to there are + two different routes for 169.254.169.254: + + $ ip -4 route add 192.168.1.1/32 via 0.0.0.0 dev eth0 + $ ip -4 route add 192.168.1.1/32 via 10.112.140.248 dev eth0 + + But NetworkManager can handle such scenario successfully as it uses "ip route append". 
+ So change cloud-init to also use "ip route append" to fix the issue: + + $ ip -4 route append 192.168.1.1/32 via 0.0.0.0 dev eth0 + $ ip -4 route append 192.168.1.1/32 via 10.112.140.248 dev eth0 + + Signed-off-by: Emanuele Giuseppe Esposito + + RHBZ: #2003231 + +Conflicts: + cloudinit/net/tests/test_init.py: a mock call in + test_ephemeral_ipv4_network_with_rfc3442_static_routes is not + present downstream. + +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/net/__init__.py | 2 +- + cloudinit/net/tests/test_init.py | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py +index 385b7bcc..003efa2a 100644 +--- a/cloudinit/net/__init__.py ++++ b/cloudinit/net/__init__.py +@@ -1138,7 +1138,7 @@ class EphemeralIPv4Network(object): + if gateway != "0.0.0.0/0": + via_arg = ['via', gateway] + subp.subp( +- ['ip', '-4', 'route', 'add', net_address] + via_arg + ++ ['ip', '-4', 'route', 'append', net_address] + via_arg + + ['dev', self.interface], capture=True) + self.cleanup_cmds.insert( + 0, ['ip', '-4', 'route', 'del', net_address] + via_arg + +diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py +index 946f8ee2..2350837b 100644 +--- a/cloudinit/net/tests/test_init.py ++++ b/cloudinit/net/tests/test_init.py +@@ -719,10 +719,10 @@ class TestEphemeralIPV4Network(CiTestCase): + ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], + capture=True), + mock.call( +- ['ip', '-4', 'route', 'add', '169.254.169.254/32', ++ ['ip', '-4', 'route', 'append', '169.254.169.254/32', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( +- ['ip', '-4', 'route', 'add', '0.0.0.0/0', ++ ['ip', '-4', 'route', 'append', '0.0.0.0/0', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)] + expected_teardown_calls = [ + mock.call( +-- +2.27.0 + diff --git a/ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch b/ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch new file mode 100644 index 0000000..f257a67 --- /dev/null +++ b/ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch @@ -0,0 +1,173 @@ +From 005d0a98c69d154a00e9fd599c7fbe5aef73c933 Mon Sep 17 00:00:00 2001 +From: Amy Chen +Date: Thu, 25 Nov 2021 18:30:48 +0800 +Subject: [PATCH] fix error on upgrade caused by new vendordata2 attributes + +RH-Author: xiachen +RH-MergeRequest: 35: fix error on upgrade caused by new vendordata2 attributes +RH-Commit: [1/1] 9e00a7744838afbbdc5eb14628b7f572beba9f19 +RH-Bugzilla: 2021538 +RH-Acked-by: Mohamed Gamal Morsy +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Emanuele Giuseppe Esposito + +commit d132356cc361abef2d90d4073438f3ab759d5964 +Author: James Falcon +Date: Mon Apr 19 11:31:28 2021 -0500 + + fix error on upgrade caused by new vendordata2 attributes (#869) + + In #777, we added 'vendordata2' and 'vendordata2_raw' attributes to + the DataSource class, but didn't use the upgrade framework to deal + with an unpickle after upgrade. This commit adds the necessary + upgrade code. + + Additionally, added a smaller-scope upgrade test to our integration + tests that will be run on every CI run so we catch these issues + immediately in the future. 
+ + LP: #1922739 + +Signed-off-by: Amy Chen +--- + cloudinit/sources/__init__.py | 12 +++++++++++- + cloudinit/tests/test_upgrade.py | 4 ++++ + tests/integration_tests/clouds.py | 4 ++-- + tests/integration_tests/test_upgrade.py | 25 ++++++++++++++++++++++++- + 4 files changed, 41 insertions(+), 4 deletions(-) + +diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py +index 1ad1880d..7d74f8d9 100644 +--- a/cloudinit/sources/__init__.py ++++ b/cloudinit/sources/__init__.py +@@ -24,6 +24,7 @@ from cloudinit import util + from cloudinit.atomic_helper import write_json + from cloudinit.event import EventType + from cloudinit.filters import launch_index ++from cloudinit.persistence import CloudInitPickleMixin + from cloudinit.reporting import events + + DSMODE_DISABLED = "disabled" +@@ -134,7 +135,7 @@ URLParams = namedtuple( + 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) + + +-class DataSource(metaclass=abc.ABCMeta): ++class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): + + dsmode = DSMODE_NETWORK + default_locale = 'en_US.UTF-8' +@@ -196,6 +197,8 @@ class DataSource(metaclass=abc.ABCMeta): + # non-root users + sensitive_metadata_keys = ('merged_cfg', 'security-credentials',) + ++ _ci_pkl_version = 1 ++ + def __init__(self, sys_cfg, distro, paths, ud_proc=None): + self.sys_cfg = sys_cfg + self.distro = distro +@@ -218,6 +221,13 @@ class DataSource(metaclass=abc.ABCMeta): + else: + self.ud_proc = ud_proc + ++ def _unpickle(self, ci_pkl_version: int) -> None: ++ """Perform deserialization fixes for Paths.""" ++ if not hasattr(self, 'vendordata2'): ++ self.vendordata2 = None ++ if not hasattr(self, 'vendordata2_raw'): ++ self.vendordata2_raw = None ++ + def __str__(self): + return type_utils.obj_name(self) + +diff --git a/cloudinit/tests/test_upgrade.py b/cloudinit/tests/test_upgrade.py +index f79a2536..fd3c5812 100644 +--- a/cloudinit/tests/test_upgrade.py ++++ b/cloudinit/tests/test_upgrade.py +@@ -43,3 +43,7 @@ class TestUpgrade: + def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl): + """We always expect Networking.blacklist_drivers to be initialised.""" + assert previous_obj_pkl.distro.networking.blacklist_drivers is None ++ ++ def test_vendordata_exists(self, previous_obj_pkl): ++ assert previous_obj_pkl.vendordata2 is None ++ assert previous_obj_pkl.vendordata2_raw is None +diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py +index 9527a413..1d0b9d83 100644 +--- a/tests/integration_tests/clouds.py ++++ b/tests/integration_tests/clouds.py +@@ -100,14 +100,14 @@ class IntegrationCloud(ABC): + # Even if we're using the default key, it may still have a + # different name in the clouds, so we need to set it separately. 
+ self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME +- self._released_image_id = self._get_initial_image() ++ self.released_image_id = self._get_initial_image() + self.snapshot_id = None + + @property + def image_id(self): + if self.snapshot_id: + return self.snapshot_id +- return self._released_image_id ++ return self.released_image_id + + def emit_settings_to_log(self) -> None: + log.info( +diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py +index c20cb3c1..48e0691b 100644 +--- a/tests/integration_tests/test_upgrade.py ++++ b/tests/integration_tests/test_upgrade.py +@@ -1,4 +1,5 @@ + import logging ++import os + import pytest + import time + from pathlib import Path +@@ -8,6 +9,8 @@ from tests.integration_tests.conftest import ( + get_validated_source, + session_start_time, + ) ++from tests.integration_tests.instances import CloudInitSource ++ + + log = logging.getLogger('integration_testing') + +@@ -63,7 +66,7 @@ def test_upgrade(session_cloud: IntegrationCloud): + return # type checking doesn't understand that skip raises + + launch_kwargs = { +- 'image_id': session_cloud._get_initial_image(), ++ 'image_id': session_cloud.released_image_id, + } + + image = ImageSpecification.from_os_image() +@@ -93,6 +96,26 @@ def test_upgrade(session_cloud: IntegrationCloud): + instance.install_new_cloud_init(source, take_snapshot=False) + instance.execute('hostname something-else') + _restart(instance) ++ assert instance.execute('cloud-init status --wait --long').ok + _output_to_compare(instance, after_path, netcfg_path) + + log.info('Wrote upgrade test logs to %s and %s', before_path, after_path) ++ ++ ++@pytest.mark.ci ++@pytest.mark.ubuntu ++def test_upgrade_package(session_cloud: IntegrationCloud): ++ if get_validated_source(session_cloud) != CloudInitSource.DEB_PACKAGE: ++ not_run_message = 'Test only supports upgrading to build deb' ++ if os.environ.get('TRAVIS'): ++ # If this isn't running on CI, we should know ++ pytest.fail(not_run_message) ++ else: ++ pytest.skip(not_run_message) ++ ++ launch_kwargs = {'image_id': session_cloud.released_image_id} ++ ++ with session_cloud.launch(launch_kwargs=launch_kwargs) as instance: ++ instance.install_deb() ++ instance.restart() ++ assert instance.execute('cloud-init status --wait --long').ok +-- +2.27.0 + diff --git a/ci-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch b/ci-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch new file mode 100644 index 0000000..be1e283 --- /dev/null +++ b/ci-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch @@ -0,0 +1,65 @@ +From abf1adeae8211f5acd87dc63b03b2ed995047efd Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Thu, 20 May 2021 08:53:55 +0200 +Subject: [PATCH 1/2] rhel/cloud.cfg: remove ssh_genkeytypes in settings.py and + set in cloud.cfg + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 10: rhel/cloud.cfg: remove ssh_genkeytypes in settings.py and set in cloud.cfg +RH-Commit: [1/1] 6da989423b9b6e017afbac2f1af3649b0487310f +RH-Bugzilla: 1957532 +RH-Acked-by: Eduardo Otubo +RH-Acked-by: Cathy Avery +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohamed Gamal Morsy + +Currently genkeytypes in cloud.cfg is set to None, so together with +ssh_deletekeys=1 cloudinit on first boot it will just delete the existing +keys and not generate new ones. 
+ +Just removing that property in cloud.cfg is not enough, because +settings.py provides another empty default value that will be used +instead, resulting to no key generated even when the property is not defined. + +Removing genkeytypes also in settings.py will default to GENERATE_KEY_NAMES, +but since we want only 'rsa', 'ecdsa' and 'ed25519', add back genkeytypes in +cloud.cfg with the above defaults. + +Also remove ssh_deletekeys in settings.py as we always need +to 1 (and it also defaults to 1). + +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/settings.py | 2 -- + rhel/cloud.cfg | 2 +- + 2 files changed, 1 insertion(+), 3 deletions(-) + +diff --git a/cloudinit/settings.py b/cloudinit/settings.py +index 43a1490c..2acf2615 100644 +--- a/cloudinit/settings.py ++++ b/cloudinit/settings.py +@@ -49,8 +49,6 @@ CFG_BUILTIN = { + 'def_log_file_mode': 0o600, + 'log_cfgs': [], + 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], +- 'ssh_deletekeys': False, +- 'ssh_genkeytypes': [], + 'syslog_fix_perms': [], + 'system_info': { + 'paths': { +diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg +index 9ecba215..cbee197a 100644 +--- a/rhel/cloud.cfg ++++ b/rhel/cloud.cfg +@@ -7,7 +7,7 @@ ssh_pwauth: 0 + mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] + resize_rootfs_tmp: /dev + ssh_deletekeys: 1 +-ssh_genkeytypes: ~ ++ssh_genkeytypes: ['rsa', 'ecdsa', 'ed25519'] + syslog_fix_perms: ~ + disable_vmware_customization: false + +-- +2.27.0 + diff --git a/ci-ssh-util-allow-cloudinit-to-merge-all-ssh-keys-into-.patch b/ci-ssh-util-allow-cloudinit-to-merge-all-ssh-keys-into-.patch new file mode 100644 index 0000000..bdec823 --- /dev/null +++ b/ci-ssh-util-allow-cloudinit-to-merge-all-ssh-keys-into-.patch @@ -0,0 +1,653 @@ +From aeab67600eb2d5e483812620b56ce5fb031a57d6 Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Mon, 12 Jul 2021 21:47:37 +0200 +Subject: [PATCH] ssh-util: allow cloudinit to merge all ssh keys into a custom + user file, defined in AuthorizedKeysFile (#937) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 25: ssh-util: allow cloudinit to merge all ssh keys into a custom user file, defined in AuthorizedKeysFile (#937) +RH-Commit: [1/1] 27bbe94f3b9dd8734865766bd30b06cff83383ab (eesposit/cloud-init) +RH-Bugzilla: 1862967 +RH-Acked-by: Vitaly Kuznetsov +RH-Acked-by: Mohamed Gamal Morsy + +TESTED: By me and QA +BREW: 38030830 + +Conflicts: upstream patch modifies tests/integration_tests/util.py, that is +not present in RHEL. + +commit 9b52405c6f0de5e00d5ee9c1d13540425d8f6bf5 +Author: Emanuele Giuseppe Esposito +Date: Mon Jul 12 20:21:02 2021 +0200 + + ssh-util: allow cloudinit to merge all ssh keys into a custom user file, defined in AuthorizedKeysFile (#937) + + This patch aims to fix LP1911680, by analyzing the files provided + in sshd_config and merge all keys into an user-specific file. Also + introduces additional tests to cover this specific case. + + The file is picked by analyzing the path given in AuthorizedKeysFile. + + If it points inside the current user folder (path is /home/user/*), it + means it is an user-specific file, so we can copy all user-keys there. + If it contains a %u or %h, it means that there will be a specific + authorized_keys file for each user, so we can copy all user-keys there. + If no path points to an user-specific file, for example when only + /etc/ssh/authorized_keys is given, default to ~/.ssh/authorized_keys. 
+ Note that if there are more than a single user-specific file, the last + one will be picked. + + Signed-off-by: Emanuele Giuseppe Esposito + Co-authored-by: James Falcon + + LP: #1911680 + RHBZ:1862967 + +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/ssh_util.py | 22 +- + .../assets/keys/id_rsa.test1 | 38 +++ + .../assets/keys/id_rsa.test1.pub | 1 + + .../assets/keys/id_rsa.test2 | 38 +++ + .../assets/keys/id_rsa.test2.pub | 1 + + .../assets/keys/id_rsa.test3 | 38 +++ + .../assets/keys/id_rsa.test3.pub | 1 + + .../modules/test_ssh_keysfile.py | 85 ++++++ + tests/unittests/test_sshutil.py | 246 +++++++++++++++++- + 9 files changed, 456 insertions(+), 14 deletions(-) + create mode 100644 tests/integration_tests/assets/keys/id_rsa.test1 + create mode 100644 tests/integration_tests/assets/keys/id_rsa.test1.pub + create mode 100644 tests/integration_tests/assets/keys/id_rsa.test2 + create mode 100644 tests/integration_tests/assets/keys/id_rsa.test2.pub + create mode 100644 tests/integration_tests/assets/keys/id_rsa.test3 + create mode 100644 tests/integration_tests/assets/keys/id_rsa.test3.pub + create mode 100644 tests/integration_tests/modules/test_ssh_keysfile.py + +diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py +index c08042d6..89057262 100644 +--- a/cloudinit/ssh_util.py ++++ b/cloudinit/ssh_util.py +@@ -252,13 +252,15 @@ def render_authorizedkeysfile_paths(value, homedir, username): + def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): + (ssh_dir, pw_ent) = users_ssh_info(username) + default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys') ++ user_authorizedkeys_file = default_authorizedkeys_file + auth_key_fns = [] + with util.SeLinuxGuard(ssh_dir, recursive=True): + try: + ssh_cfg = parse_ssh_config_map(sshd_cfg_file) ++ key_paths = ssh_cfg.get("authorizedkeysfile", ++ "%h/.ssh/authorized_keys") + auth_key_fns = render_authorizedkeysfile_paths( +- ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys"), +- pw_ent.pw_dir, username) ++ key_paths, pw_ent.pw_dir, username) + + except (IOError, OSError): + # Give up and use a default key filename +@@ -267,8 +269,22 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): + "config from %r, using 'AuthorizedKeysFile' file " + "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) + ++ # check if one of the keys is the user's one ++ for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns): ++ if any([ ++ '%u' in key_path, ++ '%h' in key_path, ++ auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir)) ++ ]): ++ user_authorizedkeys_file = auth_key_fn ++ ++ if user_authorizedkeys_file != default_authorizedkeys_file: ++ LOG.debug( ++ "AuthorizedKeysFile has an user-specific authorized_keys, " ++ "using %s", user_authorizedkeys_file) ++ + # always store all the keys in the user's private file +- return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) ++ return (user_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) + + + def setup_user_keys(keys, username, options=None): +diff --git a/tests/integration_tests/assets/keys/id_rsa.test1 b/tests/integration_tests/assets/keys/id_rsa.test1 +new file mode 100644 +index 00000000..bd4c822e +--- /dev/null ++++ b/tests/integration_tests/assets/keys/id_rsa.test1 +@@ -0,0 +1,38 @@ ++-----BEGIN OPENSSH PRIVATE KEY----- ++b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn ++NhAAAAAwEAAQAAAYEAtRlG96aJ23URvAgO/bBsuLl+lquc350aSwV98/i8vlvOn5GVcHye 
++t/rXQg4lZ4s0owG3kWyQFY8nvTk+G+UNU8fN0anAzBDi+4MzsejkF9scjTMFmXVrIpICqV ++3bYQNjPv6r+ubQdkD01du3eB9t5/zl84gtshp0hBdofyz8u1/A25s7fVU67GyI7PdKvaS+ ++yvJSInZnb2e9VQzfJC+qAnN7gUZatBKjdgUtJeiUUeDaVnaS17b0aoT9iBO0sIcQtOTBlY ++lCjFt1TAMLZ64Hj3SfGZB7Yj0Z+LzFB2IWX1zzsjI68YkYPKOSL/NYhQU9e55kJQ7WnngN ++HY/2n/A7dNKSFDmgM5c9IWgeZ7fjpsfIYAoJ/CAxFIND+PEHd1gCS6xoEhaUVyh5WH/Xkw ++Kv1nx4AiZ2BFCE+75kySRLZUJ+5y0r3DU5ktMXeURzVIP7pu0R8DCul+GU+M/+THyWtAEO ++geaNJ6fYpo2ipDhbmTYt3kk2lMIapRxGBFs+37sdAAAFgGGJssNhibLDAAAAB3NzaC1yc2 ++EAAAGBALUZRvemidt1EbwIDv2wbLi5fparnN+dGksFffP4vL5bzp+RlXB8nrf610IOJWeL ++NKMBt5FskBWPJ705PhvlDVPHzdGpwMwQ4vuDM7Ho5BfbHI0zBZl1ayKSAqld22EDYz7+q/ ++rm0HZA9NXbt3gfbef85fOILbIadIQXaH8s/LtfwNubO31VOuxsiOz3Sr2kvsryUiJ2Z29n ++vVUM3yQvqgJze4FGWrQSo3YFLSXolFHg2lZ2kte29GqE/YgTtLCHELTkwZWJQoxbdUwDC2 ++euB490nxmQe2I9Gfi8xQdiFl9c87IyOvGJGDyjki/zWIUFPXueZCUO1p54DR2P9p/wO3TS ++khQ5oDOXPSFoHme346bHyGAKCfwgMRSDQ/jxB3dYAkusaBIWlFcoeVh/15MCr9Z8eAImdg ++RQhPu+ZMkkS2VCfuctK9w1OZLTF3lEc1SD+6btEfAwrpfhlPjP/kx8lrQBDoHmjSen2KaN ++oqQ4W5k2Ld5JNpTCGqUcRgRbPt+7HQAAAAMBAAEAAAGBAJJCTOd70AC2ptEGbR0EHHqADT ++Wgefy7A94tHFEqxTy0JscGq/uCGimaY7kMdbcPXT59B4VieWeAC2cuUPP0ZHQSfS5ke7oT ++tU3N47U+0uBVbNS4rUAH7bOo2o9wptnOA5x/z+O+AARRZ6tEXQOd1oSy4gByLf2Wkh2QTi ++vP6Hln1vlFgKEzcXg6G8fN3MYWxKRhWmZM3DLERMvorlqqSBLcs5VvfZfLKcsKWTExioAq ++KgwEjYm8T9+rcpsw1xBus3j9k7wCI1Sus6PCDjq0pcYKLMYM7p8ygnU2tRYrOztdIxgWRA ++w/1oenm1Mqq2tV5xJcBCwCLOGe6SFwkIRywOYc57j5McH98Xhhg9cViyyBdXy/baF0mro+ ++qPhOsWDxqwD4VKZ9UmQ6O8kPNKcc7QcIpFJhcO0g9zbp/MT0KueaWYrTKs8y4lUkTT7Xz6 +++MzlR122/JwlAbBo6Y2kWtB+y+XwBZ0BfyJsm2czDhKm7OI5KfuBNhq0tFfKwOlYBq4QAA ++AMAyvUof1R8LLISkdO3EFTKn5RGNkPPoBJmGs6LwvU7NSjjLj/wPQe4jsIBc585tvbrddp ++60h72HgkZ5tqOfdeBYOKqX0qQQBHUEvI6M+NeQTQRev8bCHMLXQ21vzpClnrwNzlja359E ++uTRfiPRwIlyPLhOUiClBDSAnBI9h82Hkk3zzsQ/xGfsPB7iOjRbW69bMRSVCRpeweCVmWC ++77DTsEOq69V2TdljhQNIXE5OcOWonIlfgPiI74cdd+dLhzc/AAAADBAO1/JXd2kYiRyNkZ ++aXTLcwiSgBQIYbobqVP3OEtTclr0P1JAvby3Y4cCaEhkenx+fBqgXAku5lKM+U1Q9AEsMk ++cjIhaDpb43rU7GPjMn4zHwgGsEKd5pC1yIQ2PlK+cHanAdsDjIg+6RR+fuvid/mBeBOYXb ++Py0sa3HyekLJmCdx4UEyNASoiNaGFLQVAqo+RACsXy6VMxFH5dqDYlvwrfUQLwxJmse9Vb ++GEuuPAsklNugZqssC2XOIujFVUpslduQAAAMEAwzVHQVtsc3icCSzEAARpDTUdTbI29OhB ++/FMBnjzS9/3SWfLuBOSm9heNCHs2jdGNb8cPdKZuY7S9Fx6KuVUPyTbSSYkjj0F4fTeC9g ++0ym4p4UWYdF67WSWwLORkaG8K0d+G/CXkz8hvKUg6gcZWKBHAE1ROrHu1nsc8v7mkiKq4I ++bnTw5Q9TgjbWcQWtgPq0wXyyl/K8S1SFdkMCTOHDD0RQ+jTV2WNGVwFTodIRHenX+Rw2g4 ++CHbTWbsFrHR1qFAAAACmphbWVzQG5ld3Q= ++-----END OPENSSH PRIVATE KEY----- +diff --git a/tests/integration_tests/assets/keys/id_rsa.test1.pub b/tests/integration_tests/assets/keys/id_rsa.test1.pub +new file mode 100644 +index 00000000..3d2e26e1 +--- /dev/null ++++ b/tests/integration_tests/assets/keys/id_rsa.test1.pub +@@ -0,0 +1 @@ ++ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC1GUb3ponbdRG8CA79sGy4uX6Wq5zfnRpLBX3z+Ly+W86fkZVwfJ63+tdCDiVnizSjAbeRbJAVjye9OT4b5Q1Tx83RqcDMEOL7gzOx6OQX2xyNMwWZdWsikgKpXdthA2M+/qv65tB2QPTV27d4H23n/OXziC2yGnSEF2h/LPy7X8Dbmzt9VTrsbIjs90q9pL7K8lIidmdvZ71VDN8kL6oCc3uBRlq0EqN2BS0l6JRR4NpWdpLXtvRqhP2IE7SwhxC05MGViUKMW3VMAwtnrgePdJ8ZkHtiPRn4vMUHYhZfXPOyMjrxiRg8o5Iv81iFBT17nmQlDtaeeA0dj/af8Dt00pIUOaAzlz0haB5nt+Omx8hgCgn8IDEUg0P48Qd3WAJLrGgSFpRXKHlYf9eTAq/WfHgCJnYEUIT7vmTJJEtlQn7nLSvcNTmS0xd5RHNUg/um7RHwMK6X4ZT4z/5MfJa0AQ6B5o0np9imjaKkOFuZNi3eSTaUwhqlHEYEWz7fux0= test1@host +diff --git a/tests/integration_tests/assets/keys/id_rsa.test2 b/tests/integration_tests/assets/keys/id_rsa.test2 +new file mode 100644 +index 00000000..5854d901 +--- /dev/null ++++ b/tests/integration_tests/assets/keys/id_rsa.test2 +@@ -0,0 
+1,38 @@ ++-----BEGIN OPENSSH PRIVATE KEY----- ++b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn ++NhAAAAAwEAAQAAAYEAvK50D2PWOc4ikyHVRJS6tDhqzjL5cKiivID4p1X8BYCVw83XAEGO ++LnItUyVXHNADlh6fpVq1NY6A2JVtygoPF6ZFx8ph7IWMmnhDdnxLLyGsbhd1M1tiXJD/R+ ++3WnGHRJ4PKrQavMLgqHRrieV3QVVfjFSeo6jX/4TruP6ZmvITMZWJrXaGphxJ/pPykEdkO ++i8AmKU9FNviojyPS2nNtj9B/635IdgWvrd7Vf5Ycsw9MR55LWSidwa856RH62Yl6LpEGTH ++m1lJiMk1u88JPSqvohhaUkLKkFpcQwcB0m76W1KOyllJsmX8bNXrlZsI+WiiYI7Xl5vQm2 ++17DEuNeavtPAtDMxu8HmTg2UJ55Naxehbfe2lx2k5kYGGw3i1O1OVN2pZ2/OB71LucYd/5 ++qxPaz03wswcGOJYGPkNc40vdES/Scc7Yt8HsnZuzqkyOgzn0HiUCzoYUYLYTpLf+yGmwxS ++yAEY056aOfkCsboKHOKiOmlJxNaZZFQkX1evep4DAAAFgC7HMbUuxzG1AAAAB3NzaC1yc2 ++EAAAGBALyudA9j1jnOIpMh1USUurQ4as4y+XCooryA+KdV/AWAlcPN1wBBji5yLVMlVxzQ ++A5Yen6VatTWOgNiVbcoKDxemRcfKYeyFjJp4Q3Z8Sy8hrG4XdTNbYlyQ/0ft1pxh0SeDyq ++0GrzC4Kh0a4nld0FVX4xUnqOo1/+E67j+mZryEzGVia12hqYcSf6T8pBHZDovAJilPRTb4 ++qI8j0tpzbY/Qf+t+SHYFr63e1X+WHLMPTEeeS1koncGvOekR+tmJei6RBkx5tZSYjJNbvP ++CT0qr6IYWlJCypBaXEMHAdJu+ltSjspZSbJl/GzV65WbCPloomCO15eb0JttewxLjXmr7T ++wLQzMbvB5k4NlCeeTWsXoW33tpcdpOZGBhsN4tTtTlTdqWdvzge9S7nGHf+asT2s9N8LMH ++BjiWBj5DXONL3REv0nHO2LfB7J2bs6pMjoM59B4lAs6GFGC2E6S3/shpsMUsgBGNOemjn5 ++ArG6ChziojppScTWmWRUJF9Xr3qeAwAAAAMBAAEAAAGASj/kkEHbhbfmxzujL2/P4Sfqb+ ++aDXqAeGkwujbs6h/fH99vC5ejmSMTJrVSeaUo6fxLiBDIj6UWA0rpLEBzRP59BCpRL4MXV ++RNxav/+9nniD4Hb+ug0WMhMlQmsH71ZW9lPYqCpfOq7ec8GmqdgPKeaCCEspH7HMVhfYtd ++eHylwAC02lrpz1l5/h900sS5G9NaWR3uPA+xbzThDs4uZVkSidjlCNt1QZhDSSk7jA5n34 ++qJ5UTGu9WQDZqyxWKND+RIyQuFAPGQyoyCC1FayHO2sEhT5qHuumL14Mn81XpzoXFoKyql ++rhBDe+pHhKArBYt92Evch0k1ABKblFxtxLXcvk4Fs7pHi+8k4+Cnazej2kcsu1kURlMZJB ++w2QT/8BV4uImbH05LtyscQuwGzpIoxqrnHrvg5VbohStmhoOjYybzqqW3/M0qhkn5JgTiy ++dJcHRJisRnAcmbmEchYtLDi6RW1e022H4I9AFXQqyr5HylBq6ugtWcFCsrcX8ibZ8xAAAA ++wQCAOPgwae6yZLkrYzRfbxZtGKNmhpI0EtNSDCHYuQQapFZJe7EFENs/VAaIiiut0yajGj ++c3aoKcwGIoT8TUM8E3GSNW6+WidUOC7H6W+/6N2OYZHRBACGz820xO+UBCl2oSk+dLBlfr ++IQzBGUWn5uVYCs0/2nxfCdFyHtMK8dMF/ypbdG+o1rXz5y9b7PVG6Mn+o1Rjsdkq7VERmy ++Pukd8hwATOIJqoKl3TuFyBeYFLqe+0e7uTeswQFw17PF31VjAAAADBAOpJRQb8c6qWqsvv ++vkve0uMuL0DfWW0G6+SxjPLcV6aTWL5xu0Grd8uBxDkkHU/CDrAwpchXyuLsvbw21Eje/u ++U5k9nLEscWZwcX7odxlK+EfAY2Bf5+Hd9bH5HMzTRJH8KkWK1EppOLPyiDxz4LZGzPLVyv ++/1PgSuvXkSWk1KIE4SvSemyxGX2tPVI6uO+URqevfnPOS1tMB7BMQlgkR6eh4bugx9UYx9 ++mwlXonNa4dN0iQxZ7N4rKFBbT/uyB2bQAAAMEAzisnkD8k9Tn8uyhxpWLHwb03X4ZUUHDV ++zu15e4a8dZ+mM8nHO986913Xz5JujlJKkGwFTvgWkIiR2zqTEauZHARH7gANpaweTm6lPd ++E4p2S0M3ulY7xtp9lCFIrDhMPPkGq8SFZB6qhgucHcZSRLq6ZDou3S2IdNOzDTpBtkhRCS ++0zFcdTLh3zZweoy8HGbW36bwB6s1CIL76Pd4F64i0Ms9CCCU6b+E5ArFhYQIsXiDbgHWbD ++tZRSm2GEgnDGAvAAAACmphbWVzQG5ld3Q= ++-----END OPENSSH PRIVATE KEY----- +diff --git a/tests/integration_tests/assets/keys/id_rsa.test2.pub b/tests/integration_tests/assets/keys/id_rsa.test2.pub +new file mode 100644 +index 00000000..f3831a57 +--- /dev/null ++++ b/tests/integration_tests/assets/keys/id_rsa.test2.pub +@@ -0,0 +1 @@ ++ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8rnQPY9Y5ziKTIdVElLq0OGrOMvlwqKK8gPinVfwFgJXDzdcAQY4uci1TJVcc0AOWHp+lWrU1joDYlW3KCg8XpkXHymHshYyaeEN2fEsvIaxuF3UzW2JckP9H7dacYdEng8qtBq8wuCodGuJ5XdBVV+MVJ6jqNf/hOu4/pma8hMxlYmtdoamHEn+k/KQR2Q6LwCYpT0U2+KiPI9Lac22P0H/rfkh2Ba+t3tV/lhyzD0xHnktZKJ3BrznpEfrZiXoukQZMebWUmIyTW7zwk9Kq+iGFpSQsqQWlxDBwHSbvpbUo7KWUmyZfxs1euVmwj5aKJgjteXm9CbbXsMS415q+08C0MzG7weZODZQnnk1rF6Ft97aXHaTmRgYbDeLU7U5U3alnb84HvUu5xh3/mrE9rPTfCzBwY4lgY+Q1zjS90RL9Jxzti3weydm7OqTI6DOfQeJQLOhhRgthOkt/7IabDFLIARjTnpo5+QKxugoc4qI6aUnE1plkVCRfV696ngM= test2@host +diff --git 
a/tests/integration_tests/assets/keys/id_rsa.test3 b/tests/integration_tests/assets/keys/id_rsa.test3 +new file mode 100644 +index 00000000..2596c762 +--- /dev/null ++++ b/tests/integration_tests/assets/keys/id_rsa.test3 +@@ -0,0 +1,38 @@ ++-----BEGIN OPENSSH PRIVATE KEY----- ++b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn ++NhAAAAAwEAAQAAAYEApPG4MdkYQKD57/qreFrh9GRC22y66qZOWZWRjC887rrbvBzO69hV ++yJpTIXleJEvpWiHYcjMR5G6NNFsnNtZ4fxDqmSc4vcFj53JsE/XNqLKq6psXadCb5vkNpG ++bxA+Z5bJlzJ969PgJIIEbgc86sei4kgR2MuPWqtZbY5GkpNCTqWuLYeFK+14oFruA2nyWH ++9MOIRDHK/d597psHy+LTMtymO7ZPhO571abKw6jvvwiSeDxVE9kV7KAQIuM9/S3gftvgQQ ++ron3GL34pgmIabdSGdbfHqGDooryJhlbquJZELBN236KgRNTCAjVvUzjjQr1eRP3xssGwV ++O6ECBGCQLl/aYogAgtwnwj9iXqtfiLK3EwlgjquU4+JQ0CVtLhG3gIZB+qoMThco0pmHTr ++jtfQCwrztsBBFunSa2/CstuV1mQ5O5ZrZ6ACo9yPRBNkns6+CiKdtMtCtzi3k2RDz9jpYm ++Pcak03Lr7IkdC1Tp6+jA+//yPHSO1o4CqW89IQzNAAAFgEUd7lZFHe5WAAAAB3NzaC1yc2 ++EAAAGBAKTxuDHZGECg+e/6q3ha4fRkQttsuuqmTlmVkYwvPO6627wczuvYVciaUyF5XiRL ++6Voh2HIzEeRujTRbJzbWeH8Q6pknOL3BY+dybBP1zaiyquqbF2nQm+b5DaRm8QPmeWyZcy ++fevT4CSCBG4HPOrHouJIEdjLj1qrWW2ORpKTQk6lri2HhSvteKBa7gNp8lh/TDiEQxyv3e ++fe6bB8vi0zLcpju2T4Tue9WmysOo778Ikng8VRPZFeygECLjPf0t4H7b4EEK6J9xi9+KYJ ++iGm3UhnW3x6hg6KK8iYZW6riWRCwTdt+ioETUwgI1b1M440K9XkT98bLBsFTuhAgRgkC5f ++2mKIAILcJ8I/Yl6rX4iytxMJYI6rlOPiUNAlbS4Rt4CGQfqqDE4XKNKZh0647X0AsK87bA ++QRbp0mtvwrLbldZkOTuWa2egAqPcj0QTZJ7OvgoinbTLQrc4t5NkQ8/Y6WJj3GpNNy6+yJ ++HQtU6evowPv/8jx0jtaOAqlvPSEMzQAAAAMBAAEAAAGAGaqbdPZJNdVWzyb8g6/wtSzc0n ++Qq6dSTIJGLonq/So69HpqFAGIbhymsger24UMGvsXBfpO/1wH06w68HWZmPa+OMeLOi4iK ++WTuO4dQ/+l5DBlq32/lgKSLcIpb6LhcxEdsW9j9Mx1dnjc45owun/yMq/wRwH1/q/nLIsV ++JD3R9ZcGcYNDD8DWIm3D17gmw+qbG7hJES+0oh4n0xS2KyZpm7LFOEMDVEA8z+hE/HbryQ ++vjD1NC91n+qQWD1wKfN3WZDRwip3z1I5VHMpvXrA/spHpa9gzHK5qXNmZSz3/dfA1zHjCR ++2dHjJnrIUH8nyPfw8t+COC+sQBL3Nr0KUWEFPRM08cOcQm4ctzg17aDIZBONjlZGKlReR8 ++1zfAw84Q70q2spLWLBLXSFblHkaOfijEbejIbaz2UUEQT27WD7RHAORdQlkx7eitk66T9d ++DzIq/cpYhm5Fs8KZsh3PLldp9nsHbD2Oa9J9LJyI4ryuIW0mVwRdvPSiiYi3K+mDCpAAAA ++wBe+ugEEJ+V7orb1f4Zez0Bd4FNkEc52WZL4CWbaCtM+ZBg5KnQ6xW14JdC8IS9cNi/I5P ++yLsBvG4bWPLGgQruuKY6oLueD6BFnKjqF6ACUCiSQldh4BAW1nYc2U48+FFvo3ZQyudFSy ++QEFlhHmcaNMDo0AIJY5Xnq2BG3nEX7AqdtZ8hhenHwLCRQJatDwSYBHDpSDdh9vpTnGp/2 ++0jBz25Ko4UANzvSAc3sA4yN3jfpoM366TgdNf8x3g1v7yljQAAAMEA0HSQjzH5nhEwB58k ++mYYxnBYp1wb86zIuVhAyjZaeinvBQSTmLow8sXIHcCVuD3CgBezlU2SX5d9YuvRU9rcthi ++uzn4wWnbnzYy4SwzkMJXchUAkumFVD8Hq5TNPh2Z+033rLLE08EhYypSeVpuzdpFoStaS9 ++3DUZA2bR/zLZI9MOVZRUcYImNegqIjOYHY8Sbj3/0QPV6+WpUJFMPvvedWhfaOsRMTA6nr ++VLG4pxkrieVl0UtuRGbzD/exXhXVi7AAAAwQDKkJj4ez/+KZFYlZQKiV0BrfUFcgS6ElFM ++2CZIEagCtu8eedrwkNqx2FUX33uxdvUTr4c9I3NvWeEEGTB9pgD4lh1x/nxfuhyGXtimFM ++GnznGV9oyz0DmKlKiKSEGwWf5G+/NiiCwwVJ7wsQQm7TqNtkQ9b8MhWWXC7xlXKUs7dmTa ++e8AqAndCCMEnbS1UQFO/R5PNcZXkFWDggLQ/eWRYKlrXgdnUgH6h0saOcViKpNJBUXb3+x ++eauhOY52PS/BcAAAAKamFtZXNAbmV3dAE= ++-----END OPENSSH PRIVATE KEY----- +diff --git a/tests/integration_tests/assets/keys/id_rsa.test3.pub b/tests/integration_tests/assets/keys/id_rsa.test3.pub +new file mode 100644 +index 00000000..057db632 +--- /dev/null ++++ b/tests/integration_tests/assets/keys/id_rsa.test3.pub +@@ -0,0 +1 @@ ++ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCk8bgx2RhAoPnv+qt4WuH0ZELbbLrqpk5ZlZGMLzzuutu8HM7r2FXImlMheV4kS+laIdhyMxHkbo00Wyc21nh/EOqZJzi9wWPncmwT9c2osqrqmxdp0Jvm+Q2kZvED5nlsmXMn3r0+AkggRuBzzqx6LiSBHYy49aq1ltjkaSk0JOpa4th4Ur7XigWu4DafJYf0w4hEMcr93n3umwfL4tMy3KY7tk+E7nvVpsrDqO+/CJJ4PFUT2RXsoBAi4z39LeB+2+BBCuifcYvfimCYhpt1IZ1t8eoYOiivImGVuq4lkQsE3bfoqBE1MICNW9TOONCvV5E/fGywbBU7oQIEYJAuX9piiACC3CfCP2Jeq1+IsrcTCWCOq5Tj4lDQJW0uEbeAhkH6qgxOFyjSmYdOuO19ALCvO2wEEW6dJrb8Ky25XWZDk7lmtnoAKj3I9EE2Sezr4KIp20y0K3OLeTZEPP2OliY9xqTTcuvsiR0LVOnr6MD7//I8dI7WjgKpbz0hDM0= test3@host +diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py +new file mode 100644 +index 00000000..f82d7649 +--- /dev/null ++++ b/tests/integration_tests/modules/test_ssh_keysfile.py +@@ -0,0 +1,85 @@ ++import paramiko ++import pytest ++from io import StringIO ++from paramiko.ssh_exception import SSHException ++ ++from tests.integration_tests.instances import IntegrationInstance ++from tests.integration_tests.util import get_test_rsa_keypair ++ ++TEST_USER1_KEYS = get_test_rsa_keypair('test1') ++TEST_USER2_KEYS = get_test_rsa_keypair('test2') ++TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3') ++ ++USERDATA = """\ ++#cloud-config ++bootcmd: ++ - sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile /etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' /etc/ssh/sshd_config ++ssh_authorized_keys: ++ - {default} ++users: ++- default ++- name: test_user1 ++ ssh_authorized_keys: ++ - {user1} ++- name: test_user2 ++ ssh_authorized_keys: ++ - {user2} ++""".format( # noqa: E501 ++ default=TEST_DEFAULT_KEYS.public_key, ++ user1=TEST_USER1_KEYS.public_key, ++ user2=TEST_USER2_KEYS.public_key, ++) ++ ++ ++@pytest.mark.ubuntu ++@pytest.mark.user_data(USERDATA) ++def test_authorized_keys(client: IntegrationInstance): ++ expected_keys = [ ++ ('test_user1', '/home/test_user1/.ssh/authorized_keys2', ++ TEST_USER1_KEYS), ++ ('test_user2', '/home/test_user2/.ssh/authorized_keys2', ++ TEST_USER2_KEYS), ++ ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', ++ TEST_DEFAULT_KEYS), ++ ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), ++ ] ++ ++ for user, filename, keys in expected_keys: ++ contents = client.read_from_file(filename) ++ if user in ['ubuntu', 'root']: ++ # Our personal public key gets added by pycloudlib ++ lines = contents.split('\n') ++ assert len(lines) == 2 ++ assert keys.public_key.strip() in contents ++ else: ++ assert contents.strip() == keys.public_key.strip() ++ ++ # Ensure we can actually connect ++ ssh = paramiko.SSHClient() ++ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ++ paramiko_key = paramiko.RSAKey.from_private_key(StringIO( ++ keys.private_key)) ++ ++ # Will fail with AuthenticationException if ++ # we cannot connect ++ ssh.connect( ++ client.instance.ip, ++ username=user, ++ pkey=paramiko_key, ++ look_for_keys=False, ++ allow_agent=False, ++ ) ++ ++ # Ensure other uses can't connect using our key ++ other_users = [u[0] for u in expected_keys if u[2] != keys] ++ for other_user in other_users: ++ with pytest.raises(SSHException): ++ print('trying to connect as {} with key from {}'.format( ++ other_user, user)) ++ ssh.connect( ++ client.instance.ip, ++ username=other_user, ++ pkey=paramiko_key, ++ look_for_keys=False, ++ allow_agent=False, ++ ) +diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py +index fd1d1bac..bcb8044f 100644 +--- a/tests/unittests/test_sshutil.py ++++ b/tests/unittests/test_sshutil.py +@@ -570,20 +570,33 @@ class 
TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): + ssh_util.render_authorizedkeysfile_paths( + "%h/.keys", "/homedirs/bobby", "bobby")) + ++ def test_all(self): ++ self.assertEqual( ++ ["/homedirs/bobby/.keys", "/homedirs/bobby/.secret/keys", ++ "/keys/path1", "/opt/bobby/keys"], ++ ssh_util.render_authorizedkeysfile_paths( ++ "%h/.keys .secret/keys /keys/path1 /opt/%u/keys", ++ "/homedirs/bobby", "bobby")) ++ + + class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_order1(self, m_getpwnam): +- fpw = FakePwEnt(pw_name='bobby', pw_dir='/home2/bobby') ++ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') + m_getpwnam.return_value = fpw +- authorized_keys = self.tmp_path('authorized_keys') ++ user_ssh_folder = "%s/.ssh" % fpw.pw_dir ++ ++ # /tmp/home2/bobby/.ssh/authorized_keys = rsa ++ authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + +- user_keys = self.tmp_path('user_keys') ++ # /tmp/home2/bobby/.ssh/user_keys = dsa ++ user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) + util.write_file(user_keys, VALID_CONTENT['dsa']) + +- sshd_config = self.tmp_path('sshd_config') ++ # /tmp/sshd_config ++ sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, + "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) +@@ -593,33 +606,244 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + +- self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) ++ self.assertEqual(user_keys, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_order2(self, m_getpwnam): +- fpw = FakePwEnt(pw_name='suzie', pw_dir='/home/suzie') ++ fpw = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') + m_getpwnam.return_value = fpw +- authorized_keys = self.tmp_path('authorized_keys') ++ user_ssh_folder = "%s/.ssh" % fpw.pw_dir ++ ++ # /tmp/home/suzie/.ssh/authorized_keys = rsa ++ authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + +- user_keys = self.tmp_path('user_keys') ++ # /tmp/home/suzie/.ssh/user_keys = dsa ++ user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) + util.write_file(user_keys, VALID_CONTENT['dsa']) + +- sshd_config = self.tmp_path('sshd_config') ++ # /tmp/sshd_config ++ sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, +- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) ++ "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys) + ) + + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( +- fpw.pw_name, sshd_config ++ fpw.pw_name, sshd_config) ++ content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ ++ self.assertEqual(authorized_keys, auth_key_fn) ++ self.assertTrue(VALID_CONTENT['rsa'] in content) ++ self.assertTrue(VALID_CONTENT['dsa'] in content) ++ ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ def test_multiple_authorizedkeys_file_local_global(self, m_getpwnam): ++ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') ++ m_getpwnam.return_value = fpw ++ user_ssh_folder = "%s/.ssh" % fpw.pw_dir ++ ++ # 
/tmp/home2/bobby/.ssh/authorized_keys = rsa ++ authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) ++ util.write_file(authorized_keys, VALID_CONTENT['rsa']) ++ ++ # /tmp/home2/bobby/.ssh/user_keys = dsa ++ user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) ++ util.write_file(user_keys, VALID_CONTENT['dsa']) ++ ++ # /tmp/etc/ssh/authorized_keys = ecdsa ++ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', ++ dir="/tmp") ++ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) ++ ++ # /tmp/sshd_config ++ sshd_config = self.tmp_path('sshd_config', dir="/tmp") ++ util.write_file( ++ sshd_config, ++ "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, ++ user_keys, authorized_keys) ++ ) ++ ++ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( ++ fpw.pw_name, sshd_config) ++ content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ ++ self.assertEqual(authorized_keys, auth_key_fn) ++ self.assertTrue(VALID_CONTENT['rsa'] in content) ++ self.assertTrue(VALID_CONTENT['ecdsa'] in content) ++ self.assertTrue(VALID_CONTENT['dsa'] in content) ++ ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ def test_multiple_authorizedkeys_file_local_global2(self, m_getpwnam): ++ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') ++ m_getpwnam.return_value = fpw ++ user_ssh_folder = "%s/.ssh" % fpw.pw_dir ++ ++ # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa ++ authorized_keys = self.tmp_path('authorized_keys2', ++ dir=user_ssh_folder) ++ util.write_file(authorized_keys, VALID_CONTENT['rsa']) ++ ++ # /tmp/home2/bobby/.ssh/user_keys3 = dsa ++ user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) ++ util.write_file(user_keys, VALID_CONTENT['dsa']) ++ ++ # /tmp/etc/ssh/authorized_keys = ecdsa ++ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', ++ dir="/tmp") ++ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) ++ ++ # /tmp/sshd_config ++ sshd_config = self.tmp_path('sshd_config', dir="/tmp") ++ util.write_file( ++ sshd_config, ++ "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, ++ authorized_keys, user_keys) ++ ) ++ ++ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( ++ fpw.pw_name, sshd_config) ++ content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ ++ self.assertEqual(user_keys, auth_key_fn) ++ self.assertTrue(VALID_CONTENT['rsa'] in content) ++ self.assertTrue(VALID_CONTENT['ecdsa'] in content) ++ self.assertTrue(VALID_CONTENT['dsa'] in content) ++ ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ def test_multiple_authorizedkeys_file_global(self, m_getpwnam): ++ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') ++ m_getpwnam.return_value = fpw ++ ++ # /tmp/etc/ssh/authorized_keys = rsa ++ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', ++ dir="/tmp") ++ util.write_file(authorized_keys_global, VALID_CONTENT['rsa']) ++ ++ # /tmp/sshd_config ++ sshd_config = self.tmp_path('sshd_config') ++ util.write_file( ++ sshd_config, ++ "AuthorizedKeysFile %s" % (authorized_keys_global) + ) ++ ++ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( ++ fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) ++ ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ def test_multiple_authorizedkeys_file_multiuser(self, m_getpwnam): ++ fpw = FakePwEnt(pw_name='bobby', 
pw_dir='/tmp/home2/bobby') ++ m_getpwnam.return_value = fpw ++ user_ssh_folder = "%s/.ssh" % fpw.pw_dir ++ # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa ++ authorized_keys = self.tmp_path('authorized_keys2', ++ dir=user_ssh_folder) ++ util.write_file(authorized_keys, VALID_CONTENT['rsa']) ++ # /tmp/home2/bobby/.ssh/user_keys3 = dsa ++ user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) ++ util.write_file(user_keys, VALID_CONTENT['dsa']) ++ ++ fpw2 = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') ++ user_ssh_folder = "%s/.ssh" % fpw2.pw_dir ++ # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com ++ authorized_keys2 = self.tmp_path('authorized_keys2', ++ dir=user_ssh_folder) ++ util.write_file(authorized_keys2, ++ VALID_CONTENT['ssh-xmss@openssh.com']) ++ ++ # /tmp/etc/ssh/authorized_keys = ecdsa ++ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', ++ dir="/tmp") ++ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) ++ ++ # /tmp/sshd_config ++ sshd_config = self.tmp_path('sshd_config', dir="/tmp") ++ util.write_file( ++ sshd_config, ++ "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s" % ++ (authorized_keys_global, user_keys) ++ ) ++ ++ # process first user ++ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( ++ fpw.pw_name, sshd_config) ++ content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ ++ self.assertEqual(user_keys, auth_key_fn) ++ self.assertTrue(VALID_CONTENT['rsa'] in content) ++ self.assertTrue(VALID_CONTENT['ecdsa'] in content) ++ self.assertTrue(VALID_CONTENT['dsa'] in content) ++ self.assertFalse(VALID_CONTENT['ssh-xmss@openssh.com'] in content) ++ ++ m_getpwnam.return_value = fpw2 ++ # process second user ++ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( ++ fpw2.pw_name, sshd_config) ++ content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ ++ self.assertEqual(authorized_keys2, auth_key_fn) ++ self.assertTrue(VALID_CONTENT['ssh-xmss@openssh.com'] in content) ++ self.assertTrue(VALID_CONTENT['ecdsa'] in content) ++ self.assertTrue(VALID_CONTENT['dsa'] in content) ++ self.assertFalse(VALID_CONTENT['rsa'] in content) ++ ++ @patch("cloudinit.ssh_util.pwd.getpwnam") ++ def test_multiple_authorizedkeys_file_multiuser2(self, m_getpwnam): ++ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home/bobby') ++ m_getpwnam.return_value = fpw ++ user_ssh_folder = "%s/.ssh" % fpw.pw_dir ++ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa ++ authorized_keys = self.tmp_path('authorized_keys2', ++ dir=user_ssh_folder) ++ util.write_file(authorized_keys, VALID_CONTENT['rsa']) ++ # /tmp/home/bobby/.ssh/user_keys3 = dsa ++ user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) ++ util.write_file(user_keys, VALID_CONTENT['dsa']) ++ ++ fpw2 = FakePwEnt(pw_name='badguy', pw_dir='/tmp/home/badguy') ++ user_ssh_folder = "%s/.ssh" % fpw2.pw_dir ++ # /tmp/home/badguy/home/bobby = "" ++ authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy") ++ ++ # /tmp/etc/ssh/authorized_keys = ecdsa ++ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', ++ dir="/tmp") ++ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) ++ ++ # /tmp/sshd_config ++ sshd_config = self.tmp_path('sshd_config', dir="/tmp") ++ util.write_file( ++ sshd_config, ++ "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s %s" % ++ (authorized_keys_global, user_keys, authorized_keys2) ++ ) ++ ++ # process first user ++ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( ++ 
fpw.pw_name, sshd_config) ++ content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ ++ self.assertEqual(user_keys, auth_key_fn) ++ self.assertTrue(VALID_CONTENT['rsa'] in content) ++ self.assertTrue(VALID_CONTENT['ecdsa'] in content) ++ self.assertTrue(VALID_CONTENT['dsa'] in content) ++ ++ m_getpwnam.return_value = fpw2 ++ # process second user ++ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( ++ fpw2.pw_name, sshd_config) ++ content = ssh_util.update_authorized_keys(auth_key_entries, []) ++ ++ # badguy should not take the key from the other user! ++ self.assertEqual(authorized_keys2, auth_key_fn) ++ self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) ++ self.assertFalse(VALID_CONTENT['rsa'] in content) + + # vi: ts=4 expandtab +-- +2.27.0 + diff --git a/ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch b/ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch new file mode 100644 index 0000000..13484d3 --- /dev/null +++ b/ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch @@ -0,0 +1,85 @@ +From 7d4e16bfc1cefbdd4d1477480b02b1d6c1399e4d Mon Sep 17 00:00:00 2001 +From: Emanuele Giuseppe Esposito +Date: Mon, 20 Sep 2021 12:16:36 +0200 +Subject: [PATCH] ssh_utils.py: ignore when sshd_config options are not + key/value pairs (#1007) + +RH-Author: Emanuele Giuseppe Esposito +RH-MergeRequest: 31: ssh_utils.py: ignore when sshd_config options are not key/value pairs (#1007) +RH-Commit: [1/1] 9007fb8a116e98036ff17df0168a76e9a5843671 (eesposit/cloud-init) +RH-Bugzilla: 1862933 +RH-Acked-by: Mohamed Gamal Morsy +RH-Acked-by: Vitaly Kuznetsov + +TESTED: by me +BREW: 39832462 + +commit 2ce857248162957a785af61c135ca8433fdbbcde +Author: Emanuele Giuseppe Esposito +Date: Wed Sep 8 02:08:36 2021 +0200 + + ssh_utils.py: ignore when sshd_config options are not key/value pairs (#1007) + + As specified in #LP 1845552, + In cloudinit/ssh_util.py, in parse_ssh_config_lines(), we attempt to + parse each line of sshd_config. This function expects each line to + be one of the following forms: + + \# comment + key value + key=value + + However, options like DenyGroups and DenyUsers are specified to + *optionally* accepts values in sshd_config. + Cloud-init should comply to this and skip the option if a value + is not provided. 
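+
+    For example (an illustrative call against the parser changed below; the
+    option names are arbitrary):
+
+        from cloudinit import ssh_util
+
+        lines = ssh_util.parse_ssh_config_lines(
+            ["PasswordAuthentication no",   # "key value" form
+             "Banner=/etc/issue.net",       # "key=value" form
+             "DenyUsers"])                  # no value given
+        # -> two SshdConfigLine entries; the bare "DenyUsers" line is
+        #    logged at debug level and skipped instead of raising ValueError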
+ + Signed-off-by: Emanuele Giuseppe Esposito + +Signed-off-by: Emanuele Giuseppe Esposito +--- + cloudinit/ssh_util.py | 8 +++++++- + tests/unittests/test_sshutil.py | 8 ++++++++ + 2 files changed, 15 insertions(+), 1 deletion(-) + +diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py +index 9ccadf09..33679dcc 100644 +--- a/cloudinit/ssh_util.py ++++ b/cloudinit/ssh_util.py +@@ -484,7 +484,13 @@ def parse_ssh_config_lines(lines): + try: + key, val = line.split(None, 1) + except ValueError: +- key, val = line.split('=', 1) ++ try: ++ key, val = line.split('=', 1) ++ except ValueError: ++ LOG.debug( ++ "sshd_config: option \"%s\" has no key/value pair," ++ " skipping it", line) ++ continue + ret.append(SshdConfigLine(line, key, val)) + return ret + +diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py +index a66788bf..08e20050 100644 +--- a/tests/unittests/test_sshutil.py ++++ b/tests/unittests/test_sshutil.py +@@ -525,6 +525,14 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase): + self.assertEqual([self.pwauth], result) + self.check_line(lines[-1], self.pwauth, "no") + ++ def test_option_without_value(self): ++ """Implementation only accepts key-value pairs.""" ++ extended_exlines = self.exlines.copy() ++ denyusers_opt = "DenyUsers" ++ extended_exlines.append(denyusers_opt) ++ lines = ssh_util.parse_ssh_config_lines(list(extended_exlines)) ++ self.assertNotIn(denyusers_opt, str(lines)) ++ + def test_single_option_updated(self): + """A single update should have change made and line updated.""" + opt, val = ("UsePAM", "no") +-- +2.27.0 + diff --git a/ci-write-passwords-only-to-serial-console-lock-down-clo.patch b/ci-write-passwords-only-to-serial-console-lock-down-clo.patch new file mode 100644 index 0000000..5cf4671 --- /dev/null +++ b/ci-write-passwords-only-to-serial-console-lock-down-clo.patch @@ -0,0 +1,369 @@ +From 769b9f8c9b1ecc294a197575108ae7cb54ad7f4b Mon Sep 17 00:00:00 2001 +From: Eduardo Otubo +Date: Mon, 5 Jul 2021 14:13:45 +0200 +Subject: [PATCH] write passwords only to serial console, lock down + cloud-init-output.log (#847) + +RH-Author: Eduardo Otubo +RH-MergeRequest: 21: write passwords only to serial console, lock down cloud-init-output.log (#847) +RH-Commit: [1/1] 8f30f2b7d0d6f9dca19994dbd0827b44e998f238 (otubo/cloud-init) +RH-Bugzilla: 1945891 +RH-Acked-by: Emanuele Giuseppe Esposito +RH-Acked-by: Mohamed Gamal Morsy + +commit b794d426b9ab43ea9d6371477466070d86e10668 +Author: Daniel Watkins +Date: Fri Mar 19 10:06:42 2021 -0400 + + write passwords only to serial console, lock down cloud-init-output.log (#847) + + Prior to this commit, when a user specified configuration which would + generate random passwords for users, cloud-init would cause those + passwords to be written to the serial console by emitting them on + stderr. In the default configuration, any stdout or stderr emitted by + cloud-init is also written to `/var/log/cloud-init-output.log`. This + file is world-readable, meaning that those randomly-generated passwords + were available to be read by any user with access to the system. This + presents an obvious security issue. 
+ + This commit responds to this issue in two ways: + + * We address the direct issue by moving from writing the passwords to + sys.stderr to writing them directly to /dev/console (via + util.multi_log); this means that the passwords will never end up in + cloud-init-output.log + * To avoid future issues like this, we also modify the logging code so + that any files created in a log sink subprocess will only be + owner/group readable and, if it exists, will be owned by the adm + group. This results in `/var/log/cloud-init-output.log` no longer + being world-readable, meaning that if there are other parts of the + codebase that are emitting sensitive data intended for the serial + console, that data is no longer available to all users of the system. + + LP: #1918303 + +Signed-off-by: Eduardo Otubo +--- + cloudinit/config/cc_set_passwords.py | 5 +- + cloudinit/config/tests/test_set_passwords.py | 40 +++++++++---- + cloudinit/tests/test_util.py | 56 +++++++++++++++++++ + cloudinit/util.py | 38 +++++++++++-- + .../modules/test_set_password.py | 24 ++++++++ + tests/integration_tests/test_logging.py | 22 ++++++++ + tests/unittests/test_util.py | 4 ++ + 7 files changed, 173 insertions(+), 16 deletions(-) + create mode 100644 tests/integration_tests/test_logging.py + +diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py +index d6b5682d..433de751 100755 +--- a/cloudinit/config/cc_set_passwords.py ++++ b/cloudinit/config/cc_set_passwords.py +@@ -78,7 +78,6 @@ password. + """ + + import re +-import sys + + from cloudinit.distros import ug_util + from cloudinit import log as logging +@@ -214,7 +213,9 @@ def handle(_name, cfg, cloud, log, args): + if len(randlist): + blurb = ("Set the following 'random' passwords\n", + '\n'.join(randlist)) +- sys.stderr.write("%s\n%s\n" % blurb) ++ util.multi_log( ++ "%s\n%s\n" % blurb, stderr=False, fallback_to_stdout=False ++ ) + + if expire: + expired_users = [] +diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py +index daa1ef51..bbe2ee8f 100644 +--- a/cloudinit/config/tests/test_set_passwords.py ++++ b/cloudinit/config/tests/test_set_passwords.py +@@ -74,10 +74,6 @@ class TestSetPasswordsHandle(CiTestCase): + + with_logs = True + +- def setUp(self): +- super(TestSetPasswordsHandle, self).setUp() +- self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err') +- + def test_handle_on_empty_config(self, *args): + """handle logs that no password has changed when config is empty.""" + cloud = self.tmp_cloud(distro='ubuntu') +@@ -129,10 +125,12 @@ class TestSetPasswordsHandle(CiTestCase): + mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])], + m_subp.call_args_list) + ++ @mock.patch(MODPATH + "util.multi_log") + @mock.patch(MODPATH + "util.is_BSD") + @mock.patch(MODPATH + "subp.subp") +- def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp, +- m_is_bsd): ++ def test_handle_on_chpasswd_list_creates_random_passwords( ++ self, m_subp, m_is_bsd, m_multi_log ++ ): + """handle parses command set random passwords.""" + m_is_bsd.return_value = False + cloud = self.tmp_cloud(distro='ubuntu') +@@ -146,10 +144,32 @@ class TestSetPasswordsHandle(CiTestCase): + self.assertIn( + 'DEBUG: Handling input for chpasswd as list.', + self.logs.getvalue()) +- self.assertNotEqual( +- [mock.call(['chpasswd'], +- '\n'.join(valid_random_pwds) + '\n')], +- m_subp.call_args_list) ++ ++ self.assertEqual(1, m_subp.call_count) ++ args, _kwargs = m_subp.call_args ++ 
self.assertEqual(["chpasswd"], args[0]) ++ ++ stdin = args[1] ++ user_pass = { ++ user: password ++ for user, password ++ in (line.split(":") for line in stdin.splitlines()) ++ } ++ ++ self.assertEqual(1, m_multi_log.call_count) ++ self.assertEqual( ++ mock.call(mock.ANY, stderr=False, fallback_to_stdout=False), ++ m_multi_log.call_args ++ ) ++ ++ self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys())) ++ written_lines = m_multi_log.call_args[0][0].splitlines() ++ for password in user_pass.values(): ++ for line in written_lines: ++ if password in line: ++ break ++ else: ++ self.fail("Password not emitted to console") + + + # vi: ts=4 expandtab +diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py +index b7a302f1..e811917e 100644 +--- a/cloudinit/tests/test_util.py ++++ b/cloudinit/tests/test_util.py +@@ -851,4 +851,60 @@ class TestEnsureFile: + assert "ab" == kwargs["omode"] + + ++@mock.patch("cloudinit.util.grp.getgrnam") ++@mock.patch("cloudinit.util.os.setgid") ++@mock.patch("cloudinit.util.os.umask") ++class TestRedirectOutputPreexecFn: ++ """This tests specifically the preexec_fn used in redirect_output.""" ++ ++ @pytest.fixture(params=["outfmt", "errfmt"]) ++ def preexec_fn(self, request): ++ """A fixture to gather the preexec_fn used by redirect_output. ++ ++ This enables simpler direct testing of it, and parameterises any tests ++ using it to cover both the stdout and stderr code paths. ++ """ ++ test_string = "| piped output to invoke subprocess" ++ if request.param == "outfmt": ++ args = (test_string, None) ++ elif request.param == "errfmt": ++ args = (None, test_string) ++ with mock.patch("cloudinit.util.subprocess.Popen") as m_popen: ++ util.redirect_output(*args) ++ ++ assert 1 == m_popen.call_count ++ _args, kwargs = m_popen.call_args ++ assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen" ++ return kwargs["preexec_fn"] ++ ++ def test_preexec_fn_sets_umask( ++ self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn ++ ): ++ """preexec_fn should set a mask that avoids world-readable files.""" ++ preexec_fn() ++ ++ assert [mock.call(0o037)] == m_os_umask.call_args_list ++ ++ def test_preexec_fn_sets_group_id_if_adm_group_present( ++ self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn ++ ): ++ """We should setgrp to adm if present, so files are owned by them.""" ++ fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid) ++ m_getgrnam.return_value = fake_group ++ ++ preexec_fn() ++ ++ assert [mock.call("adm")] == m_getgrnam.call_args_list ++ assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list ++ ++ def test_preexec_fn_handles_absent_adm_group_gracefully( ++ self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn ++ ): ++ """We should handle an absent adm group gracefully.""" ++ m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'") ++ ++ preexec_fn() ++ ++ assert 0 == m_setgid.call_count ++ + # vi: ts=4 expandtab +diff --git a/cloudinit/util.py b/cloudinit/util.py +index 769f3425..4e0a72db 100644 +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -359,7 +359,7 @@ def find_modules(root_dir): + + + def multi_log(text, console=True, stderr=True, +- log=None, log_level=logging.DEBUG): ++ log=None, log_level=logging.DEBUG, fallback_to_stdout=True): + if stderr: + sys.stderr.write(text) + if console: +@@ -368,7 +368,7 @@ def multi_log(text, console=True, stderr=True, + with open(conpath, 'w') as wfh: + wfh.write(text) + wfh.flush() +- else: ++ elif fallback_to_stdout: + # A container may lack /dev/console (arguably a 
container bug). If + # it does not exist, then write output to stdout. this will result + # in duplicate stderr and stdout messages if stderr was True. +@@ -623,6 +623,26 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): + if not o_err: + o_err = sys.stderr + ++ # pylint: disable=subprocess-popen-preexec-fn ++ def set_subprocess_umask_and_gid(): ++ """Reconfigure umask and group ID to create output files securely. ++ ++ This is passed to subprocess.Popen as preexec_fn, so it is executed in ++ the context of the newly-created process. It: ++ ++ * sets the umask of the process so created files aren't world-readable ++ * if an adm group exists in the system, sets that as the process' GID ++ (so that the created file(s) are owned by root:adm) ++ """ ++ os.umask(0o037) ++ try: ++ group_id = grp.getgrnam("adm").gr_gid ++ except KeyError: ++ # No adm group, don't set a group ++ pass ++ else: ++ os.setgid(group_id) ++ + if outfmt: + LOG.debug("Redirecting %s to %s", o_out, outfmt) + (mode, arg) = outfmt.split(" ", 1) +@@ -632,7 +652,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): + owith = "wb" + new_fp = open(arg, owith) + elif mode == "|": +- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) ++ proc = subprocess.Popen( ++ arg, ++ shell=True, ++ stdin=subprocess.PIPE, ++ preexec_fn=set_subprocess_umask_and_gid, ++ ) + new_fp = proc.stdin + else: + raise TypeError("Invalid type for output format: %s" % outfmt) +@@ -654,7 +679,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): + owith = "wb" + new_fp = open(arg, owith) + elif mode == "|": +- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) ++ proc = subprocess.Popen( ++ arg, ++ shell=True, ++ stdin=subprocess.PIPE, ++ preexec_fn=set_subprocess_umask_and_gid, ++ ) + new_fp = proc.stdin + else: + raise TypeError("Invalid type for error format: %s" % errfmt) +diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py +index b13f76fb..d7cf91a5 100644 +--- a/tests/integration_tests/modules/test_set_password.py ++++ b/tests/integration_tests/modules/test_set_password.py +@@ -116,6 +116,30 @@ class Mixin: + # Which are not the same + assert shadow_users["harry"] != shadow_users["dick"] + ++ def test_random_passwords_not_stored_in_cloud_init_output_log( ++ self, class_client ++ ): ++ """We should not emit passwords to the in-instance log file. ++ ++ LP: #1918303 ++ """ ++ cloud_init_output = class_client.read_from_file( ++ "/var/log/cloud-init-output.log" ++ ) ++ assert "dick:" not in cloud_init_output ++ assert "harry:" not in cloud_init_output ++ ++ def test_random_passwords_emitted_to_serial_console(self, class_client): ++ """We should emit passwords to the serial console. 
(LP: #1918303)""" ++ try: ++ console_log = class_client.instance.console_log() ++ except NotImplementedError: ++ # Assume that an exception here means that we can't use the console ++ # log ++ pytest.skip("NotImplementedError when requesting console log") ++ assert "dick:" in console_log ++ assert "harry:" in console_log ++ + def test_explicit_password_set_correctly(self, class_client): + """Test that an explicitly-specified password is set correctly.""" + shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client) +diff --git a/tests/integration_tests/test_logging.py b/tests/integration_tests/test_logging.py +new file mode 100644 +index 00000000..b31a0434 +--- /dev/null ++++ b/tests/integration_tests/test_logging.py +@@ -0,0 +1,22 @@ ++"""Integration tests relating to cloud-init's logging.""" ++ ++ ++class TestVarLogCloudInitOutput: ++ """Integration tests relating to /var/log/cloud-init-output.log.""" ++ ++ def test_var_log_cloud_init_output_not_world_readable(self, client): ++ """ ++ The log can contain sensitive data, it shouldn't be world-readable. ++ ++ LP: #1918303 ++ """ ++ # Check the file exists ++ assert client.execute("test -f /var/log/cloud-init-output.log").ok ++ ++ # Check its permissions are as we expect ++ perms, user, group = client.execute( ++ "stat -c %a:%U:%G /var/log/cloud-init-output.log" ++ ).split(":") ++ assert "640" == perms ++ assert "root" == user ++ assert "adm" == group +diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py +index 857629f1..e5292001 100644 +--- a/tests/unittests/test_util.py ++++ b/tests/unittests/test_util.py +@@ -572,6 +572,10 @@ class TestMultiLog(helpers.FilesystemMockingTestCase): + util.multi_log(logged_string) + self.assertEqual(logged_string, self.stdout.getvalue()) + ++ def test_logs_dont_go_to_stdout_if_fallback_to_stdout_is_false(self): ++ util.multi_log('something', fallback_to_stdout=False) ++ self.assertEqual('', self.stdout.getvalue()) ++ + def test_logs_go_to_log_if_given(self): + log = mock.MagicMock() + logged_string = 'something very important' +-- +2.27.0 + diff --git a/cloud-init-tmpfiles.conf b/cloud-init-tmpfiles.conf new file mode 100644 index 0000000..0c6d2a3 --- /dev/null +++ b/cloud-init-tmpfiles.conf @@ -0,0 +1 @@ +d /run/cloud-init 0700 root root - - diff --git a/cloud-init.spec b/cloud-init.spec new file mode 100644 index 0000000..04b8907 --- /dev/null +++ b/cloud-init.spec @@ -0,0 +1,744 @@ +%{!?license: %global license %%doc} + +# The only reason we are archful is because dmidecode is ExclusiveArch +# https://bugzilla.redhat.com/show_bug.cgi?id=1067089 +%global debug_package %{nil} + +Name: cloud-init +Version: 21.1 +Release: 15%{?dist} +Summary: Cloud instance init scripts + +Group: System Environment/Base +License: GPLv3 +URL: http://launchpad.net/cloud-init +Source0: https://launchpad.net/cloud-init/trunk/%{version}/+download/%{name}-%{version}.tar.gz +Source1: cloud-init-tmpfiles.conf +Source2: test_version_change.pkl + +Patch0001: 0001-Add-initial-redhat-setup.patch +Patch0002: 0002-Do-not-write-NM_CONTROLLED-no-in-generated-interface.patch +Patch0003: 0003-limit-permissions-on-def_log_file.patch +Patch0004: 0004-sysconfig-Don-t-write-BOOTPROTO-dhcp-for-ipv6-dhcp.patch +Patch0005: 0005-DataSourceAzure.py-use-hostnamectl-to-set-hostname.patch +Patch0006: 0006-include-NOZEROCONF-yes-in-etc-sysconfig-network.patch +Patch0007: 0007-Remove-race-condition-between-cloud-init-and-Network.patch +Patch0008: 0008-net-exclude-OVS-internal-interfaces-in-get_interface.patch +Patch0009: 
0009-Fix-requiring-device-number-on-EC2-derivatives-836.patch +# For bz#1957532 - [cloud-init] From RHEL 82+ cloud-init no longer displays sshd keys fingerprints from instance launched from a backup image +Patch10: ci-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch +# For bz#1945891 - CVE-2021-3429 cloud-init: randomly generated passwords logged in clear-text to world-readable file [rhel-8] +Patch11: ci-write-passwords-only-to-serial-console-lock-down-clo.patch +# For bz#1862967 - [cloud-init]Customize ssh AuthorizedKeysFile causes login failure +Patch12: ci-ssh-util-allow-cloudinit-to-merge-all-ssh-keys-into-.patch +# For bz#1862967 - [cloud-init]Customize ssh AuthorizedKeysFile causes login failure +Patch13: ci-Stop-copying-ssh-system-keys-and-check-folder-permis.patch +# For bz#1995840 - [cloudinit] Fix home permissions modified by ssh module +Patch14: ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch +# For bz#1862933 - cloud-init fails with ValueError: need more than 1 value to unpack[rhel-8] +Patch15: ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch +# For bz#2013644 - cloud-init fails to set host key permissions correctly +Patch16: ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch +# For bz#2021538 - cloud-init.service fails to start after package update +Patch17: ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch +# For bz#2028028 - [RHEL-8] Above 19.2 of cloud-init fails to configure routes when configuring static and default routes to the same destination IP +Patch18: ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch +# For bz#2039697 - [RHEL8] [Azure] cloud-init fails to configure the system +# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' +Patch20: ci-Datasource-for-VMware-953.patch +# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' +Patch21: ci-Change-netifaces-dependency-to-0.10.4-965.patch +# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' +Patch22: ci-Update-dscheck_VMware-s-rpctool-check-970.patch +# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' +Patch23: ci-Revert-unnecesary-lcase-in-ds-identify-978.patch +# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata +Patch24: ci-Add-flexibility-to-IMDS-api-version-793.patch +# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata +Patch25: ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch +# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata +Patch26: ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch +# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata +Patch27: ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch +# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata +Patch28: ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch +# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata +Patch29: ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch +# For bz#2023940 - [RHEL-8] Support for provisioning Azure VM with userdata +Patch30: ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch +# For bz#2046540 - cloud-init writes route6-$DEVICE config with a HEX netmask. 
ip route does not like : Error: inet6 prefix is expected rather than "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::". +Patch31: ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch +# For bz#1935826 - [rhel-8] Cloud-init init stage fails after upgrade from RHEL7 to RHEL8. +Patch32: ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch +# For bz#1935826 - [rhel-8] Cloud-init init stage fails after upgrade from RHEL7 to RHEL8. +Patch33: ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch +# For bz#2026587 - [cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo' + + +BuildArch: noarch + +BuildRequires: pkgconfig(systemd) +BuildRequires: python3-devel +BuildRequires: python3-setuptools +BuildRequires: systemd + +# For tests +BuildRequires: iproute +BuildRequires: python3-configobj +# # https://bugzilla.redhat.com/show_bug.cgi?id=1417029 +BuildRequires: python3-httpretty >= 0.8.14-2 +BuildRequires: python3-jinja2 +BuildRequires: python3-jsonpatch +BuildRequires: python3-jsonschema +BuildRequires: python3-mock +BuildRequires: python3-nose +BuildRequires: python3-oauthlib +BuildRequires: python3-prettytable +BuildRequires: python3-pyserial +BuildRequires: python3-PyYAML +BuildRequires: python3-requests +BuildRequires: python3-six +BuildRequires: python3-unittest2 +# dnf is needed to make cc_ntp unit tests work +# https://bugs.launchpad.net/cloud-init/+bug/1721573 +BuildRequires: /usr/bin/dnf + +Requires: e2fsprogs +Requires: iproute +Requires: libselinux-python3 +Requires: policycoreutils-python3 +Requires: procps +Requires: python3-configobj +Requires: python3-jinja2 +Requires: python3-jsonpatch +Requires: python3-jsonschema +Requires: python3-oauthlib +Requires: python3-prettytable +Requires: python3-pyserial +Requires: python3-PyYAML +Requires: python3-requests +Requires: python3-six +Requires: shadow-utils +Requires: util-linux +Requires: xfsprogs +Requires: dhcp-client +# https://bugzilla.redhat.com/show_bug.cgi?id=2039697 +Requires: gdisk +Requires: openssl +Requires: python3-netifaces + +%{?systemd_requires} + +%description +Cloud-init is a set of init scripts for cloud instances. Cloud instances +need special scripts to run during initialization to retrieve and install +ssh keys and to let the user run various scripts. + + +%prep +%autosetup -p1 + +# Change shebangs +sed -i -e 's|#!/usr/bin/env python|#!/usr/bin/env python3|' \ + -e 's|#!/usr/bin/python|#!/usr/bin/python3|' tools/* cloudinit/ssh_util.py + +cp -f %{SOURCE2} tests/integration_tests/assets/test_version_change.pkl + +%build +%py3_build + + +%install +%py3_install -- + +python3 tools/render-cloudcfg --variant fedora > $RPM_BUILD_ROOT/%{_sysconfdir}/cloud/cloud.cfg + +sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $RPM_BUILD_ROOT/%{python3_sitelib}/cloudinit/version.py + +mkdir -p $RPM_BUILD_ROOT/var/lib/cloud + +# /run/cloud-init needs a tmpfiles.d entry +mkdir -p $RPM_BUILD_ROOT/run/cloud-init +mkdir -p $RPM_BUILD_ROOT/%{_tmpfilesdir} +cp -p %{SOURCE1} $RPM_BUILD_ROOT/%{_tmpfilesdir}/%{name}.conf + +# We supply our own config file since our software differs from Ubuntu's. 
+cp -p rhel/cloud.cfg $RPM_BUILD_ROOT/%{_sysconfdir}/cloud/cloud.cfg + +mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d +cp -p tools/21-cloudinit.conf $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf + +# Make installed NetworkManager hook name less generic +mv $RPM_BUILD_ROOT/etc/NetworkManager/dispatcher.d/hook-network-manager \ + $RPM_BUILD_ROOT/etc/NetworkManager/dispatcher.d/cloud-init-azure-hook + +# Install our own systemd units (rhbz#1440831) +mkdir -p $RPM_BUILD_ROOT%{_unitdir} +cp rhel/systemd/* $RPM_BUILD_ROOT%{_unitdir}/ + +[ ! -d $RPM_BUILD_ROOT/usr/lib/systemd/system-generators ] && mkdir -p $RPM_BUILD_ROOT/usr/lib/systemd/system-generators +python3 tools/render-cloudcfg --variant rhel systemd/cloud-init-generator.tmpl > $RPM_BUILD_ROOT/usr/lib/systemd/system-generators/cloud-init-generator +chmod 755 $RPM_BUILD_ROOT/usr/lib/systemd/system-generators/cloud-init-generator + +[ ! -d $RPM_BUILD_ROOT/usr/lib/%{name} ] && mkdir -p $RPM_BUILD_ROOT/usr/lib/%{name} +cp -p tools/ds-identify $RPM_BUILD_ROOT%{_libexecdir}/%{name}/ds-identify + +# installing man pages +mkdir -p ${RPM_BUILD_ROOT}%{_mandir}/man1/ +for man in cloud-id.1 cloud-init.1 cloud-init-per.1; do + install -c -m 0644 doc/man/${man} ${RPM_BUILD_ROOT}%{_mandir}/man1/${man} + chmod -x ${RPM_BUILD_ROOT}%{_mandir}/man1/* +done + +%clean +rm -rf $RPM_BUILD_ROOT + + +%post +if [ $1 -eq 1 ] ; then + # Initial installation + # Enabled by default per "runs once then goes away" exception + /bin/systemctl enable cloud-config.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-final.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init-local.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init.target >/dev/null 2>&1 || : +elif [ $1 -eq 2 ]; then + # Upgrade. 
If the upgrade is from a version older than 0.7.9-8, + # there will be stale systemd config + /bin/systemctl is-enabled cloud-config.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-config.service >/dev/null 2>&1 || : + + /bin/systemctl is-enabled cloud-final.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-final.service >/dev/null 2>&1 || : + + /bin/systemctl is-enabled cloud-init.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-init.service >/dev/null 2>&1 || : + + /bin/systemctl is-enabled cloud-init-local.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-init-local.service >/dev/null 2>&1 || : + + /bin/systemctl is-enabled cloud-init.target >/dev/null 2>&1 && + /bin/systemctl reenable cloud-init.target >/dev/null 2>&1 || : +fi + +%preun +if [ $1 -eq 0 ] ; then + # Package removal, not upgrade + /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-init.target >/dev/null 2>&1 || : + # One-shot services -> no need to stop +fi + +%postun +%systemd_postun cloud-config.service cloud-config.target cloud-final.service cloud-init.service cloud-init.target cloud-init-local.service + +%files +%license LICENSE +%doc ChangeLog rhel/README.rhel +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg +%dir %{_sysconfdir}/cloud/cloud.cfg.d +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg +%doc %{_sysconfdir}/cloud/cloud.cfg.d/README +%dir %{_sysconfdir}/cloud/templates +%config(noreplace) %{_sysconfdir}/cloud/templates/* +%{_unitdir}/cloud-config.service +%{_unitdir}/cloud-config.target +%{_unitdir}/cloud-final.service +%{_unitdir}/cloud-init-local.service +%{_unitdir}/cloud-init.service +%{_unitdir}/cloud-init.target +%{_tmpfilesdir}/%{name}.conf +%{python3_sitelib}/* +%{_libexecdir}/%{name} +%{_bindir}/cloud-init* +%doc %{_datadir}/doc/%{name} +%{_mandir}/man1/* +%dir %verify(not mode) /run/cloud-init +%dir /var/lib/cloud +/etc/NetworkManager/dispatcher.d/cloud-init-azure-hook +%{_udevrulesdir}/66-azure-ephemeral.rules +%{_sysconfdir}/bash_completion.d/cloud-init +%{_bindir}/cloud-id +%{_libexecdir}/%{name}/ds-identify +/usr/lib/systemd/system-generators/cloud-init-generator + + +%dir %{_sysconfdir}/rsyslog.d +%config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf + +%changelog +* Fri Apr 01 2022 Camilla Conte - 21.1-15 +- ci-Detect-a-Python-version-change-and-clear-the-cache-8.patch [bz#1935826] +- ci-Fix-MIME-policy-failure-on-python-version-upgrade-93.patch [bz#1935826] +- Resolves: bz#1935826 + ([rhel-8] Cloud-init init stage fails after upgrade from RHEL7 to RHEL8.) + +* Fri Feb 25 2022 Jon Maloy - 21.1-14 +- ci-Fix-IPv6-netmask-format-for-sysconfig-1215.patch [bz#2046540] +- Resolves: bz#2046540 + (cloud-init writes route6-$DEVICE config with a HEX netmask. ip route does not like : Error: inet6 prefix is expected rather than "fd00:fd00:fd00::/ffff:ffff:ffff:ffff::".) 
+ +* Tue Jan 25 2022 Jon Maloy - 21.1-13 +- ci-Add-flexibility-to-IMDS-api-version-793.patch [bz#2023940] +- ci-Azure-helper-Ensure-Azure-http-handler-sleeps-betwee.patch [bz#2023940] +- ci-azure-Removing-ability-to-invoke-walinuxagent-799.patch [bz#2023940] +- ci-Azure-eject-the-provisioning-iso-before-reporting-re.patch [bz#2023940] +- ci-Azure-Retrieve-username-and-hostname-from-IMDS-865.patch [bz#2023940] +- ci-Azure-Retry-net-metadata-during-nic-attach-for-non-t.patch [bz#2023940] +- ci-Azure-adding-support-for-consuming-userdata-from-IMD.patch [bz#2023940] +- Resolves: bz#2023940 + ([RHEL-8] Support for provisioning Azure VM with userdata) + +* Wed Jan 19 2022 Jon Maloy - 21.1-12 +- ci-Add-gdisk-and-openssl-as-deps-to-fix-UEFI-Azure-init.patch [bz#2039697] +- ci-Datasource-for-VMware-953.patch [bz#2026587] +- ci-Change-netifaces-dependency-to-0.10.4-965.patch [bz#2026587] +- ci-Update-dscheck_VMware-s-rpctool-check-970.patch [bz#2026587] +- ci-Revert-unnecesary-lcase-in-ds-identify-978.patch [bz#2026587] +- ci-Add-netifaces-package-as-a-Requires-in-cloud-init.sp.patch [bz#2026587] +- Resolves: bz#2039697 + ([RHEL8] [Azure] cloud-init fails to configure the system) +- Resolves: bz#2026587 + ([cloud-init][RHEL8] Support for cloud-init datasource 'cloud-init-vmware-guestinfo') + +* Wed Dec 08 2021 Jon Maloy - 21.1-11 +- ci-cloudinit-net-handle-two-different-routes-for-the-sa.patch [bz#2028028] +- Resolves: bz#2028028 + ([RHEL-8] Above 19.2 of cloud-init fails to configure routes when configuring static and default routes to the same destination IP) + +* Mon Dec 06 2021 Jon Maloy - 21.1-10 +- ci-fix-error-on-upgrade-caused-by-new-vendordata2-attri.patch [bz#2021538] +- Resolves: bz#2021538 + (cloud-init.service fails to start after package update) + +* Mon Oct 25 2021 Jon Maloy - 21.1-9 +- ci-cc_ssh.py-fix-private-key-group-owner-and-permission.patch [bz#2013644] +- Resolves: bz#2013644 + (cloud-init fails to set host key permissions correctly) + +* Thu Sep 23 2021 Miroslav Rezanina - 21.1-8 +- ci-ssh_utils.py-ignore-when-sshd_config-options-are-not.patch [bz#1862933] +- Resolves: bz#1862933 + (cloud-init fails with ValueError: need more than 1 value to unpack[rhel-8]) + +* Fri Aug 27 2021 Miroslav Rezanina - 21.1-7 +- ci-Fix-home-permissions-modified-by-ssh-module-SC-338-9.patch [bz#1995840] +- Resolves: bz#1995840 + ([cloudinit] Fix home permissions modified by ssh module) + +* Wed Aug 11 2021 Miroslav Rezanina - 21.1-6 +- ci-Stop-copying-ssh-system-keys-and-check-folder-permis.patch [bz#1862967] +- Resolves: bz#1862967 + ([cloud-init]Customize ssh AuthorizedKeysFile causes login failure) + +* Fri Aug 06 2021 Miroslav Rezanina - 21.1-5 +- ci-Add-dhcp-client-as-a-dependency.patch [bz#1977385] +- Resolves: bz#1977385 + ([Azure][RHEL-8] cloud-init must require dhcp-client on Azure) + +* Mon Jul 19 2021 Miroslav Rezanina - 21.1-4 +- ci-ssh-util-allow-cloudinit-to-merge-all-ssh-keys-into-.patch [bz#1862967] +- Resolves: bz#1862967 + ([cloud-init]Customize ssh AuthorizedKeysFile causes login failure) + +* Mon Jul 12 2021 Miroslav Rezanina - 21.1-3 +- ci-write-passwords-only-to-serial-console-lock-down-clo.patch [bz#1945891] +- Resolves: bz#1945891 + (CVE-2021-3429 cloud-init: randomly generated passwords logged in clear-text to world-readable file [rhel-8]) + +* Fri Jun 11 2021 Miroslav Rezanina - 21.1-2 +- ci-rhel-cloud.cfg-remove-ssh_genkeytypes-in-settings.py.patch [bz#1957532] +- ci-cloud-init.spec.template-update-systemd_postun-param.patch [bz#1952089] +- Resolves: bz#1957532 + 
([cloud-init] From RHEL 82+ cloud-init no longer displays sshd keys fingerprints from instance launched from a backup image) +- Resolves: bz#1952089 + (cloud-init brew build fails on Fedora 33) + +* Thu May 27 2021 Miroslav Rezanina - 21.1-1.el8 +- Rebase to 21.1 [bz#1958174] +- Resolves: bz#1958174 + ([RHEL-8.5.0] Rebase cloud-init to 21.1) + +* Tue Feb 02 2021 Miroslav Rezanina - 20.3-10.el8 +- ci-fix-a-typo-in-man-page-cloud-init.1-752.patch [bz#1913127] +- Resolves: bz#1913127 + (A typo in cloud-init man page) + +* Tue Jan 26 2021 Miroslav Rezanina - 20.3-9.el8 +- ci-DataSourceAzure-update-password-for-defuser-if-exist.patch [bz#1900892] +- ci-Revert-ssh_util-handle-non-default-AuthorizedKeysFil.patch [bz#1919972] +- Resolves: bz#1900892 + ([Azure] Update existing user password RHEL8x) +- Resolves: bz#1919972 + ([RHEL-8.4] ssh keys can be shared across users giving potential root access) + +* Thu Jan 21 2021 Miroslav Rezanina - 20.3-8.el8 +- ci-Missing-IPV6_AUTOCONF-no-to-render-sysconfig-dhcp6-s.patch [bz#1859695] +- Resolves: bz#1859695 + ([Cloud-init] DHCPv6 assigned address is not added to VM's interface) + +* Tue Jan 05 2021 Miroslav Rezanina - 20.3-7.el8 +- ci-Report-full-specific-version-with-cloud-init-version.patch [bz#1898949] +- Resolves: bz#1898949 + (cloud-init should report full specific full version with "cloud-init --version") + +* Mon Dec 14 2020 Miroslav Rezanina - 20.3-6.el8 +- ci-Installing-man-pages-in-the-correct-place-with-corre.patch [bz#1612573] +- ci-Adding-BOOTPROTO-dhcp-to-render-sysconfig-dhcp6-stat.patch [bz#1859695] +- ci-Fix-unit-failure-of-cloud-final.service-if-NetworkMa.patch [bz#1898943] +- ci-ssh_util-handle-non-default-AuthorizedKeysFile-confi.patch [bz#1862967] +- Resolves: bz#1612573 + (Man page scan results for cloud-init) +- Resolves: bz#1859695 + ([Cloud-init] DHCPv6 assigned address is not added to VM's interface) +- Resolves: bz#1898943 + ([rhel-8]cloud-final.service fails if NetworkManager not installed.)
+- Resolves: bz#1862967 + ([cloud-init]Customize ssh AuthorizedKeysFile causes login failure) + +* Fri Nov 27 2020 Miroslav Rezanina - 20.3-5.el8 +- ci-network-Fix-type-and-respect-name-when-rendering-vla.patch [bz#1881462] +- Resolves: bz#1881462 + ([rhel8][cloud-init] ifup bond0.504 Error: Connection activation failed: No suitable device found for this connection) + +* Tue Nov 24 2020 Miroslav Rezanina - 20.3-4.el8 +- ci-Changing-permission-of-cloud-init-generator-to-755.patch [bz#1897528] +- Resolves: bz#1897528 + (Change permission on ./systemd/cloud-init-generator.tmpl to 755 instead of 771) + +* Fri Nov 13 2020 Miroslav Rezanina - 20.3-3.el8 +- ci--Removing-net-tools-dependency.patch [bz#1881871] +- ci--Adding-man-pages-to-Red-Hat-spec-file.patch [bz#1612573] +- Resolves: bz#1881871 + (Remove net-tools legacy dependency from spec file) +- Resolves: bz#1612573 + (Man page scan results for cloud-init) + +* Tue Nov 03 2020 Miroslav Rezanina - 20.3-2.el8 +- ci-Explicit-set-IPV6_AUTOCONF-and-IPV6_FORCE_ACCEPT_RA-.patch [bz#1889635] +- ci-Add-config-modules-for-controlling-IBM-PowerVM-RMC.-.patch [bz#1886430] +- Resolves: bz#1886430 + (Support for cloud-init config modules for PowerVM Hypervisor in Red Hat cloud-init) +- Resolves: bz#1889635 + (Add support for ipv6_autoconf on cloud-init-20.3) + +* Fri Oct 23 2020 Eduardo Otubo - 20.3-1.el8 +- Rebase to cloud-init 20.3 [bz#1885185] +- Resolves: bz#1885185 + ([RHEL-8.4.0] cloud-init rebase to 20.3) + +* Wed Sep 02 2020 Miroslav Rezanina - 19.4-11.el8 +- ci-cc_mounts-fix-incorrect-format-specifiers-316.patch [bz#1794664] +- Resolves: bz#1794664 + ([RHEL8] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init) + +* Mon Aug 31 2020 Miroslav Rezanina - 19.4-10.el8 +- ci-Changing-notation-of-subp-call.patch [bz#1839662] +- Resolves: bz#1839662 + ([ESXi][RHEL8.3][cloud-init]ERROR log in cloud-init.log after clone VM on ESXi platform) + +* Mon Aug 24 2020 Miroslav Rezanina - 19.4-9.el8 +- ci-Do-not-use-fallocate-in-swap-file-creation-on-xfs.-7.patch [bz#1794664] +- ci-swap-file-size-being-used-before-checked-if-str-315.patch [bz#1794664] +- ci-Detect-kernel-version-before-swap-file-creation-428.patch [bz#1794664] +- Resolves: bz#1794664 + ([RHEL8] swapon fails with "swapfile has holes" when created on a xfs filesystem by cloud-init) + +* Mon Aug 17 2020 Miroslav Rezanina - 19.4-8.el8 +- ci-When-tools.conf-does-not-exist-running-cmd-vmware-to.patch [bz#1839662] +- ci-ssh-exit-with-non-zero-status-on-disabled-user-472.patch [bz#1833874] +- Resolves: bz#1833874 + ([rhel-8.3]using root user error should cause a non-zero exit code) +- Resolves: bz#1839662 + ([ESXi][RHEL8.3][cloud-init]ERROR log in cloud-init.log after clone VM on ESXi platform) + +* Fri Jun 26 2020 Miroslav Rezanina - 19.4-7.el8 +- Fixing cloud-init-generator permissions [bz#1834173] +- Resolves: bz#1834173 + ([rhel-8.3]Incorrect ds-identify check in cloud-init-generator) + +* Thu Jun 25 2020 Miroslav Rezanina - 19.4-6.el8 +- ci-ec2-only-redact-token-request-headers-in-logs-avoid-.patch [bz#1822343] +- Resolves: bz#1822343 + ([RHEL8.3] Do not log IMDSv2 token values into cloud-init.log) + +* Wed Jun 24 2020 Miroslav Rezanina - 19.4-5.el8 +- ci-ec2-Do-not-log-IMDSv2-token-values-instead-use-REDAC.patch [bz#1822343] +- ci-Render-the-generator-from-template-instead-of-cp.patch [bz#1834173] +- ci-Change-from-redhat-to-rhel-in-systemd-generator-tmpl.patch [bz#1834173] +- ci-cloud-init.service.tmpl-use-rhel-instead-of-redhat-4.patch [bz#1834173] +- Resolves: 
bz#1822343 + ([RHEL8.3] Do not log IMDSv2 token values into cloud-init.log) +- Resolves: bz#1834173 + ([rhel-8.3]Incorrect ds-identify check in cloud-init-generator) + +* Tue Jun 09 2020 Miroslav Rezanina - 19.4-4.el8 +- ci-changing-ds-identify-patch-from-usr-lib-to-usr-libex.patch [bz#1834173] +- Resolves: bz#1834173 + ([rhel-8.3]Incorrect ds-identify check in cloud-init-generator) + +* Mon Jun 01 2020 Miroslav Rezanina - 19.4-3.el8 +- ci-Make-cloud-init.service-execute-after-network-is-up.patch [bz#1803928] +- Resolves: bz#1803928 + ([RHEL8.3] Race condition of starting cloud-init and NetworkManager) + +* Thu May 28 2020 Miroslav Rezanina - 19.4-2.el8 +- ci-cc_set_password-increase-random-pwlength-from-9-to-2.patch [bz#1812171] +- ci-utils-use-SystemRandom-when-generating-random-passwo.patch [bz#1812174] +- ci-Enable-ssh_deletekeys-by-default.patch [bz#1814152] +- ci-Remove-race-condition-between-cloud-init-and-Network.patch [bz#1840648] +- Resolves: bz#1812171 + (CVE-2020-8632 cloud-init: Too short random password length in cc_set_password in config/cc_set_passwords.py [rhel-8]) +- Resolves: bz#1812174 + (CVE-2020-8631 cloud-init: Use of random.choice when generating random password [rhel-8]) +- Resolves: bz#1814152 + (CVE-2018-10896 cloud-init: default configuration disabled deletion of SSH host keys [rhel-8]) +- Resolves: bz#1840648 + ([cloud-init][RHEL-8.2.0] /etc/resolv.conf lose config after reboot (initial instance is ok)) + +* Mon Apr 20 2020 Miroslav Rezanina - 19.4-1.el8.1 +- Rebase to cloud-init 19.4 [bz#1811912] +- Resolves: bz#1811912 + ([RHEL-8.2.1] cloud-init rebase to 19.4) + +* Tue Mar 10 2020 Miroslav Rezanina - 18.5-12.el8 +- ci-Remove-race-condition-between-cloud-init-and-Network.patch [bz#1807797] +- Resolves: bz#1807797 + ([cloud-init][RHEL-8.2.0] /etc/resolv.conf lose config after reboot (initial instance is ok)) + +* Thu Feb 20 2020 Miroslav Rezanina - 18.5-11.el8 +- ci-azure-avoid-re-running-cloud-init-when-instance-id-i.patch [bz#1788684] +- ci-net-skip-bond-interfaces-in-get_interfaces.patch [bz#1768770] +- ci-net-add-is_master-check-for-filtering-device-list.patch [bz#1768770] +- Resolves: bz#1768770 + (cloud-init complaining about enslaved mac) +- Resolves: bz#1788684 + ([RHEL-8] cloud-init Azure byte swap (hyperV Gen2 Only)) + +* Thu Feb 13 2020 Miroslav Rezanina - 18.5-10.el8 +- ci-cmd-main.py-Fix-missing-modules-init-key-in-modes-di.patch [bz#1802140] +- Resolves: bz#1802140 + ([cloud-init][RHEL8.2]cloud-init cloud-final.service fail with KeyError: 'modules-init' after upgrade to version 18.2-1.el7_6.1 in RHV) + +* Tue Jan 28 2020 Miroslav Rezanina - 18.5-9.el8 +- ci-Removing-cloud-user-from-wheel.patch [bz#1785648] +- Resolves: bz#1785648 + ([RHEL8]cloud-user added to wheel group and sudoers.d causes 'sudo -v' prompts for passphrase) + +* Fri Nov 22 2019 Miroslav Rezanina - 18.5-8.el8 +- ci-Fix-for-network-configuration-not-persisting-after-r.patch [bz#1706482] +- ci-util-json.dumps-on-python-2.7-will-handle-UnicodeDec.patch [bz#1744718] +- Resolves: bz#1706482 + ([cloud-init][RHVM]cloud-init network configuration does not persist reboot [RHEL 8.2.0]) +- Resolves: bz#1744718 + ([cloud-init][RHEL8][OpenStack] cloud-init can't persist instance-data.json) + +* Mon Jul 15 2019 Miroslav Rezanina - 18.5-7.el8 +- Fixing TPS [bz#1729864] +- Resolves: bz#1729864 + (cloud-init tps fail) + +* Thu Jul 04 2019 Miroslav Rezanina - 18.5-6.el8 +- ci-Revert-azure-ensure-that-networkmanager-hook-script-.patch [bz#1692914] +- 
ci-Azure-Return-static-fallback-address-as-if-failed-to.patch [bz#1691986] +- Resolves: bz#1691986 + ([Azure] [RHEL 8.1] Cloud-init fixes to support fast provisioning for Azure) +- Resolves: bz#1692914 + ([8.1] [WALA][cloud] cloud-init dhclient-hook script has some unexpected side-effects on Azure) + +* Mon Jun 03 2019 Miroslav Rezanina - 18.5-4.el8 +- ci-Azure-Ensure-platform-random_seed-is-always-serializ.patch [bz#1691986] +- ci-DatasourceAzure-add-additional-logging-for-azure-dat.patch [bz#1691986] +- ci-Azure-Changes-to-the-Hyper-V-KVP-Reporter.patch [bz#1691986] +- ci-DataSourceAzure-Adjust-timeout-for-polling-IMDS.patch [bz#1691986] +- ci-cc_mounts-check-if-mount-a-on-no-change-fstab-path.patch [bz#1691986] +- Resolves: bz#1691986 + ([Azure] [RHEL 8.1] Cloud-init fixes to support fast provisioning for Azure) + +* Tue Apr 16 2019 Danilo Cesar Lemes de Paula - 18.5-3.el8 +- ci-Adding-gating-tests-for-Azure-ESXi-and-AWS.patch [bz#1682786] +- Resolves: bz#1682786 + (cloud-init changes blocked until gating tests are added) + +* Wed Apr 10 2019 Danilo C. L. de Paula - 18.5-2 +- Adding gating.yaml file +- Resolves: rhbz#1682786 + (cloud-init changes blocked until gating tests are added) + + +* Wed Apr 10 2019 Danilo de Paula - 18.2-6.el8 +- ci-net-Make-sysconfig-renderer-compatible-with-Network-.patch [bz#1602784] +- Resolves: bz#1602784 + (cloud-init: Sometimes image boots fingerprints is configured, there's a network device present but it's not configured) + +* Fri Jan 18 2019 Miroslav Rezanina - 18.2-5.el8 +- ci-Fix-string-missmatch-when-mounting-ntfs.patch [bz#1664227] +- Resolves: bz#1664227 + ([Azure]String missmatch causes the /dev/sdb1 mounting failed after stop&start VM) + +* Thu Jan 10 2019 Miroslav Rezanina - 18.2-4.el8 +- ci-Enable-cloud-init-by-default-on-vmware.patch [bz#1644335] +- Resolves: bz#1644335 + ([ESXi][RHEL8.0]Enable cloud-init by default on VMware) + +* Wed Nov 28 2018 Miroslav Rezanina - 18.2-3.el8 +- ci-Adding-systemd-mount-options-to-wait-for-cloud-init.patch [bz#1615599] +- ci-Azure-Ignore-NTFS-mount-errors-when-checking-ephemer.patch [bz#1615599] +- ci-azure-Add-reported-ready-marker-file.patch [bz#1615599] +- ci-Adding-disk_setup-to-rhel-cloud.cfg.patch [bz#1615599] +- Resolves: bz#1615599 + ([Azure] cloud-init fails to mount /dev/sdb1 after stop(deallocate)&&start VM) + +* Tue Nov 06 2018 Miroslav Rezanina - 18.2-2.el7 +- Revert "remove 'tee' command from logging configuration" [bz#1626117] +- Resolves: rhbz#1626117 + (cloud-init-0.7.9-9 doesn't feed cloud-init-output.log) + +* Fri Jun 29 2018 Miroslav Rezanina - 18.2-1.el7 +- Rebase to 18.2 [bz#1515909] + Resolves: rhbz#1515909 + +* Tue Feb 13 2018 Ryan McCabe 0.7.9-24 +- Set DHCP_HOSTNAME on Azure to allow for the hostname to be + published correctly when bouncing the network. + Resolves: rhbz#1434109 + +* Mon Jan 15 2018 Ryan McCabe 0.7.9-23 +- Fix a bug that caused cloud-init to fail as a result of trying + to rename bonds.
+ Resolves: rhbz#1512247 + +* Mon Jan 15 2018 Ryan McCabe 0.7.9-22 +- Apply patch from -21 + Resolves: rhbz#1489270 + +* Mon Jan 15 2018 Ryan McCabe 0.7.9-21 +- sysconfig: Fix a potential traceback introduced in the + 0.7.9-17 build + Resolves: rhbz#1489270 + +* Sun Dec 17 2017 Ryan McCabe 0.7.9-20 +- sysconfig: Correct rendering for dhcp on ipv6 + Resolves: rhbz#1519271 + +* Thu Nov 30 2017 Ryan McCabe 0.7.9-19 +- sysconfig: Fix rendering of default gateway for ipv6 + Resolves: rhbz#1492726 + +* Fri Nov 24 2017 Ryan McCabe 0.7.9-18 +- Start the cloud-init init local service after the dbus socket is created + so that the hostnamectl command works. + Resolves: rhbz#1450521 + +* Tue Nov 21 2017 Ryan McCabe 0.7.9-17 +- Correctly render DNS and DOMAIN for sysconfig + Resolves: rhbz#1489270 + +* Mon Nov 20 2017 Ryan McCabe 0.7.9-16 +- Disable NetworkManager management of resolv.conf if nameservers + are specified by configuration. + Resolves: rhbz#1454491 + +* Mon Nov 13 2017 Ryan McCabe 0.7.9-15 +- Fix a null reference error in the rh_subscription module + Resolves: rhbz#1498974 + +* Mon Nov 13 2017 Ryan McCabe 0.7.9-14 +- Include gateway if it's included in subnet configuration + Resolves: rhbz#1492726 + +* Sun Nov 12 2017 Ryan McCabe 0.7.9-13 +- Do proper cleanup of systemd units when upgrading from versions + 0.7.9-3 through 0.7.9-8. + Resolves: rhbz#1465730 + +* Thu Nov 09 2017 Ryan McCabe 0.7.9-12 +- Prevent Azure NM and dhclient hooks from running when cloud-init is + disabled (rhbz#1474226) + +* Tue Oct 31 2017 Ryan McCabe 0.7.9-11 +- Fix rendering of multiple static IPs per interface file + Resolves: rhbz#1497954 + +* Tue Sep 26 2017 Ryan McCabe 0.7.9-10 +- AliCloud: Add support for the Alibaba Cloud datasource (rhbz#1482547) + +* Thu Jun 22 2017 Lars Kellogg-Stedman 0.7.9-9 +- RHEL/CentOS: Fix default routes for IPv4/IPv6 configuration. (rhbz#1438082) +- azure: ensure that networkmanager hook script runs (rhbz#1440831 rhbz#1460206) +- Fix ipv6 subnet detection (rhbz#1438082) + +* Tue May 23 2017 Lars Kellogg-Stedman 0.7.9-8 +- Update patches + +* Mon May 22 2017 Lars Kellogg-Stedman 0.7.9-7 +- Add missing sysconfig unit test data (rhbz#1438082) +- Fix dual stack IPv4/IPv6 configuration for RHEL (rhbz#1438082) +- sysconfig: Raise ValueError when multiple default gateways are present. (rhbz#1438082) +- Bounce network interface for Azure when using the built-in path. (rhbz#1434109) +- Do not write NM_CONTROLLED=no in generated interface config files (rhbz#1385172) + +* Wed May 10 2017 Lars Kellogg-Stedman 0.7.9-6 +- add power-state-change module to cloud_final_modules (rhbz#1252477) +- remove 'tee' command from logging configuration (rhbz#1424612) +- limit permissions on def_log_file (rhbz#1424612) +- Bounce network interface for Azure when using the built-in path. (rhbz#1434109) +- OpenStack: add 'dvs' to the list of physical link types. (rhbz#1442783) + +* Wed May 10 2017 Lars Kellogg-Stedman 0.7.9-5 +- systemd: replace generator with unit conditionals (rhbz#1440831) + +* Thu Apr 13 2017 Charalampos Stratakis 0.7.9-4 +- Import to RHEL 7 +Resolves: rhbz#1427280 + +* Tue Mar 07 2017 Lars Kellogg-Stedman 0.7.9-3 +- fixes for network config generation +- avoid dependency cycle at boot (rhbz#1420946) + +* Tue Jan 17 2017 Lars Kellogg-Stedman 0.7.9-2 +- use timeout from datasource config in openstack get_data (rhbz#1408589) + +* Thu Dec 01 2016 Lars Kellogg-Stedman - 0.7.9-1 +- Rebased on upstream 0.7.9.
+- Remove dependency on run-parts + +* Wed Jan 06 2016 Lars Kellogg-Stedman - 0.7.6-8 +- make rh_subscription plugin do nothing in the absence of a valid + configuration [RH:1295953] +- move rh_subscription module to cloud_config stage + +* Wed Jan 06 2016 Lars Kellogg-Stedman - 0.7.6-7 +- correct permissions on /etc/ssh/sshd_config [RH:1296191] + +* Thu Sep 03 2015 Lars Kellogg-Stedman - 0.7.6-6 +- rebuild for ppc64le + +* Tue Jul 07 2015 Lars Kellogg-Stedman - 0.7.6-5 +- bump revision for new build + +* Tue Jul 07 2015 Lars Kellogg-Stedman - 0.7.6-4 +- ensure rh_subscription plugin is enabled by default + +* Wed Apr 29 2015 Lars Kellogg-Stedman - 0.7.6-3 +- added dependency on python-jinja2 [RH:1215913] +- added rhn_subscription plugin [RH:1227393] +- require pyserial to support smartos data source [RH:1226187] + +* Fri Jan 16 2015 Lars Kellogg-Stedman - 0.7.6-2 +- Rebased RHEL version to Fedora rawhide +- Backported fix for https://bugs.launchpad.net/cloud-init/+bug/1246485 +- Backported fix for https://bugs.launchpad.net/cloud-init/+bug/1411829 + +* Fri Nov 14 2014 Colin Walters - 0.7.6-1 +- New upstream version [RH:974327] +- Drop python-cheetah dependency (same as above bug) diff --git a/sources b/sources new file mode 100644 index 0000000..a1a92cd --- /dev/null +++ b/sources @@ -0,0 +1,2 @@ +SHA512 (cloud-init-21.1.tar.gz) = 893b1eacc343a0aad61e308819163e99cab93c6a15b47d30d0a7fb2aced03d6188cf69062117876009ebccf69ba1bd1e3145d7fd2988b5794e736c91196bea1d +SHA512 (test_version_change.pkl) = 90024f78a5931a7cfca3a90a7b288bd9594aba2ac99d47021a282a3221153acba6b4f1bc70513e87fc26919864b78773442cb60aec456e06ca293c0e6938f174
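For reference, a minimal sketch of how SHA512 digests like the two entries above can be recomputed locally and compared against the sources file, using Python's hashlib (it assumes the two files sit in the current directory; sha512_of is a hypothetical helper for this sketch, not part of the packaging scripts):

import hashlib

def sha512_of(path, chunk_size=1024 * 1024):
    """Return the hex SHA512 digest of a file, read in chunks."""
    digest = hashlib.sha512()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Compare the printed digests against the entries in the sources file above.
for name in ("cloud-init-21.1.tar.gz", "test_version_change.pkl"):
    print(name, sha512_of(name))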