diff --git a/.gitignore b/.gitignore index 7d62069..2a2a28f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ /sos-4.2.tar.gz /sos-audit-0.3.tgz /sos-4.3.tar.gz +/sos-4.4.tar.gz diff --git a/sos-bz2055002-rebase-sos-add-sos-help.patch b/sos-bz2055002-rebase-sos-add-sos-help.patch deleted file mode 100644 index 1b8af24..0000000 --- a/sos-bz2055002-rebase-sos-add-sos-help.patch +++ /dev/null @@ -1,67 +0,0 @@ -From b5389aa195675f473acdd22f20017a8854ff82d0 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Wed, 16 Feb 2022 08:43:32 +0100 -Subject: [PATCH] [man] Mention sos-help in main sos manpage - -Related to #2860 - -Signed-off-by: Pavel Moravec ---- - man/en/sos.1 | 8 ++++++++ - 1 file changed, 8 insertions(+) - -diff --git a/man/en/sos.1 b/man/en/sos.1 -index ce4918f99..c335b7e10 100644 ---- a/man/en/sos.1 -+++ b/man/en/sos.1 -@@ -67,6 +67,14 @@ May be invoked via either \fBsos clean\fR, \fBsos cleaner\fR, \fBsos mask\fR, - or via the \fB--clean\fR, \fB--cleaner\fR or \fB --mask\fR options - for \fBreport\fR and \fBcollect\fR. - -+.TP -+.B help -+This subcommand is used to retrieve more detailed information on the various SoS -+commands and components than is directly available in either other manpages or -+--help output. -+ -+See \fB sos help --help\fR and \fB man sos-help\fR for more information. -+ - .SH GLOBAL OPTIONS - sos components provide their own set of options, however the following are available - to be set across all components. -From ac4eb48fa35c13b99ada41540831412480babf8d Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Wed, 16 Feb 2022 08:44:16 +0100 -Subject: [PATCH] [setup] Add sos-help to build process - -Resolves: #2860 -Closes: #2861 - -Signed-off-by: Pavel Moravec ---- - setup.py | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/setup.py b/setup.py -index 25e87a71b..8db8641f0 100644 ---- a/setup.py -+++ b/setup.py -@@ -90,7 +90,7 @@ def copy_file (self, filename, dirname): - ('share/man/man1', ['man/en/sosreport.1', 'man/en/sos-report.1', - 'man/en/sos.1', 'man/en/sos-collect.1', - 'man/en/sos-collector.1', 'man/en/sos-clean.1', -- 'man/en/sos-mask.1']), -+ 'man/en/sos-mask.1', 'man/en/sos-help.1']), - ('share/man/man5', ['man/en/sos.conf.5']), - ('share/licenses/sos', ['LICENSE']), - ('share/doc/sos', ['AUTHORS', 'README.md']), -@@ -102,7 +102,8 @@ def copy_file (self, filename, dirname): - 'sos.policies.package_managers', 'sos.policies.init_systems', - 'sos.report', 'sos.report.plugins', 'sos.collector', - 'sos.collector.clusters', 'sos.collector.transports', 'sos.cleaner', -- 'sos.cleaner.mappings', 'sos.cleaner.parsers', 'sos.cleaner.archives' -+ 'sos.cleaner.mappings', 'sos.cleaner.parsers', 'sos.cleaner.archives', -+ 'sos.help' - ], - cmdclass=cmdclass, - command_options=command_options, diff --git a/sos-bz2058279-ocp-backports.patch b/sos-bz2058279-ocp-backports.patch deleted file mode 100644 index 7cfaa91..0000000 --- a/sos-bz2058279-ocp-backports.patch +++ /dev/null @@ -1,1113 +0,0 @@ -From d0f9d507b0ec63c9e8f3e5d7b6507d9d0f97c038 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Tue, 15 Feb 2022 16:24:47 -0500 -Subject: [PATCH] [runtimes] Allow container IDs to be used with - `container_exists()` - -As container runtimes can interchange container names and container IDs, -sos should also allow the use of container IDs when checking for the -presence of a given container. 
- -In particular, this change unblocks the use of `Plugin.exec_cmd()` when -used in conjunction with `Plugin.get_container_by_name()` to pick a -container based on a provided regex that the container name may match. - -Related: #2856 - -Signed-off-by: Jake Hunsaker ---- - sos/policies/runtimes/__init__.py | 17 +++++++++++++++++ - sos/report/plugins/__init__.py | 6 +++--- - 2 files changed, 20 insertions(+), 3 deletions(-) - -diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py -index 5ac67354..d2837349 100644 ---- a/sos/policies/runtimes/__init__.py -+++ b/sos/policies/runtimes/__init__.py -@@ -147,6 +147,23 @@ class ContainerRuntime(): - vols.append(ent[-1]) - return vols - -+ def container_exists(self, container): -+ """Check if a given container ID or name exists on the system from the -+ perspective of the container runtime. -+ -+ Note that this will only check _running_ containers -+ -+ :param container: The name or ID of the container -+ :type container: ``str`` -+ -+ :returns: True if the container exists, else False -+ :rtype: ``bool`` -+ """ -+ for _contup in self.containers: -+ if container in _contup: -+ return True -+ return False -+ - def fmt_container_cmd(self, container, cmd, quotecmd): - """Format a command to run inside a container using the runtime - -diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py -index 2988be08..cc5cb65b 100644 ---- a/sos/report/plugins/__init__.py -+++ b/sos/report/plugins/__init__.py -@@ -2593,7 +2593,7 @@ class Plugin(): - """If a container runtime is present, check to see if a container with - a given name is currently running - -- :param name: The name of the container to check presence of -+ :param name: The name or ID of the container to check presence of - :type name: ``str`` - - :returns: ``True`` if `name` exists, else ``False`` -@@ -2601,8 +2601,8 @@ class Plugin(): - """ - _runtime = self._get_container_runtime() - if _runtime is not None: -- con = _runtime.get_container_by_name(name) -- return con is not None -+ return (_runtime.container_exists(name) or -+ _runtime.get_container_by_name(name) is not None) - return False - - def get_all_containers_by_regex(self, regex, get_all=False): --- -2.34.3 - -From 2ae16e0245e1b01b8547e507abb69c11871a8467 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Mon, 21 Feb 2022 14:37:09 -0500 -Subject: [PATCH] [sosnode] Handle downstream versioning for runtime option - check - -First, adds parsing and formatting for an sos installation's release -version according to the loaded package manager for that node. - -Adds a fallback version check for 4.2-13 for RHEL downstreams that -backport the `container-runtime` option into sos-4.2. - -Carry this in upstream to account for use cases where a workstation used -to run `collect` from may be from a different stream than those used by -cluster nodes. 
- -Signed-off-by: Jake Hunsaker ---- - sos/collector/sosnode.py | 60 ++++++++++++++++++++++++++++++++++------ - 1 file changed, 51 insertions(+), 9 deletions(-) - -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index 7bbe0cd1..d9b998b0 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -275,21 +275,34 @@ class SosNode(): - def _load_sos_info(self): - """Queries the node for information about the installed version of sos - """ -+ ver = None -+ rel = None - if self.host.container_version_command is None: - pkg = self.host.package_manager.pkg_version(self.host.sos_pkg_name) - if pkg is not None: - ver = '.'.join(pkg['version']) -- self.sos_info['version'] = ver -+ if pkg['release']: -+ rel = pkg['release'] -+ - else: - # use the containerized policy's command - pkgs = self.run_command(self.host.container_version_command, - use_container=True, need_root=True) - if pkgs['status'] == 0: -- ver = pkgs['output'].strip().split('-')[1] -- if ver: -- self.sos_info['version'] = ver -- else: -- self.sos_info['version'] = None -+ _, ver, rel = pkgs['output'].strip().split('-') -+ -+ if ver: -+ if len(ver.split('.')) == 2: -+ # safeguard against maintenance releases throwing off the -+ # comparison by LooseVersion -+ ver += '.0' -+ try: -+ ver += '-%s' % rel.split('.')[0] -+ except Exception as err: -+ self.log_debug("Unable to fully parse sos release: %s" % err) -+ -+ self.sos_info['version'] = ver -+ - if self.sos_info['version']: - self.log_info('sos version is %s' % self.sos_info['version']) - else: -@@ -381,9 +394,37 @@ class SosNode(): - """Checks to see if the sos installation on the node is AT LEAST the - given ver. This means that if the installed version is greater than - ver, this will still return True -+ -+ :param ver: Version number we are trying to verify is installed -+ :type ver: ``str`` -+ -+ :returns: True if installed version is at least ``ver``, else False -+ :rtype: ``bool`` - """ -- return self.sos_info['version'] is not None and \ -- LooseVersion(self.sos_info['version']) >= ver -+ def _format_version(ver): -+ # format the version we're checking to a standard form of X.Y.Z-R -+ try: -+ _fver = ver.split('-')[0] -+ _rel = '' -+ if '-' in ver: -+ _rel = '-' + ver.split('-')[-1].split('.')[0] -+ if len(_fver.split('.')) == 2: -+ _fver += '.0' -+ -+ return _fver + _rel -+ except Exception as err: -+ self.log_debug("Unable to format '%s': %s" % (ver, err)) -+ return ver -+ -+ _ver = _format_version(ver) -+ -+ try: -+ _node_ver = LooseVersion(self.sos_info['version']) -+ _test_ver = LooseVersion(_ver) -+ return _node_ver >= _test_ver -+ except Exception as err: -+ self.log_error("Error checking sos version: %s" % err) -+ return False - - def is_installed(self, pkg): - """Checks if a given package is installed on the node""" -@@ -587,7 +628,8 @@ class SosNode(): - sos_opts.append('--cmd-timeout=%s' - % quote(str(self.opts.cmd_timeout))) - -- if self.check_sos_version('4.3'): -+ # handle downstream versions that backported this option -+ if self.check_sos_version('4.3') or self.check_sos_version('4.2-13'): - if self.opts.container_runtime != 'auto': - sos_opts.append( - "--container-runtime=%s" % self.opts.container_runtime --- -2.34.3 - -From cc60fa5ee25bffed9203a4f786256185b7fe0115 Mon Sep 17 00:00:00 2001 -From: Nadia Pinaeva -Date: Tue, 15 Mar 2022 11:49:57 +0100 -Subject: [PATCH] Add ovs datapath and groups collection commands Add - ct-zone-list command for openshift-ovn - -Signed-off-by: Nadia Pinaeva ---- - 
sos/report/plugins/openshift_ovn.py | 4 ++++ - sos/report/plugins/openvswitch.py | 3 +++ - 2 files changed, 7 insertions(+) - -diff --git a/sos/report/plugins/openshift_ovn.py b/sos/report/plugins/openshift_ovn.py -index 168f1dd3..b4787b8e 100644 ---- a/sos/report/plugins/openshift_ovn.py -+++ b/sos/report/plugins/openshift_ovn.py -@@ -34,6 +34,10 @@ class OpenshiftOVN(Plugin, RedHatPlugin): - 'ovn-appctl -t /var/run/ovn/ovnsb_db.ctl ' + - 'cluster/status OVN_Southbound'], - container='ovnkube-master') -+ self.add_cmd_output([ -+ 'ovs-appctl -t /var/run/ovn/ovn-controller.*.ctl ' + -+ 'ct-zone-list'], -+ container='ovnkube-node') - self.add_cmd_output([ - 'ovs-appctl -t ovs-monitor-ipsec tunnels/show', - 'ipsec status', -diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py -index 179d1532..159b0bd2 100644 ---- a/sos/report/plugins/openvswitch.py -+++ b/sos/report/plugins/openvswitch.py -@@ -124,6 +124,8 @@ class OpenVSwitch(Plugin): - "ovs-vsctl -t 5 list interface", - # Capture OVS detailed information from all the bridges - "ovs-vsctl -t 5 list bridge", -+ # Capture OVS datapath list -+ "ovs-vsctl -t 5 list datapath", - # Capture DPDK queue to pmd mapping - "ovs-appctl dpif-netdev/pmd-rxq-show", - # Capture DPDK pmd stats -@@ -229,6 +231,7 @@ class OpenVSwitch(Plugin): - "ovs-ofctl queue-get-config %s" % br, - "ovs-ofctl queue-stats %s" % br, - "ovs-ofctl show %s" % br, -+ "ovs-ofctl dump-groups %s" % br, - ]) - - # Flow protocols currently supported --- -2.34.3 - -From af40be92f502b35fa9d39ce4d4fea7d80c367830 Mon Sep 17 00:00:00 2001 -From: Nadia Pinaeva -Date: Tue, 15 Mar 2022 13:09:55 +0100 -Subject: [PATCH] Improve sos collect for OCP: 1. wait for sos tmp project to - be deleted (just calling delete changes project state to Terminating, and - running a new sos collect is not possible before this project is fully - deleted) 2. use --retries flag to copy sos reports from the nodes more - reliably. 
The flag has been recently added to kubectl, and the most reliable - way to check if it's available or not is to check command error output for - "unknown flag" substring - -Signed-off-by: Nadia Pinaeva ---- - sos/collector/clusters/ocp.py | 5 +++++ - sos/collector/transports/oc.py | 6 +++++- - 2 files changed, 10 insertions(+), 1 deletion(-) - -diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py -index f1714239..9beb2f9b 100644 ---- a/sos/collector/clusters/ocp.py -+++ b/sos/collector/clusters/ocp.py -@@ -123,6 +123,11 @@ class ocp(Cluster): - if not ret['status'] == 0: - self.log_error("Error deleting temporary project: %s" - % ret['output']) -+ ret = self.exec_primary_cmd("oc wait namespace/%s --for=delete " -+ "--timeout=30s" % self.project) -+ if not ret['status'] == 0: -+ self.log_error("Error waiting for temporary project to be " -+ "deleted: %s" % ret['output']) - # don't leave the config on a non-existing project - self.exec_primary_cmd("oc project default") - self.project = None -diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py -index 0fc9eee8..90a802b2 100644 ---- a/sos/collector/transports/oc.py -+++ b/sos/collector/transports/oc.py -@@ -231,5 +231,9 @@ class OCTransport(RemoteTransport): - % (self.project, self.pod_name)) - - def _retrieve_file(self, fname, dest): -- cmd = self.run_oc("cp %s:%s %s" % (self.pod_name, fname, dest)) -+ # check if --retries flag is available for given version of oc -+ result = self.run_oc("cp --retries", stderr=True) -+ flags = '' if "unknown flag" in result["output"] else '--retries=5' -+ cmd = self.run_oc("cp %s %s:%s %s" -+ % (flags, self.pod_name, fname, dest)) - return cmd['status'] == 0 --- -2.34.3 - -From 3b0676b90ff65f20eaba3062775ff72b89386ffc Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Tue, 22 Mar 2022 14:25:24 -0400 -Subject: [PATCH] [Plugin] Allow plugins to define default command environment - vars - -Adds the ability for plugins to define a default set of environment vars -to pass to all commands executed by the plugin. This may be done either -via the new `set_default_cmd_environment()` or -`add_default_cmd_environment()` methods. The former will override any -previously set values, whereas the latter will add/update/modify any -existing values. 
- -Signed-off-by: Jake Hunsaker ---- - sos/report/plugins/__init__.py | 55 ++++++++++++++++++- - .../plugin_tests/plugin_environment.py | 44 +++++++++++++++ - .../fake_plugins/default_env_test.py | 28 ++++++++++ - tests/unittests/plugin_tests.py | 15 +++++ - 4 files changed, 140 insertions(+), 2 deletions(-) - create mode 100644 tests/report_tests/plugin_tests/plugin_environment.py - create mode 100644 tests/test_data/fake_plugins/default_env_test.py - -diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py -index 336b4d22..74b4f4be 100644 ---- a/sos/report/plugins/__init__.py -+++ b/sos/report/plugins/__init__.py -@@ -571,6 +571,7 @@ class Plugin(): - self.manifest = None - self.skip_files = commons['cmdlineopts'].skip_files - self.skip_commands = commons['cmdlineopts'].skip_commands -+ self.default_environment = {} - - self.soslog = self.commons['soslog'] if 'soslog' in self.commons \ - else logging.getLogger('sos') -@@ -624,6 +625,52 @@ class Plugin(): - self.manifest.add_field('strings', {}) - self.manifest.add_field('containers', {}) - -+ def set_default_cmd_environment(self, env_vars): -+ """ -+ Specify a collection of environment variables that should always be -+ passed to commands being executed by this plugin. -+ -+ :param env_vars: The environment variables and their values to set -+ :type env_vars: ``dict{ENV_VAR_NAME: ENV_VAR_VALUE}`` -+ """ -+ if not isinstance(env_vars, dict): -+ raise TypeError( -+ "Environment variables for Plugin must be specified by dict" -+ ) -+ self.default_environment = env_vars -+ self._log_debug("Default environment for all commands now set to %s" -+ % self.default_environment) -+ -+ def add_default_cmd_environment(self, env_vars): -+ """ -+ Add or modify a specific environment variable in the set of default -+ environment variables used by this Plugin. -+ -+ :param env_vars: The environment variables to add to the current -+ set of env vars in use -+ :type env_vars: ``dict`` -+ """ -+ if not isinstance(env_vars, dict): -+ raise TypeError("Environment variables must be added via dict") -+ self._log_debug("Adding %s to default environment" % env_vars) -+ self.default_environment.update(env_vars) -+ -+ def _get_cmd_environment(self, env=None): -+ """ -+ Get the merged set of environment variables for a command about to be -+ executed by this plugin. 
-+ -+ :returns: The set of env vars to use for a command -+ :rtype: ``dict`` -+ """ -+ if env is None: -+ return self.default_environment -+ if not isinstance(env, dict): -+ raise TypeError("Command env vars must be passed as dict") -+ _env = self.default_environment.copy() -+ _env.update(env) -+ return _env -+ - def timeout_from_options(self, optname, plugoptname, default_timeout): - """Returns either the default [plugin|cmd] timeout value, the value as - provided on the commandline via -k plugin.[|cmd-]timeout=value, or the -@@ -2258,6 +2305,8 @@ class Plugin(): - - _tags = list(set(_tags)) - -+ _env = self._get_cmd_environment(env) -+ - if chroot or self.commons['cmdlineopts'].chroot == 'always': - root = self.sysroot - else: -@@ -2282,7 +2331,7 @@ class Plugin(): - - result = sos_get_command_output( - cmd, timeout=timeout, stderr=stderr, chroot=root, -- chdir=runat, env=env, binary=binary, sizelimit=sizelimit, -+ chdir=runat, env=_env, binary=binary, sizelimit=sizelimit, - poller=self.check_timeout, foreground=foreground, - to_file=out_file - ) -@@ -2510,6 +2559,8 @@ class Plugin(): - else: - root = None - -+ _env = self._get_cmd_environment(env) -+ - if container: - if self._get_container_runtime() is None: - self._log_info("Cannot run cmd '%s' in container %s: no " -@@ -2522,7 +2573,7 @@ class Plugin(): - "container is running." % (cmd, container)) - - return sos_get_command_output(cmd, timeout=timeout, chroot=root, -- chdir=runat, binary=binary, env=env, -+ chdir=runat, binary=binary, env=_env, - foreground=foreground, stderr=stderr) - - def _add_container_file_to_manifest(self, container, path, arcpath, tags): -diff --git a/tests/report_tests/plugin_tests/plugin_environment.py b/tests/report_tests/plugin_tests/plugin_environment.py -new file mode 100644 -index 00000000..3158437a ---- /dev/null -+++ b/tests/report_tests/plugin_tests/plugin_environment.py -@@ -0,0 +1,44 @@ -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. -+ -+import os -+ -+from sos_tests import StageTwoReportTest -+ -+ -+class PluginDefaultEnvironmentTest(StageTwoReportTest): -+ """ -+ Ensure that being able to set a default set of environment variables is -+ working correctly and does not leave a lingering env var on the system -+ -+ :avocado: tags=stageone -+ """ -+ -+ install_plugins = ['default_env_test'] -+ sos_cmd = '-o default_env_test' -+ -+ def test_environment_used_in_cmd(self): -+ self.assertFileHasContent( -+ 'sos_commands/default_env_test/env_var_test', -+ 'Does Linus play hockey?' 
-+ ) -+ -+ def test_environment_setting_logged(self): -+ self.assertSosLogContains( -+ 'Default environment for all commands now set to' -+ ) -+ -+ def test_environment_not_set_on_host(self): -+ self.assertTrue('TORVALDS' not in os.environ) -+ self.assertTrue('GREATESTSPORT' not in os.environ) -+ -+ def test_environment_not_captured(self): -+ # we should still have an empty environment file -+ self.assertFileCollected('environment') -+ self.assertFileNotHasContent('environment', 'TORVALDS') -+ self.assertFileNotHasContent('environment', 'GREATESTSPORT') -diff --git a/tests/test_data/fake_plugins/default_env_test.py b/tests/test_data/fake_plugins/default_env_test.py -new file mode 100644 -index 00000000..d1d1fb78 ---- /dev/null -+++ b/tests/test_data/fake_plugins/default_env_test.py -@@ -0,0 +1,28 @@ -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. -+ -+from sos.report.plugins import Plugin, IndependentPlugin -+ -+ -+class DefaultEnv(Plugin, IndependentPlugin): -+ -+ plugin_name = 'default_env_test' -+ short_desc = 'Fake plugin to test default env var handling' -+ -+ def setup(self): -+ self.set_default_cmd_environment({ -+ 'TORVALDS': 'Linus', -+ 'GREATESTSPORT': 'hockey' -+ }) -+ -+ self.add_cmd_output( -+ "sh -c 'echo Does '$TORVALDS' play '$GREATESTSPORT'?'", -+ suggest_filename='env_var_test' -+ ) -+ -+ self.add_env_var(['TORVALDS', 'GREATESTSPORT']) -diff --git a/tests/unittests/plugin_tests.py b/tests/unittests/plugin_tests.py -index 0dfa243d..e469b78e 100644 ---- a/tests/unittests/plugin_tests.py -+++ b/tests/unittests/plugin_tests.py -@@ -305,6 +305,21 @@ class PluginTests(unittest.TestCase): - p.postproc() - self.assertTrue(p.did_postproc) - -+ def test_set_default_cmd_env(self): -+ p = MockPlugin({ -+ 'sysroot': self.sysroot, -+ 'policy': LinuxPolicy(init=InitSystem(), probe_runtime=False), -+ 'cmdlineopts': MockOptions(), -+ 'devices': {} -+ }) -+ e = {'TORVALDS': 'Linus'} -+ p.set_default_cmd_environment(e) -+ self.assertEquals(p.default_environment, e) -+ add_e = {'GREATESTSPORT': 'hockey'} -+ p.add_default_cmd_environment(add_e) -+ self.assertEquals(p.default_environment['GREATESTSPORT'], 'hockey') -+ self.assertEquals(p.default_environment['TORVALDS'], 'Linus') -+ - - class AddCopySpecTests(unittest.TestCase): - --- -2.34.3 - -From 1e12325efaa500d304dcbfbeeb50e72ed0f938f5 Mon Sep 17 00:00:00 2001 -From: Vladislav Walek <22072258+vwalek@users.noreply.github.com> -Date: Thu, 17 Mar 2022 14:10:26 -0700 -Subject: [PATCH] [openshift] Adding ability to use the localhost.kubeconfig - and KUBECONFIG env to use system:admin - -Signed-off-by: Vladislav Walek <22072258+vwalek@users.noreply.github.com> ---- - sos/report/plugins/openshift.py | 45 +++++++++++++++++++++++++++++++-- - 1 file changed, 43 insertions(+), 2 deletions(-) - -diff --git a/sos/report/plugins/openshift.py b/sos/report/plugins/openshift.py -index 5ae38178..d643f04c 100644 ---- a/sos/report/plugins/openshift.py -+++ b/sos/report/plugins/openshift.py -@@ -53,12 +53,19 @@ class Openshift(Plugin, RedHatPlugin): - profiles = ('openshift',) - packages = ('openshift-hyperkube',) - -+ master_localhost_kubeconfig = ( -+ '/etc/kubernetes/static-pod-resources/' -+ 
'kube-apiserver-certs/secrets/node-kubeconfigs/localhost.kubeconfig' -+ ) -+ - option_list = [ - PluginOpt('token', default=None, val_type=str, - desc='admin token to allow API queries'), -+ PluginOpt('kubeconfig', default=None, val_type=str, -+ desc='Path to a locally available kubeconfig file'), - PluginOpt('host', default='https://localhost:6443', - desc='host address to use for oc login, including port'), -- PluginOpt('no-oc', default=False, desc='do not collect `oc` output'), -+ PluginOpt('no-oc', default=True, desc='do not collect `oc` output'), - PluginOpt('podlogs', default=True, desc='collect logs from each pod'), - PluginOpt('podlogs-filter', default='', val_type=str, - desc='only collect logs from pods matching this pattern'), -@@ -73,6 +80,10 @@ class Openshift(Plugin, RedHatPlugin): - """Check to see if we can run `oc` commands""" - return self.exec_cmd('oc whoami')['status'] == 0 - -+ def _check_localhost_kubeconfig(self): -+ """Check if the localhost.kubeconfig exists with system:admin user""" -+ return self.path_exists(self.get_option('kubeconfig')) -+ - def _check_oc_logged_in(self): - """See if we're logged in to the API service, and if not attempt to do - so using provided plugin options -@@ -80,8 +91,38 @@ class Openshift(Plugin, RedHatPlugin): - if self._check_oc_function(): - return True - -- # Not logged in currently, attempt to do so -+ if self.get_option('kubeconfig') is None: -+ # If admin doesn't add the kubeconfig -+ # use default localhost.kubeconfig -+ self.set_option( -+ 'kubeconfig', -+ self.master_localhost_kubeconfig -+ ) -+ -+ # Check first if we can use the localhost.kubeconfig before -+ # using token. We don't want to use 'host' option due we use -+ # cluster url from kubeconfig. Default is localhost. -+ if self._check_localhost_kubeconfig(): -+ self.set_default_cmd_environment({ -+ 'KUBECONFIG': self.get_option('kubeconfig') -+ }) -+ -+ oc_res = self.exec_cmd( -+ "oc login -u system:admin " -+ "--insecure-skip-tls-verify=True" -+ ) -+ if oc_res['status'] == 0 and self._check_oc_function(): -+ return True -+ -+ self._log_warn( -+ "The login command failed with status: %s and error: %s" -+ % (oc_res['status'], oc_res['output']) -+ ) -+ return False -+ -+ # If kubeconfig is not defined, check if token is provided. 
- token = self.get_option('token') or os.getenv('SOSOCPTOKEN', None) -+ - if token: - oc_res = self.exec_cmd("oc login %s --token=%s " - "--insecure-skip-tls-verify=True" --- -2.34.3 - -From 61765992812afb785e9552e01e3b5579118a6963 Mon Sep 17 00:00:00 2001 -From: Nadia Pinaeva -Date: Fri, 1 Apr 2022 12:05:36 +0200 -Subject: [PATCH] Add one more container for plugin enablement - -Signed-off-by: Nadia Pinaeva ---- - sos/report/plugins/openshift_ovn.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/report/plugins/openshift_ovn.py b/sos/report/plugins/openshift_ovn.py -index b4787b8e..98522b1e 100644 ---- a/sos/report/plugins/openshift_ovn.py -+++ b/sos/report/plugins/openshift_ovn.py -@@ -16,7 +16,7 @@ class OpenshiftOVN(Plugin, RedHatPlugin): - """ - short_desc = 'Openshift OVN' - plugin_name = "openshift_ovn" -- containers = ('ovnkube-master', 'ovn-ipsec') -+ containers = ('ovnkube-master', 'ovnkube-node', 'ovn-ipsec') - profiles = ('openshift',) - - def setup(self): --- -2.34.3 - -From d3aa071efc85507341cf65dd61414a734654f50a Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Mon, 28 Mar 2022 14:47:09 -0400 -Subject: [PATCH] [presets] Adjust OCP preset options - -Adjust the options used by the 'ocp' preset to better reflect the -current collection needs and approach. - -This includes disabling the `cgroups` plugin due to the large amount of -mostly irrelevant data captured due to the high number of containers -present on OCP nodes, ensuring the `--container-runtime` option is set -to `crio` to align container-based collections, disabling HTML report -generation and increasing the base log size rather than blindly enabling -all-logs. - -Signed-off-by: Jake Hunsaker ---- - sos/presets/redhat/__init__.py | 13 +++++++++---- - 1 file changed, 9 insertions(+), 4 deletions(-) - -diff --git a/sos/presets/redhat/__init__.py b/sos/presets/redhat/__init__.py -index 865c9b6b..0b9f6f11 100644 ---- a/sos/presets/redhat/__init__.py -+++ b/sos/presets/redhat/__init__.py -@@ -36,10 +36,15 @@ RHOSP_OPTS = SoSOptions(plugopts=[ - - RHOCP = "ocp" - RHOCP_DESC = "OpenShift Container Platform by Red Hat" --RHOCP_OPTS = SoSOptions(all_logs=True, verify=True, plugopts=[ -- 'networking.timeout=600', -- 'networking.ethtool_namespaces=False', -- 'networking.namespaces=200']) -+RHOCP_OPTS = SoSOptions( -+ verify=True, skip_plugins=['cgroups'], container_runtime='crio', -+ no_report=True, log_size=100, -+ plugopts=[ -+ 'crio.timeout=600', -+ 'networking.timeout=600', -+ 'networking.ethtool_namespaces=False', -+ 'networking.namespaces=200' -+ ]) - - RH_CFME = "cfme" - RH_CFME_DESC = "Red Hat CloudForms" --- -2.34.3 - -From f2b67ab820070063995689fed03492cdaa012d01 Mon Sep 17 00:00:00 2001 -From: Nadia Pinaeva -Date: Fri, 1 Apr 2022 17:01:35 +0200 -Subject: [PATCH] Use /etc/os-release instead of /etc/redhat-release as the - most compatible way to find host release - -Signed-off-by: Nadia Pinaeva ---- - sos/policies/distros/redhat.py | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - -diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py -index 0c72a5e4..2e117f37 100644 ---- a/sos/policies/distros/redhat.py -+++ b/sos/policies/distros/redhat.py -@@ -40,7 +40,6 @@ class RedHatPolicy(LinuxPolicy): - ('Distribution Website', 'https://www.redhat.com/'), - ('Commercial Support', 'https://www.access.redhat.com/') - ] -- _redhat_release = '/etc/redhat-release' - _tmp_dir = "/var/tmp" - _in_container = False - default_scl_prefix = '/opt/rh' -@@ -471,7 +470,7 @@ 
support representative. - atomic = False - if ENV_HOST_SYSROOT not in os.environ: - return atomic -- host_release = os.environ[ENV_HOST_SYSROOT] + cls._redhat_release -+ host_release = os.environ[ENV_HOST_SYSROOT] + OS_RELEASE - if not os.path.exists(host_release): - return False - try: -@@ -558,7 +557,7 @@ support representative. - coreos = False - if ENV_HOST_SYSROOT not in os.environ: - return coreos -- host_release = os.environ[ENV_HOST_SYSROOT] + cls._redhat_release -+ host_release = os.environ[ENV_HOST_SYSROOT] + OS_RELEASE - try: - for line in open(host_release, 'r').read().splitlines(): - coreos |= 'Red Hat Enterprise Linux CoreOS' in line --- -2.34.3 - -From ee0dd68199a2c9296eafe64ead5b2263c8270e4a Mon Sep 17 00:00:00 2001 -From: Nadia Pinaeva -Date: Wed, 6 Apr 2022 11:56:41 +0200 -Subject: [PATCH] Use --force-pull-image option for pods created with oc. Set - --force-pull-image=True by default, can be turned off with - --force-pull-image=False - -Signed-off-by: Nadia Pinaeva ---- - man/en/sos-collect.1 | 16 +++++++++++----- - sos/collector/__init__.py | 9 +++++---- - sos/collector/transports/oc.py | 2 ++ - sos/options.py | 20 ++++++++++++++------ - 4 files changed, 32 insertions(+), 15 deletions(-) - -diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1 -index 9b0a5d7b..2f60332b 100644 ---- a/man/en/sos-collect.1 -+++ b/man/en/sos-collect.1 -@@ -28,7 +28,7 @@ sos collect \- Collect sosreports from multiple (cluster) nodes - [\-\-no\-local] - [\-\-primary PRIMARY] - [\-\-image IMAGE] -- [\-\-force-pull-image] -+ [\-\-force-pull-image TOGGLE, --pull TOGGLE] - [\-\-registry-user USER] - [\-\-registry-password PASSWORD] - [\-\-registry-authfile FILE] -@@ -262,10 +262,16 @@ Specify an image to use for the temporary container created for collections on - containerized host, if you do not want to use the default image specifed by the - host's policy. Note that this should include the registry. - .TP --\fB\-\-force-pull-image\fR --Use this option to force the container runtime to pull the specified image (even --if it is the policy default image) even if the image already exists on the host. --This may be useful to update an older container image on containerized hosts. -+\fB\-\-force-pull-image TOGGLE, \-\-pull TOGGLE\fR -+When collecting an sos report from a containerized host, force the host to always -+pull the specified image, even if that image already exists on the host. -+This is useful to ensure that the latest version of that image is always in use. -+Disabling this option will use whatever version of the image is present on the node, -+and only attempt a pull if there is no copy of the image present at all. 
-+ -+Enable with true/on/yes or disable with false/off/no -+ -+Default: true - .TP - \fB\-\-registry-user USER\fR - Specify the username to authenticate to the registry with in order to pull the container -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index d898ca34..66c3d932 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -27,7 +27,7 @@ from pipes import quote - from textwrap import fill - from sos.cleaner import SoSCleaner - from sos.collector.sosnode import SosNode --from sos.options import ClusterOption -+from sos.options import ClusterOption, str_to_bool - from sos.component import SoSComponent - from sos.utilities import bold - from sos import __version__ -@@ -85,7 +85,7 @@ class SoSCollector(SoSComponent): - 'encrypt_pass': '', - 'group': None, - 'image': '', -- 'force_pull_image': False, -+ 'force_pull_image': True, - 'jobs': 4, - 'keywords': [], - 'keyword_file': None, -@@ -357,8 +357,9 @@ class SoSCollector(SoSComponent): - collect_grp.add_argument('--image', - help=('Specify the container image to use for' - ' containerized hosts.')) -- collect_grp.add_argument('--force-pull-image', '--pull', default=False, -- action='store_true', -+ collect_grp.add_argument('--force-pull-image', '--pull', -+ default=True, choices=(True, False), -+ type=str_to_bool, - help='Force pull the container image even if ' - 'it already exists on the host') - collect_grp.add_argument('--registry-user', default=None, -diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py -index 90a802b2..8f6aa9b4 100644 ---- a/sos/collector/transports/oc.py -+++ b/sos/collector/transports/oc.py -@@ -147,6 +147,8 @@ class OCTransport(RemoteTransport): - "tty": True - } - ], -+ "imagePullPolicy": -+ "Always" if self.opts.force_pull_image else "IfNotPresent", - "restartPolicy": "Never", - "nodeName": self.address, - "hostNetwork": True, -diff --git a/sos/options.py b/sos/options.py -index 4846a509..2d5a5135 100644 ---- a/sos/options.py -+++ b/sos/options.py -@@ -18,6 +18,16 @@ def _is_seq(val): - return val_type is list or val_type is tuple - - -+def str_to_bool(val): -+ _val = val.lower() -+ if _val in ['true', 'on', 'yes']: -+ return True -+ elif _val in ['false', 'off', 'no']: -+ return False -+ else: -+ return None -+ -+ - class SoSOptions(): - - def _merge_opt(self, opt, src, is_default): -@@ -153,15 +163,13 @@ class SoSOptions(): - if isinstance(self.arg_defaults[key], list): - return [v for v in val.split(',')] - if isinstance(self.arg_defaults[key], bool): -- _val = val.lower() -- if _val in ['true', 'on', 'yes']: -- return True -- elif _val in ['false', 'off', 'no']: -- return False -- else: -+ val = str_to_bool(val) -+ if val is None: - raise Exception( - "Value of '%s' in %s must be True or False or analagous" - % (key, conf)) -+ else: -+ return val - if isinstance(self.arg_defaults[key], int): - try: - return int(val) --- -2.34.3 - -From ce289a3ae7101a898efdb84ddfd575576ba5819b Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Tue, 5 Apr 2022 11:32:11 -0400 -Subject: [PATCH] [ocp, openshift] Re-align API collection options and rename - option - -Previously, in #2888, the `openshift` plugin was extended to allow API -collections by using a default-available kubeconfig file rather than -relying on user-provided tokens. This also included flipping the default -value of the `no-oc` plugin option to `True` (meaning do not collect API -output by default). 
- -This worked for the plugin, but it introduced a gap in `sos collect` -whereby the cluster profile could no longer reliably enable API -collections when trying to leverage the new functionality of not -requiring a user token. - -Fix this by updating the cluster profile to align with the new -default-off approach of API collections. - -Along with this, add a toggle to the cluster profile directly to allow -users to toggle API collections on or off (default off) directly. This -is done via a new `with-api` cluster option (e.g. `-c ocp.with-api`). -Further, rename the `openshift` plugin option from `no-oc` to -`with-api`. This change not only makes the option use case far more -obvious, it will also align the use of the option to both `collect` and -`report` so that users need only be aware of a single option for either -method. - -The cluster profile also has logic to detect which plugin option, -`no-oc` or `with-api` to use based on the (RHEL) sos version installed -on the nodes being inspected by the `ocp` cluster profile. - -Signed-off-by: Jake Hunsaker ---- - sos/collector/clusters/ocp.py | 72 +++++++++++++++++++++++++++------ - sos/report/plugins/openshift.py | 26 +++++++----- - 2 files changed, 77 insertions(+), 21 deletions(-) - -diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py -index 9beb2f9b..e31d1903 100644 ---- a/sos/collector/clusters/ocp.py -+++ b/sos/collector/clusters/ocp.py -@@ -30,7 +30,11 @@ class ocp(Cluster): - clusterAdmin privileges. - - If this requires the use of a secondary configuration file, specify that -- path with the 'kubeconfig' cluster option. -+ path with the 'kubeconfig' cluster option. This config file will also be -+ used on a single master node to perform API collections if the `with-api` -+ option is enabled (default disabled). If no `kubeconfig` option is given, -+ but `with-api` is enabled, the cluster profile will attempt to use a -+ well-known default kubeconfig file if it is available on the host. - - Alternatively, provide a clusterAdmin access token either via the 'token' - cluster option or, preferably, the SOSOCPTOKEN environment variable. -@@ -45,7 +49,7 @@ class ocp(Cluster): - option mentioned above. - - To avoid redundant collections of OCP API information (e.g. 'oc get' -- commands), this profile will attempt to enable the openshift plugin on only -+ commands), this profile will attempt to enable the API collections on only - a single master node. If the none of the master nodes have a functional - 'oc' binary available, *and* the --no-local option is used, that means that - no API data will be collected. -@@ -63,7 +67,8 @@ class ocp(Cluster): - ('label', '', 'Colon delimited list of labels to select nodes with'), - ('role', 'master', 'Colon delimited list of roles to filter on'), - ('kubeconfig', '', 'Path to the kubeconfig file'), -- ('token', '', 'Service account token to use for oc authorization') -+ ('token', '', 'Service account token to use for oc authorization'), -+ ('with-api', False, 'Collect OCP API data from a master node') - ] - - def fmt_oc_cmd(self, cmd): -@@ -219,13 +224,52 @@ class ocp(Cluster): - return False - return 'master' in self.node_dict[sosnode.address]['roles'] - -+ def _toggle_api_opt(self, node, use_api): -+ """In earlier versions of sos, the openshift plugin option that is -+ used to toggle the API collections was called `no-oc` rather than -+ `with-api`. This older plugin option had the inverse logic of the -+ current `with-api` option. 
-+ -+ Use this to toggle the correct plugin option given the node's sos -+ version. Note that the use of version 4.2 here is tied to the RHEL -+ release (the only usecase for this cluster profile) rather than -+ the upstream version given the backports for that downstream. -+ -+ :param node: The node being inspected for API collections -+ :type node: ``SoSNode`` -+ -+ :param use_api: Should this node enable API collections? -+ :type use_api: ``bool`` -+ """ -+ if node.check_sos_version('4.2-16'): -+ _opt = 'with-api' -+ _val = 'on' if use_api else 'off' -+ else: -+ _opt = 'no-oc' -+ _val = 'off' if use_api else 'on' -+ node.plugopts.append("openshift.%s=%s" % (_opt, _val)) -+ - def set_primary_options(self, node): -+ - node.enable_plugins.append('openshift') -+ if not self.get_option('with-api'): -+ self._toggle_api_opt(node, False) -+ return - if self.api_collect_enabled: - # a primary has already been enabled for API collection, disable - # it among others -- node.plugopts.append('openshift.no-oc=on') -+ self._toggle_api_opt(node, False) - else: -+ # running in a container, so reference the /host mount point -+ master_kube = ( -+ '/host/etc/kubernetes/static-pod-resources/' -+ 'kube-apiserver-certs/secrets/node-kubeconfigs/' -+ 'localhost.kubeconfig' -+ ) -+ _optconfig = self.get_option('kubeconfig') -+ if _optconfig and not _optconfig.startswith('/host'): -+ _optconfig = '/host/' + _optconfig -+ _kubeconfig = _optconfig or master_kube - _oc_cmd = 'oc' - if node.host.containerized: - _oc_cmd = '/host/bin/oc' -@@ -244,17 +288,21 @@ class ocp(Cluster): - need_root=True) - if can_oc['status'] == 0: - # the primary node can already access the API -+ self._toggle_api_opt(node, True) - self.api_collect_enabled = True - elif self.token: - node.sos_env_vars['SOSOCPTOKEN'] = self.token -+ self._toggle_api_opt(node, True) -+ self.api_collect_enabled = True -+ elif node.file_exists(_kubeconfig): -+ # if the file exists, then the openshift sos plugin will use it -+ # if the with-api option is turned on -+ if not _kubeconfig == master_kube: -+ node.plugopts.append( -+ "openshift.kubeconfig=%s" % _kubeconfig -+ ) -+ self._toggle_api_opt(node, True) - self.api_collect_enabled = True -- elif self.get_option('kubeconfig'): -- kc = self.get_option('kubeconfig') -- if node.file_exists(kc): -- if node.host.containerized: -- kc = "/host/%s" % kc -- node.sos_env_vars['KUBECONFIG'] = kc -- self.api_collect_enabled = True - if self.api_collect_enabled: - msg = ("API collections will be performed on %s\nNote: API " - "collections may extend runtime by 10s of minutes\n" -@@ -264,6 +312,6 @@ class ocp(Cluster): - - def set_node_options(self, node): - # don't attempt OC API collections on non-primary nodes -- node.plugopts.append('openshift.no-oc=on') -+ self._toggle_api_opt(node, False) - - # vim: set et ts=4 sw=4 : -diff --git a/sos/report/plugins/openshift.py b/sos/report/plugins/openshift.py -index d643f04c..a41ab62b 100644 ---- a/sos/report/plugins/openshift.py -+++ b/sos/report/plugins/openshift.py -@@ -19,7 +19,10 @@ class Openshift(Plugin, RedHatPlugin): - further extending the kubernetes plugin (or the OCP 3.x extensions included - in the Red Hat version of the kube plugin). - -- By default, this plugin will collect cluster information and inspect the -+ This plugin may collect OCP API information when the `with-api` option is -+ enabled. This option is disabled by default. 
-+ -+ When enabled, this plugin will collect cluster information and inspect the - default namespaces/projects that are created during deployment - i.e. the - namespaces of the cluster projects matching openshift.* and kube.*. At the - time of this plugin's creation that number of default projects is already -@@ -34,16 +37,20 @@ class Openshift(Plugin, RedHatPlugin): - - Users will need to either: - -- 1) Provide the bearer token via the `-k openshift.token` option -- 2) Provide the bearer token via the `SOSOCPTOKEN` environment variable -- 3) Otherwise ensure that the root user can successfully run `oc` and -+ 1) Accept the use of a well-known stock kubeconfig file provided via a -+ static pod resource for the kube-apiserver -+ 2) Provide the bearer token via the `-k openshift.token` option -+ 3) Provide the bearer token via the `SOSOCPTOKEN` environment variable -+ 4) Otherwise ensure that the root user can successfully run `oc` and - get proper output prior to running this plugin - - -- It is highly suggested that option #2 be used first, as this will prevent -- the token from being recorded in output saved to the archive. Option #1 may -+ It is highly suggested that option #1 be used first, as this uses well -+ known configurations and requires the least information from the user. If -+ using a token, it is recommended to use option #3 as this will prevent -+ the token from being recorded in output saved to the archive. Option #2 may - be used if this is considered an acceptable risk. It is not recommended to -- rely on option #3, though it will provide the functionality needed. -+ rely on option #4, though it will provide the functionality needed. - """ - - short_desc = 'Openshift Container Platform 4.x' -@@ -65,7 +72,8 @@ class Openshift(Plugin, RedHatPlugin): - desc='Path to a locally available kubeconfig file'), - PluginOpt('host', default='https://localhost:6443', - desc='host address to use for oc login, including port'), -- PluginOpt('no-oc', default=True, desc='do not collect `oc` output'), -+ PluginOpt('with-api', default=False, -+ desc='collect output from the OCP API'), - PluginOpt('podlogs', default=True, desc='collect logs from each pod'), - PluginOpt('podlogs-filter', default='', val_type=str, - desc='only collect logs from pods matching this pattern'), -@@ -212,7 +220,7 @@ class Openshift(Plugin, RedHatPlugin): - self.add_copy_spec('/etc/kubernetes/*') - - # see if we run `oc` commands -- if not self.get_option('no-oc'): -+ if self.get_option('with-api'): - can_run_oc = self._check_oc_logged_in() - else: - can_run_oc = False --- -2.34.3 - diff --git a/sos-bz2062908-tigervnc-update-collections.patch b/sos-bz2062908-tigervnc-update-collections.patch deleted file mode 100644 index f2767c9..0000000 --- a/sos-bz2062908-tigervnc-update-collections.patch +++ /dev/null @@ -1,67 +0,0 @@ -From 4c92968ce461cdfc6a5d913748b2ce4f148ff4a9 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Thu, 10 Mar 2022 12:31:49 -0500 -Subject: [PATCH] [tigervnc] Update collections for newer versions of TigerVNC - -First, relaxes the file specifications for collection by capturing the -entire `/etc/tigervnc/` directory. - -Second, adds collection of service status and journal output for each -configured vnc server. Collection of `vncserver -list` is kept for -backwards compatibility. - -Finally, add a short docstring for the plugin for --help output. 
- -Signed-off-by: Jake Hunsaker ---- - sos/report/plugins/tigervnc.py | 28 +++++++++++++++++++++++----- - 1 file changed, 23 insertions(+), 5 deletions(-) - -diff --git a/sos/report/plugins/tigervnc.py b/sos/report/plugins/tigervnc.py -index 1302f6d4..e31aee25 100644 ---- a/sos/report/plugins/tigervnc.py -+++ b/sos/report/plugins/tigervnc.py -@@ -12,17 +12,35 @@ from sos.report.plugins import Plugin, RedHatPlugin - - - class TigerVNC(Plugin, RedHatPlugin): -+ """ -+ This plugin gathers information for VNC servers provided by the tigervnc -+ package. This is explicitly for server-side collections, not clients. -+ -+ By default, this plugin will capture the contents of /etc/tigervnc, which -+ may include usernames. If usernames are sensitive information for end -+ users of sos, consider using the `--clean` option to obfuscate these -+ names. -+ """ - - short_desc = 'TigerVNC server configuration' - plugin_name = 'tigervnc' - packages = ('tigervnc-server',) - - def setup(self): -- self.add_copy_spec([ -- '/etc/tigervnc/vncserver-config-defaults', -- '/etc/tigervnc/vncserver-config-mandatory', -- '/etc/tigervnc/vncserver.users' -- ]) -+ self.add_copy_spec('/etc/tigervnc/') -+ -+ # service names are 'vncserver@$port' where $port is :1,, :2, etc... -+ # however they are not reported via list-unit-files, only list-units -+ vncs = self.exec_cmd( -+ 'systemctl list-units --type=service --no-legend vncserver*' -+ ) -+ if vncs['status'] == 0: -+ for serv in vncs['output'].splitlines(): -+ vnc = serv.split() -+ if not vnc: -+ continue -+ self.add_service_status(vnc[0]) -+ self.add_journal(vnc[0]) - - self.add_cmd_output('vncserver -list') - --- -2.34.3 - diff --git a/sos-bz2065805-collect-pacemaker-cluster.patch b/sos-bz2065805-collect-pacemaker-cluster.patch deleted file mode 100644 index d573ea2..0000000 --- a/sos-bz2065805-collect-pacemaker-cluster.patch +++ /dev/null @@ -1,230 +0,0 @@ -From 3b84b4ccfa9e4924a5a3829d3810568dfb69bf63 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Fri, 18 Mar 2022 16:25:35 -0400 -Subject: [PATCH 1/2] [pacemaker] Redesign node enumeration logic - -It has been found that `pcs status` output is liable to change, which -ends up breaking our parsing of node lists when using it on newer -versions. - -Instead, first try to parse through `crm_mon` output, which is what `pcs -status` uses under the hood, but as a stable and reliable xml format. - -Failing that, for example if the `--primary` node is not functioning as -part of the cluster, source `/etc/corosync/corosync.conf` instead. - -Related: RHBZ2065805 -Related: RHBZ2065811 - -Signed-off-by: Jake Hunsaker ---- - sos/collector/clusters/pacemaker.py | 110 +++++++++++++++++++--------- - 1 file changed, 76 insertions(+), 34 deletions(-) - -diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py -index 55024314..49d0ce51 100644 ---- a/sos/collector/clusters/pacemaker.py -+++ b/sos/collector/clusters/pacemaker.py -@@ -8,7 +8,11 @@ - # - # See the LICENSE file in the source distribution for further information. 
- -+import re -+ - from sos.collector.clusters import Cluster -+from setuptools._vendor.packaging import version -+from xml.etree import ElementTree - - - class pacemaker(Cluster): -@@ -18,42 +22,80 @@ class pacemaker(Cluster): - packages = ('pacemaker',) - option_list = [ - ('online', True, 'Collect nodes listed as online'), -- ('offline', True, 'Collect nodes listed as offline') -+ ('offline', True, 'Collect nodes listed as offline'), -+ ('only-corosync', False, 'Only use corosync.conf to enumerate nodes') - ] - - def get_nodes(self): -- self.res = self.exec_primary_cmd('pcs status') -- if self.res['status'] != 0: -- self.log_error('Cluster status could not be determined. Is the ' -- 'cluster running on this node?') -- return [] -- if 'node names do not match' in self.res['output']: -- self.log_warn('Warning: node name mismatch reported. Attempts to ' -- 'connect to some nodes may fail.\n') -- return self.parse_pcs_output() -- -- def parse_pcs_output(self): -- nodes = [] -- if self.get_option('online'): -- nodes += self.get_online_nodes() -- if self.get_option('offline'): -- nodes += self.get_offline_nodes() -- return nodes -- -- def get_online_nodes(self): -- for line in self.res['output'].splitlines(): -- if line.startswith('Online:'): -- nodes = line.split('[')[1].split(']')[0] -- return [n for n in nodes.split(' ') if n] -- -- def get_offline_nodes(self): -- offline = [] -- for line in self.res['output'].splitlines(): -- if line.startswith('Node') and line.endswith('(offline)'): -- offline.append(line.split()[1].replace(':', '')) -- if line.startswith('OFFLINE:'): -- nodes = line.split('[')[1].split(']')[0] -- offline.extend([n for n in nodes.split(' ') if n]) -- return offline -+ self.nodes = [] -+ # try crm_mon first -+ try: -+ if not self.get_option('only-corosync'): -+ try: -+ self.get_nodes_from_crm() -+ except Exception as err: -+ self.log_warn("Falling back to sourcing corosync.conf. " -+ "Could not parse crm_mon output: %s" % err) -+ if not self.nodes: -+ # fallback to corosync.conf, in case the node we're inspecting -+ # is offline from the cluster -+ self.get_nodes_from_corosync() -+ except Exception as err: -+ self.log_error("Could not determine nodes from cluster: %s" % err) -+ -+ _shorts = [n for n in self.nodes if '.' not in n] -+ if _shorts: -+ self.log_warn( -+ "WARNING: Node addresses '%s' may not resolve locally if you " -+ "are not running on a node in the cluster. Try using option " -+ "'-c pacemaker.only-corosync' if these connections fail." -+ % ','.join(_shorts) -+ ) -+ return self.nodes -+ -+ def get_nodes_from_crm(self): -+ """ -+ Try to parse crm_mon output for node list and status. 
-+ """ -+ xmlopt = '--output-as=xml' -+ # older pacemaker had a different option for xml output -+ _ver = self.exec_primary_cmd('crm_mon --version') -+ if _ver['status'] == 0: -+ cver = _ver['output'].split()[1].split('-')[0] -+ if not version.parse(cver) > version.parse('2.0.3'): -+ xmlopt = '--as-xml' -+ else: -+ return -+ _out = self.exec_primary_cmd( -+ "crm_mon --one-shot --inactive %s" % xmlopt, -+ need_root=True -+ ) -+ if _out['status'] == 0: -+ self.parse_crm_xml(_out['output']) -+ -+ def parse_crm_xml(self, xmlstring): -+ """ -+ Parse the xml output string provided by crm_mon -+ """ -+ _xml = ElementTree.fromstring(xmlstring) -+ nodes = _xml.find('nodes') -+ for node in nodes: -+ _node = node.attrib -+ if self.get_option('online') and _node['online'] == 'true': -+ self.nodes.append(_node['name']) -+ elif self.get_option('offline') and _node['online'] == 'false': -+ self.nodes.append(_node['name']) -+ -+ def get_nodes_from_corosync(self): -+ """ -+ As a fallback measure, read corosync.conf to get the node list. Note -+ that this prevents us from separating online nodes from offline nodes. -+ """ -+ self.log_warn("WARNING: unable to distinguish online nodes from " -+ "offline nodes when sourcing from corosync.conf") -+ cc = self.primary.read_file('/etc/corosync/corosync.conf') -+ nodes = re.findall(r'((\sring0_addr:)(.*))', cc) -+ for node in nodes: -+ self.nodes.append(node[-1].strip()) - - # vim: set et ts=4 sw=4 : --- -2.34.3 - - -From 6701a7d77ecc998b018b54ecc00f9fd102ae9518 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Mon, 21 Mar 2022 12:05:59 -0400 -Subject: [PATCH 2/2] [clusters] Allow clusters to not add localhost to node - list - -For most of our supported clusters, we end up needing to add the -local host executing `sos collect` to the node list (unless `--no-local` -is used) as that accounts for the primary node that may otherwise be -left off. However, this is not helpful for clusters that may reports -node names as something other than resolveable names. In those cases, -such as with pacemaker, adding the local hostname may result in -duplicate collections. - -Add a toggle to cluster profiles via a new `strict_node_list` class attr -that, if True, will skip this addition. This toggle is default `False` -to preserve existing behavior, and is now enabled for `pacemaker` -specifically. - -Related: RHBZ#2065821 - -Signed-off-by: Jake Hunsaker ---- - sos/collector/__init__.py | 3 ++- - sos/collector/clusters/__init__.py | 4 ++++ - sos/collector/clusters/pacemaker.py | 1 + - 3 files changed, 7 insertions(+), 1 deletion(-) - -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index a8bb0064..d898ca34 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -1073,7 +1073,8 @@ class SoSCollector(SoSComponent): - for node in self.node_list: - if host == node.split('.')[0]: - self.node_list.remove(node) -- self.node_list.append(self.hostname) -+ if not self.cluster.strict_node_list: -+ self.node_list.append(self.hostname) - self.reduce_node_list() - try: - _node_max = len(max(self.node_list, key=len)) -diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py -index f3f550ad..f00677b8 100644 ---- a/sos/collector/clusters/__init__.py -+++ b/sos/collector/clusters/__init__.py -@@ -57,6 +57,10 @@ class Cluster(): - sos_plugin_options = {} - sos_preset = '' - cluster_name = None -+ # set this to True if the local host running collect should *not* be -+ # forcibly added to the node list. 
This can be helpful in situations where -+ # the host's fqdn and the name the cluster uses are different -+ strict_node_list = False - - def __init__(self, commons): - self.primary = None -diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py -index 49d0ce51..bebcb265 100644 ---- a/sos/collector/clusters/pacemaker.py -+++ b/sos/collector/clusters/pacemaker.py -@@ -20,6 +20,7 @@ class pacemaker(Cluster): - cluster_name = 'Pacemaker High Availability Cluster Manager' - sos_plugins = ['pacemaker'] - packages = ('pacemaker',) -+ strict_node_list = True - option_list = [ - ('online', True, 'Collect nodes listed as online'), - ('offline', True, 'Collect nodes listed as offline'), --- -2.34.3 - diff --git a/sos-bz2079187-honor-default-plugin-timeout.patch b/sos-bz2079187-honor-default-plugin-timeout.patch deleted file mode 100644 index 822565d..0000000 --- a/sos-bz2079187-honor-default-plugin-timeout.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 7069e99d1c5c443f96a98a7ed6db67fa14683e67 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 17 Feb 2022 09:14:15 +0100 -Subject: [PATCH] [report] Honor plugins' hardcoded plugin_timeout - -Currently, plugin's plugin_timeout hardcoded default is superseded by -whatever --plugin-timeout value, even when this option is not used and -we eval it to TIMEOUT_DEFAULT. - -In this case of not setting --plugin-timeout either -k plugin.timeout, -honour plugin's plugin_timeout instead. - -Resolves: #2863 -Closes: #2864 - -Signed-off-by: Pavel Moravec ---- - sos/report/plugins/__init__.py | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py -index cc5cb65b..336b4d22 100644 ---- a/sos/report/plugins/__init__.py -+++ b/sos/report/plugins/__init__.py -@@ -636,7 +636,10 @@ class Plugin(): - if opt_timeout is None: - _timeout = own_timeout - elif opt_timeout is not None and own_timeout == -1: -- _timeout = int(opt_timeout) -+ if opt_timeout == TIMEOUT_DEFAULT: -+ _timeout = default_timeout -+ else: -+ _timeout = int(opt_timeout) - elif opt_timeout is not None and own_timeout > -1: - _timeout = own_timeout - else: --- -2.34.3 - diff --git a/sos-bz2079484-list-plugins-ignore-options.patch b/sos-bz2079484-list-plugins-ignore-options.patch deleted file mode 100644 index f0bda41..0000000 --- a/sos-bz2079484-list-plugins-ignore-options.patch +++ /dev/null @@ -1,68 +0,0 @@ -From f3dc8cd574614572d441f76c02453fd85d0c57e2 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 27 Apr 2022 10:40:55 -0400 -Subject: [PATCH] [report] --list-plugins should report used, not default, - option values - -When using `--list-plugins`, sos should report the values that will be -used in a given command, or with a given config file, not what the -default values are. - -By reporting the set value, users can be sure their configuration or -commandline settings are being honored correctly before executing a -report collection. 
- -Closes: #2921 - -Signed-off-by: Jake Hunsaker ---- - sos/report/__init__.py | 22 +++++++++++++++------- - 1 file changed, 15 insertions(+), 7 deletions(-) - -diff --git a/sos/report/__init__.py b/sos/report/__init__.py -index 74c7973a..8735c903 100644 ---- a/sos/report/__init__.py -+++ b/sos/report/__init__.py -@@ -868,24 +868,32 @@ class SoSReport(SoSComponent): - _defaults = self.loaded_plugins[0][1].get_default_plugin_opts() - for _opt in _defaults: - opt = _defaults[_opt] -- val = opt.default -- if opt.default == -1: -- val = TIMEOUT_DEFAULT -+ val = opt.value -+ if opt.value == -1: -+ if _opt == 'timeout': -+ val = self.opts.plugin_timeout or TIMEOUT_DEFAULT -+ elif _opt == 'cmd-timeout': -+ val = self.opts.cmd_timeout or TIMEOUT_DEFAULT -+ else: -+ val = TIMEOUT_DEFAULT -+ if opt.name == 'postproc': -+ val = not self.opts.no_postproc - self.ui_log.info(" %-25s %-15s %s" % (opt.name, val, opt.desc)) - self.ui_log.info("") - - self.ui_log.info(_("The following plugin options are available:")) - for opt in self.all_options: - if opt.name in ('timeout', 'postproc', 'cmd-timeout'): -- continue -+ if opt.value == opt.default: -+ continue - # format option value based on its type (int or bool) -- if isinstance(opt.default, bool): -- if opt.default is True: -+ if isinstance(opt.value, bool): -+ if opt.value is True: - tmpopt = "on" - else: - tmpopt = "off" - else: -- tmpopt = opt.default -+ tmpopt = opt.value - - if tmpopt is None: - tmpopt = 0 --- -2.34.3 - diff --git a/sos-bz2079485-plugopts-valtype-str.patch b/sos-bz2079485-plugopts-valtype-str.patch deleted file mode 100644 index eaf42ab..0000000 --- a/sos-bz2079485-plugopts-valtype-str.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 9b10abcdd4aaa41e2549438d5bc52ece86dcb21f Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Sat, 7 May 2022 14:23:04 +0200 -Subject: [PATCH] [plugins] Allow 'str' PlugOpt type to accept any value - -For PlugOpt type 'str', we should allow any content including e.g. -numbers, and interpret it as a string. - -Resolves: #2922 -Closes: #2935 - -Signed-off-by: Pavel Moravec ---- - sos/report/plugins/__init__.py | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py -index d6be42b9..2a42e6b0 100644 ---- a/sos/report/plugins/__init__.py -+++ b/sos/report/plugins/__init__.py -@@ -452,6 +452,10 @@ class PluginOpt(): - return self.__str__() - - def set_value(self, val): -+ # 'str' type accepts any value, incl. numbers -+ if type('') in self.val_type: -+ self.value = str(val) -+ return - if not any([type(val) == _t for _t in self.val_type]): - valid = [] - for t in self.val_type: --- -2.34.3 - diff --git a/sos-bz2079486-timeouted-exec-cmd-exception.patch b/sos-bz2079486-timeouted-exec-cmd-exception.patch deleted file mode 100644 index dc58a67..0000000 --- a/sos-bz2079486-timeouted-exec-cmd-exception.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 5e27b92a8a9f066af4c41ddd0bedc7c69187ff52 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Mon, 2 May 2022 22:13:34 +0200 -Subject: [PATCH] [utilities] Close file only when storing to file - -Call _output.close() only when to_file=true. 
- -Closes: #2925 - -Signed-off-by: Pavel Moravec ---- - sos/utilities.py | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/sos/utilities.py b/sos/utilities.py -index d2f73d86..1075d1d4 100644 ---- a/sos/utilities.py -+++ b/sos/utilities.py -@@ -212,7 +212,8 @@ def sos_get_command_output(command, timeout=TIMEOUT_DEFAULT, stderr=False, - p.wait(timeout if timeout else None) - except Exception: - p.terminate() -- _output.close() -+ if to_file: -+ _output.close() - # until we separate timeouts from the `timeout` command - # handle per-cmd timeouts via Plugin status checks - return {'status': 124, 'output': reader.get_contents(), --- -2.34.3 - diff --git a/sos-bz2092969-openshift-ovn-disabled.patch b/sos-bz2092969-openshift-ovn-disabled.patch deleted file mode 100644 index 29241ba..0000000 --- a/sos-bz2092969-openshift-ovn-disabled.patch +++ /dev/null @@ -1,73 +0,0 @@ -From c2e66fa4dae51f03c7310ba5278897ddecac1aad Mon Sep 17 00:00:00 2001 -From: Nadia Pinaeva -Date: Thu, 2 Jun 2022 15:43:09 +0200 -Subject: [PATCH] crio: switch from parsing output in table format to json - -Signed-off-by: Nadia Pinaeva ---- - sos/policies/runtimes/crio.py | 30 ++++++++++++++++++++---------- - 1 file changed, 20 insertions(+), 10 deletions(-) - -diff --git a/sos/policies/runtimes/crio.py b/sos/policies/runtimes/crio.py -index 55082d07..4cae1ecc 100644 ---- a/sos/policies/runtimes/crio.py -+++ b/sos/policies/runtimes/crio.py -@@ -7,6 +7,7 @@ - # version 2 of the GNU General Public License. - # - # See the LICENSE file in the source distribution for further information. -+import json - - from sos.policies.runtimes import ContainerRuntime - from sos.utilities import sos_get_command_output -@@ -29,14 +30,15 @@ class CrioContainerRuntime(ContainerRuntime): - :type get_all: ``bool`` - """ - containers = [] -- _cmd = "%s ps %s" % (self.binary, '-a' if get_all else '') -+ _cmd = "%s ps %s -o json" % (self.binary, '-a' if get_all else '') - if self.active: - out = sos_get_command_output(_cmd, chroot=self.policy.sysroot) -- if out['status'] == 0: -- for ent in out['output'].splitlines()[1:]: -- ent = ent.split() -+ if out["status"] == 0: -+ out_json = json.loads(out["output"]) -+ for container in out_json["containers"]: - # takes the form (container_id, container_name) -- containers.append((ent[0], ent[-3])) -+ containers.append( -+ (container["id"], container["metadata"]["name"])) - return containers - - def get_images(self): -@@ -47,13 +49,21 @@ class CrioContainerRuntime(ContainerRuntime): - """ - images = [] - if self.active: -- out = sos_get_command_output("%s images" % self.binary, -+ out = sos_get_command_output("%s images -o json" % self.binary, - chroot=self.policy.sysroot) - if out['status'] == 0: -- for ent in out['output'].splitlines(): -- ent = ent.split() -- # takes the form (image_name, image_id) -- images.append((ent[0] + ':' + ent[1], ent[2])) -+ out_json = json.loads(out["output"]) -+ for image in out_json["images"]: -+ # takes the form (repository:tag, image_id) -+ if len(image["repoTags"]) > 0: -+ for repo_tag in image["repoTags"]: -+ images.append((repo_tag, image["id"])) -+ else: -+ if len(image["repoDigests"]) == 0: -+ image_name = "" -+ else: -+ image_name = image["repoDigests"][0].split("@")[0] -+ images.append((image_name + ":", image["id"])) - return images - - def fmt_container_cmd(self, container, cmd, quotecmd): --- -2.34.3 - diff --git a/sos-bz2093993-vdsm-set-use-devicesfile-zero.patch b/sos-bz2093993-vdsm-set-use-devicesfile-zero.patch deleted file mode 100644 index 
15feb15..0000000 --- a/sos-bz2093993-vdsm-set-use-devicesfile-zero.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 7d1ee59fc659467e6860e72322e976ddc5c17db3 Mon Sep 17 00:00:00 2001 -From: Juan Orti Alcaine -Date: Mon, 6 Jun 2022 16:35:51 +0200 -Subject: [PATCH] [vdsm] Set LVM option use_devicesfile=0 - -Since RHV 4.4 SP1, vdsm configures LVM to use devicesfile, causing that -the LVM filter configuration used by sos is ignored. - -This change disables the use of the devicesfile, so that the information -of the devices used for RHV storage domains can be collected. - -Fixes: RHBZ#2093993 - -Signed-off-by: Juan Orti ---- - sos/report/plugins/vdsm.py | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/sos/report/plugins/vdsm.py b/sos/report/plugins/vdsm.py -index ee5befbb1..146d223c2 100644 ---- a/sos/report/plugins/vdsm.py -+++ b/sos/report/plugins/vdsm.py -@@ -29,7 +29,8 @@ - # use_lvmetad is set to 0 in order not to show cached, old lvm metadata. - # use_lvmetad=0 - # --# preferred_names and filter config values are set to capture Vdsm devices. -+# preferred_names, use_devicesfile and filter config values are set to -+# capture Vdsm devices. - # preferred_names=[ '^/dev/mapper/' ] - # filter=[ 'a|^/dev/mapper/.*|', 'r|.*|' ] - LVM_CONFIG = """ -@@ -43,6 +44,7 @@ - ignore_suspended_devices=1 - write_cache_state=0 - disable_after_error_count=3 -+ use_devicesfile=0 - filter=["a|^/dev/disk/by-id/dm-uuid-mpath-|", "r|.+|"] - } - """ diff --git a/sos-bz2095263-ovirt-answer-files-passwords.patch b/sos-bz2095263-ovirt-answer-files-passwords.patch deleted file mode 100644 index 67eb6a0..0000000 --- a/sos-bz2095263-ovirt-answer-files-passwords.patch +++ /dev/null @@ -1,66 +0,0 @@ -From 5fd872c64c53af37015f366295e0c2418c969757 Mon Sep 17 00:00:00 2001 -From: Yedidyah Bar David -Date: Thu, 26 May 2022 16:43:21 +0300 -Subject: [PATCH] [ovirt] answer files: Filter out all password keys - -Instead of hard-coding specific keys and having to maintain them over -time, replace the values of all keys that have 'password' in their name. -I think this covers all our current and hopefully future keys. It might -add "false positives" - keys that are not passwords but have 'password' -in their name - and I think that's a risk worth taking. - -Sadly, the engine admin password prompt's name is -'OVESETUP_CONFIG_ADMIN_SETUP', which does not include 'password', so has -to be listed specifically. - -A partial list of keys added since the replaced code was written: -- grafana-related stuff -- keycloak-related stuff -- otopi-style answer files - -Signed-off-by: Yedidyah Bar David -Change-Id: I416c6e4078e7c3638493eb271d08d73a0c22b5ba ---- - sos/report/plugins/ovirt.py | 23 +++++++++++++---------- - 1 file changed, 13 insertions(+), 10 deletions(-) - -diff --git a/sos/report/plugins/ovirt.py b/sos/report/plugins/ovirt.py -index 09647bf1..3b1bb29b 100644 ---- a/sos/report/plugins/ovirt.py -+++ b/sos/report/plugins/ovirt.py -@@ -241,19 +241,22 @@ class Ovirt(Plugin, RedHatPlugin): - r'{key}=********'.format(key=key) - ) - -- # Answer files contain passwords -- for key in ( -- 'OVESETUP_CONFIG/adminPassword', -- 'OVESETUP_CONFIG/remoteEngineHostRootPassword', -- 'OVESETUP_DWH_DB/password', -- 'OVESETUP_DB/password', -- 'OVESETUP_REPORTS_CONFIG/adminPassword', -- 'OVESETUP_REPORTS_DB/password', -+ # Answer files contain passwords. -+ # Replace all keys that have 'password' in them, instead of hard-coding -+ # here the list of keys, which changes between versions. 
-+ # Sadly, the engine admin password prompt name does not contain -+ # 'password'... so neither does the env key. -+ for item in ( -+ 'password', -+ 'OVESETUP_CONFIG_ADMIN_SETUP', - ): - self.do_path_regex_sub( - r'/var/lib/ovirt-engine/setup/answers/.*', -- r'{key}=(.*)'.format(key=key), -- r'{key}=********'.format(key=key) -+ re.compile( -+ r'(?P[^=]*{item}[^=]*)=.*'.format(item=item), -+ flags=re.IGNORECASE -+ ), -+ r'\g=********' - ) - - # aaa profiles contain passwords --- -2.34.3 - diff --git a/sos-bz2099598-forbidden-path-efficient.patch b/sos-bz2099598-forbidden-path-efficient.patch deleted file mode 100644 index 5322765..0000000 --- a/sos-bz2099598-forbidden-path-efficient.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 1dc3625fabea7331570f713fd1c87ac812d72d92 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 18 May 2022 13:39:38 -0400 -Subject: [PATCH] [Plugin] Make forbidden path checks more efficient - -Forbidden path checks have up until now worked by taking a given file -path (potentially with globs), expanding that against all discovered -files that actually exist on the system, and then comparing a potential -collection path against that list. - -While this works, and works reasonably fast for most scenarios, it isn't -very efficient and causes significant slow downs when a non-standard -configuration is in play - e.g. thousands of block devices which sos -would individually have to compare against tens of thousands of paths -for every path the `block` plugin wants to collect. - -Improve this by first not expanding the forbidden path globs, but taking -them as distinct patterns, translating from shell-style (to maintain -historical precedent of using globs to specify paths to be skipped) to -python regex patterns as needed. Second, use `re` to handle our pattern -matching for comparison against the distinct patterns provided by a -plugin to skip. - -Closes: #2938 - -Signed-off-by: Jake Hunsaker ---- - sos/report/plugins/__init__.py | 20 +++++++++----------- - sos/report/plugins/cgroups.py | 6 ++---- - sos/report/plugins/pulpcore.py | 2 +- - sos/report/plugins/rhui.py | 2 +- - 4 files changed, 13 insertions(+), 17 deletions(-) - -diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py -index 2a42e6b0a..ba1397a8a 100644 ---- a/sos/report/plugins/__init__.py -+++ b/sos/report/plugins/__init__.py -@@ -46,11 +46,6 @@ def _mangle_command(command, name_max): - return mangledname - - --def _path_in_path_list(path, path_list): -- return any((p == path or path.startswith(os.path.abspath(p)+os.sep) -- for p in path_list)) -- -- - def _node_type(st): - """ return a string indicating the type of special node represented by - the stat buffer st (block, character, fifo, socket). 
-@@ -1407,7 +1402,9 @@ def _get_dest_for_srcpath(self, srcpath): - return None - - def _is_forbidden_path(self, path): -- return _path_in_path_list(path, self.forbidden_paths) -+ return any( -+ re.match(forbid, path) for forbid in self.forbidden_paths -+ ) - - def _is_policy_forbidden_path(self, path): - return any([ -@@ -1495,14 +1492,12 @@ def _do_copy_path(self, srcpath, dest=None): - 'symlink': "no" - }) - -- def add_forbidden_path(self, forbidden, recursive=False): -+ def add_forbidden_path(self, forbidden): - """Specify a path, or list of paths, to not copy, even if it's part of - an ``add_copy_spec()`` call - - :param forbidden: A filepath to forbid collection from - :type forbidden: ``str`` or a ``list`` of strings -- -- :param recursive: Should forbidden glob be applied recursively - """ - if isinstance(forbidden, str): - forbidden = [forbidden] -@@ -1512,8 +1507,11 @@ def add_forbidden_path(self, forbidden, recursive=False): - - for forbid in forbidden: - self._log_info("adding forbidden path '%s'" % forbid) -- for path in glob.glob(forbid, recursive=recursive): -- self.forbidden_paths.append(path) -+ if "*" in forbid: -+ # calling translate() here on a dir-level path will break the -+ # re.match() call during path comparison -+ forbid = fnmatch.translate(forbid) -+ self.forbidden_paths.append(forbid) - - def set_option(self, optionname, value): - """Set the named option to value. Ensure the original type of the -diff --git a/sos/report/plugins/pulpcore.py b/sos/report/plugins/pulpcore.py -index 6c4237cae..f6bc194c7 100644 ---- a/sos/report/plugins/pulpcore.py -+++ b/sos/report/plugins/pulpcore.py -@@ -89,7 +89,7 @@ class PulpCore(Plugin, IndependentPlugin - "/etc/pki/pulp/*" - ]) - # skip collecting certificate keys -- self.add_forbidden_path("/etc/pki/pulp/**/*.key", recursive=True) -+ self.add_forbidden_path("/etc/pki/pulp/**/*.key") - - self.add_cmd_output("rq info -u redis://localhost:6379/8", - env={"LC_ALL": "en_US.UTF-8"}, -diff --git a/sos/report/plugins/rhui.py b/sos/report/plugins/rhui.py -index add024613..8063fd51c 100644 ---- a/sos/report/plugins/rhui.py -+++ b/sos/report/plugins/rhui.py -@@ -30,7 +30,7 @@ def setup(self): - "/var/log/rhui/*", - ]) - # skip collecting certificate keys -- self.add_forbidden_path("/etc/pki/rhui/**/*.key", recursive=True) -+ self.add_forbidden_path("/etc/pki/rhui/**/*.key") - - # call rhui-manager commands with 1m timeout and - # with an env. 
variable ensuring that "RHUI Username:"
diff --git a/sos.spec b/sos.spec
index 85612ab..ed13942 100644
--- a/sos.spec
+++ b/sos.spec
@@ -4,8 +4,8 @@
 Summary: A set of tools to gather troubleshooting information from a system
 Name: sos
-Version: 4.3
-Release: 3%{?dist}
+Version: 4.4
+Release: 1%{?dist}
 Group: Applications/System
 Source0: https://github.com/sosreport/sos/archive/%{version}/sos-%{version}.tar.gz
 Source1: sos-audit-%{auditversion}.tgz
@@ -14,25 +14,13 @@
 BuildArch: noarch
 Url: https://github.com/sosreport/sos
 BuildRequires: python3-devel
 BuildRequires: gettext
-Requires: libxml2-python3
 Requires: bzip2
 Requires: xz
+Requires: python3-magic
+Requires: python3-requests
+Recommends: python3-pexpect
 Conflicts: vdsm < 4.40
 Obsoletes: sos-collector
-Recommends: python3-pexpect
-Recommends: python3-requests
-Patch1: sos-bz2055002-rebase-sos-add-sos-help.patch
-Patch2: sos-bz2095263-ovirt-answer-files-passwords.patch
-Patch3: sos-bz2079485-plugopts-valtype-str.patch
-Patch4: sos-bz2062908-tigervnc-update-collections.patch
-Patch5: sos-bz2065805-collect-pacemaker-cluster.patch
-Patch6: sos-bz2079187-honor-default-plugin-timeout.patch
-Patch7: sos-bz2079484-list-plugins-ignore-options.patch
-Patch8: sos-bz2079486-timeouted-exec-cmd-exception.patch
-Patch9: sos-bz2058279-ocp-backports.patch
-Patch10: sos-bz2092969-openshift-ovn-disabled.patch
-Patch11: sos-bz2093993-vdsm-set-use-devicesfile-zero.patch
-Patch12: sos-bz2099598-forbidden-path-efficient.patch
 %description
@@ -44,18 +32,6 @@
 support technicians and developers.
 %prep
 %setup -qn %{name}-%{version}
 %setup -T -D -a1 -q
-%patch1 -p1
-%patch2 -p1
-%patch3 -p1
-%patch4 -p1
-%patch5 -p1
-%patch6 -p1
-%patch7 -p1
-%patch8 -p1
-%patch9 -p1
-%patch10 -p1
-%patch11 -p1
-%patch12 -p1
 %build
@@ -124,6 +100,48 @@
 of the system. Currently storage and filesystem commands are audited.
 %ghost /etc/audit/rules.d/40-sos-storage.rules
 %changelog
+* Fri Sep 09 2022 Pavel Moravec = 4.4-1
+- Rebase on upstream 4.4
+  Resolves: bz2082614
+- [redhat] Honour credential-less --upload-url on RedHat distro properly
+  Resolves: bz2059572
+- [sos] Fix unhandled exception when concurrently removing temp dir
+  Resolves: bz2088439
+- [specfile] drop python3-libxml2 dependency
+  Resolves: bz2125486
+- [md] Restrict data capture to raid members
+  Resolves: bz2125485
+- [cleaner] Use compiled regex lists for parsers by default
+  Resolves: bz2043233
+- [cgroups] not collect memory.kmem.slabinfo
+  Resolves: bz1995120
+- [report] Fix loop devices data gathering
+  Resolves: bz2010735
+- [insights] Collect /var/lib/insights
+  Resolves: bz2103233
+- [candlepin] collect information about SCA
+  Resolves: bz2060925
+- [manpages] Clarify --upload-directory applicable to FTP protocol only
+  Resolves: bz2063259
+- [cleaner] Dont obfuscate tmpdir path of local private_map
+  Resolves: bz2064815
+- [fibrechannel] collect Cisco fnic statistics
+  Resolves: bz2074715
+- [pulpcore] Collect db_tables_sizes
+  Resolves: bz2081433
+- [fibrechannel]: Update fibrechannel plugin to collect HBA logs
+  Resolves: bz2089591
+- [arcconf]: Update arcconf plugin to collect UART logs
+  Resolves: bz2090283
+- [pulpcore] Stop collecting commands relevant to old tasking system
+  Resolves: bz2093191
+- [dnf,yum] Merge plugins into dnf, remove yum plugin
+  Resolves: bz2100154
+- [policies] Simplify flow in _container_init()
+  Resolves: bz2100480
+- [pacemaker] Update collect cluster profile for pacemaker
+  Resolves: bz2065821
+
 * Mon Aug 29 2022 Pavel Moravec = 4.3-3
 - [vdsm] Set LVM option use_devicesfile=0
   Resolves: bz2093993
diff --git a/sources b/sources
index cd8aacb..6154f27 100644
--- a/sources
+++ b/sources
@@ -1,2 +1,2 @@
-SHA512 (sos-4.3.tar.gz) = 6119abb52e2be650099c9a660a179856f1a2f64cc098a3318b685a407fdb2b8e1ad296bc124c1a6acd1142f75e106db353ce9d2b065d8cf4c077b98c40221d9c
+SHA512 (sos-4.4.tar.gz) = 0e040bd48463960872e7f14ac773edcce40a5bfb1e05967498c8c9dc41560dfe901c60f08aad5e2a987d5de8068b21729d2317ae99602c739aa4f78eb7f72251
 SHA512 (sos-audit-0.3.tgz) = 32597baf6350804d08179a0dbe48470a93df148e83d2e49bb3288f6bcc2d151bb1433761913bfbccd912c14de92435939fef5bcd7e091dfe33a345d61ea842ea