From 21676fc4bbf114120b22a2b63509db007ac4154a Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Tue, 10 May 2022 03:14:41 -0400 Subject: [PATCH] import sos-4.2-15.el8 --- .gitignore | 2 +- .sos.metadata | 2 +- SOURCES/sos-bz1665947-rhui-plugin.patch | 387 -- .../sos-bz1873185-estimate-only-option.patch | 1316 +++++ .../sos-bz1886711-enhance-tc-hw-offload.patch | 32 - ...-bz1923938-sos-log-effective-options.patch | 284 - SOURCES/sos-bz1925419-all-gluster-files.patch | 39 - ...1930181-collect-cleaning-consistency.patch | 243 - SOURCES/sos-bz1935603-manpages-see-also.patch | 99 - ...s-bz1937298-ds-mask-password-in-ldif.patch | 50 - SOURCES/sos-bz1937418-add-cmd-timeout.patch | 315 - ...s-bz1939963-gather-cups-browsed-logs.patch | 30 - ...sos-bz1940502-sssd-memcache-and-logs.patch | 62 - ...sos-bz1942276-ibmvNIC-dynamic-debugs.patch | 29 - SOURCES/sos-bz1956673-pulpcore-plugin.patch | 147 - SOURCES/sos-bz1959413-saphana-traceback.patch | 30 - ...s-bz1959598-conversions-and-upgrades.patch | 50 - ...apper-plugin-and-allocation-failures.patch | 121 - SOURCES/sos-bz1961458-collect-nstat.patch | 36 - ...z1964499-obfuscate-fqdn-from-dnf-log.patch | 78 - ...1965001-fix-avc-copystating-proc-sys.patch | 135 - SOURCES/sos-bz1967613-sssd-common.patch | 36 - .../sos-bz1973675-ocp-cluster-cleaner.patch | 2156 ------- ...1985037-cleaner-AD-users-obfuscation.patch | 142 - ...5986-potential-issues-static-analyse.patch | 65 - ...pacapture-under-allow-system-changes.patch | 49 + ...1998521-unpackaged-recursive-symlink.patch | 42 + ...6-iptables-save-under-nf_tables-kmod.patch | 73 + SOURCES/sos-bz2002145-kernel-psi.patch | 33 + ...2004929-openvswitch-offline-analysis.patch | 151 + .../sos-bz2005195-iptables-based-on-ntf.patch | 303 + ...os-bz2011349-replace-dropbox-by-sftp.patch | 746 --- ...bz2011413-cpuX-individual-sizelimits.patch | 48 + .../sos-bz2011506-foreman-puma-status.patch | 69 + ...-bz2012856-dryrun-uncaught-exception.patch | 33 + ...7-plugin-timeout-unhandled-exception.patch | 31 + ...8033-plugin-timeouts-proper-handling.patch | 91 + ...2020777-filter-namespace-per-pattern.patch | 54 + ...23867-cleaner-hostnames-improvements.patch | 1829 ++++++ SOURCES/sos-bz2025403-nvidia-GPU-info.patch | 46 + SOURCES/sos-bz2025610-RHTS-api-change.patch | 224 + SOURCES/sos-bz2030741-rhui-logs.patch | 24 + SOURCES/sos-bz2036697-ocp-backports.patch | 5145 +++++++++++++++++ .../sos-bz2041488-virsh-in-foreground.patch | 146 + ...042966-ovn-proper-package-enablement.patch | 252 + .../sos-bz2043102-foreman-tasks-msgpack.patch | 59 + ...54882-plugopt-logging-effective-opts.patch | 94 + ...547-honour-plugins-timeout-hardcoded.patch | 39 + SPECS/sos.spec | 157 +- 49 files changed, 10277 insertions(+), 5347 deletions(-) delete mode 100644 SOURCES/sos-bz1665947-rhui-plugin.patch create mode 100644 SOURCES/sos-bz1873185-estimate-only-option.patch delete mode 100644 SOURCES/sos-bz1886711-enhance-tc-hw-offload.patch delete mode 100644 SOURCES/sos-bz1923938-sos-log-effective-options.patch delete mode 100644 SOURCES/sos-bz1925419-all-gluster-files.patch delete mode 100644 SOURCES/sos-bz1930181-collect-cleaning-consistency.patch delete mode 100644 SOURCES/sos-bz1935603-manpages-see-also.patch delete mode 100644 SOURCES/sos-bz1937298-ds-mask-password-in-ldif.patch delete mode 100644 SOURCES/sos-bz1937418-add-cmd-timeout.patch delete mode 100644 SOURCES/sos-bz1939963-gather-cups-browsed-logs.patch delete mode 100644 SOURCES/sos-bz1940502-sssd-memcache-and-logs.patch delete mode 100644 
SOURCES/sos-bz1942276-ibmvNIC-dynamic-debugs.patch delete mode 100644 SOURCES/sos-bz1956673-pulpcore-plugin.patch delete mode 100644 SOURCES/sos-bz1959413-saphana-traceback.patch delete mode 100644 SOURCES/sos-bz1959598-conversions-and-upgrades.patch delete mode 100644 SOURCES/sos-bz1961229-snapper-plugin-and-allocation-failures.patch delete mode 100644 SOURCES/sos-bz1961458-collect-nstat.patch delete mode 100644 SOURCES/sos-bz1964499-obfuscate-fqdn-from-dnf-log.patch delete mode 100644 SOURCES/sos-bz1965001-fix-avc-copystating-proc-sys.patch delete mode 100644 SOURCES/sos-bz1967613-sssd-common.patch delete mode 100644 SOURCES/sos-bz1973675-ocp-cluster-cleaner.patch delete mode 100644 SOURCES/sos-bz1985037-cleaner-AD-users-obfuscation.patch delete mode 100644 SOURCES/sos-bz1985986-potential-issues-static-analyse.patch create mode 100644 SOURCES/sos-bz1998433-opacapture-under-allow-system-changes.patch create mode 100644 SOURCES/sos-bz1998521-unpackaged-recursive-symlink.patch create mode 100644 SOURCES/sos-bz2001096-iptables-save-under-nf_tables-kmod.patch create mode 100644 SOURCES/sos-bz2002145-kernel-psi.patch create mode 100644 SOURCES/sos-bz2004929-openvswitch-offline-analysis.patch create mode 100644 SOURCES/sos-bz2005195-iptables-based-on-ntf.patch delete mode 100644 SOURCES/sos-bz2011349-replace-dropbox-by-sftp.patch create mode 100644 SOURCES/sos-bz2011413-cpuX-individual-sizelimits.patch create mode 100644 SOURCES/sos-bz2011506-foreman-puma-status.patch create mode 100644 SOURCES/sos-bz2012856-dryrun-uncaught-exception.patch create mode 100644 SOURCES/sos-bz2012857-plugin-timeout-unhandled-exception.patch create mode 100644 SOURCES/sos-bz2018033-plugin-timeouts-proper-handling.patch create mode 100644 SOURCES/sos-bz2020777-filter-namespace-per-pattern.patch create mode 100644 SOURCES/sos-bz2023867-cleaner-hostnames-improvements.patch create mode 100644 SOURCES/sos-bz2025403-nvidia-GPU-info.patch create mode 100644 SOURCES/sos-bz2025610-RHTS-api-change.patch create mode 100644 SOURCES/sos-bz2030741-rhui-logs.patch create mode 100644 SOURCES/sos-bz2036697-ocp-backports.patch create mode 100644 SOURCES/sos-bz2041488-virsh-in-foreground.patch create mode 100644 SOURCES/sos-bz2042966-ovn-proper-package-enablement.patch create mode 100644 SOURCES/sos-bz2043102-foreman-tasks-msgpack.patch create mode 100644 SOURCES/sos-bz2054882-plugopt-logging-effective-opts.patch create mode 100644 SOURCES/sos-bz2055547-honour-plugins-timeout-hardcoded.patch diff --git a/.gitignore b/.gitignore index 0f2b270..a247b61 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ -SOURCES/sos-4.1.tar.gz +SOURCES/sos-4.2.tar.gz SOURCES/sos-audit-0.3.tgz diff --git a/.sos.metadata b/.sos.metadata index 950a640..054c91f 100644 --- a/.sos.metadata +++ b/.sos.metadata @@ -1,2 +1,2 @@ -7d4d03af232e2357e3359ad564a59f4c3654eac0 SOURCES/sos-4.1.tar.gz +fe82967b0577076aac104412a9fe35cdb444bde4 SOURCES/sos-4.2.tar.gz 9d478b9f0085da9178af103078bbf2fd77b0175a SOURCES/sos-audit-0.3.tgz diff --git a/SOURCES/sos-bz1665947-rhui-plugin.patch b/SOURCES/sos-bz1665947-rhui-plugin.patch deleted file mode 100644 index e884396..0000000 --- a/SOURCES/sos-bz1665947-rhui-plugin.patch +++ /dev/null @@ -1,387 +0,0 @@ -From 94b9b90c818eb18f0ca8d78fe063dc5b0677c885 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Tue, 22 Jun 2021 12:58:03 +0200 -Subject: [PATCH] [rhui] add plugin to RHUI - -Add a new/revoked plugin for RHUI (newly based on python3 and pulp-3). - -Additionally, collect /etc/pki/pulp certificates except for RSA keys. 
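(To make the collection pattern concrete before the diff below: a minimal editorial sketch, not part of the patch, of a plugin that copies a PKI directory while forbidding the private keys. The class and plugin_name are hypothetical; the Plugin API calls are the ones the diff uses.)

from sos.report.plugins import Plugin, RedHatPlugin

class CertCollector(Plugin, RedHatPlugin):
    short_desc = 'sketch: collect certificates, never keys'
    plugin_name = "certcollector"

    def setup(self):
        # copy the whole certificate directory...
        self.add_copy_spec("/etc/pki/pulp/*")
        # ...but exclude anything matching the private-key glob, even
        # though the copy spec above would otherwise include it
        self.add_forbidden_path("/etc/pki/pulp/*.key")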
- -Resolves: #2590 - -Signed-off-by: Pavel Moravec ---- - sos/report/plugins/pulpcore.py | 7 ++++- - sos/report/plugins/rhui.py | 49 ++++++++++++++++++++++++++++++++++ - 2 files changed, 55 insertions(+), 1 deletion(-) - create mode 100644 sos/report/plugins/rhui.py - -diff --git a/sos/report/plugins/pulpcore.py b/sos/report/plugins/pulpcore.py -index ccaac3185..77ceacb92 100644 ---- a/sos/report/plugins/pulpcore.py -+++ b/sos/report/plugins/pulpcore.py -@@ -77,7 +77,12 @@ def separate_value(line, sep=':'): - def setup(self): - self.parse_settings_config() - -- self.add_copy_spec("/etc/pulp/settings.py") -+ self.add_copy_spec([ -+ "/etc/pulp/settings.py", -+ "/etc/pki/pulp/*" -+ ]) -+ # skip collecting certificate keys -+ self.add_forbidden_path("/etc/pki/pulp/*.key") - - self.add_cmd_output("rq info -u redis://localhost:6379/8", - env={"LC_ALL": "en_US.UTF-8"}, -diff --git a/sos/report/plugins/rhui.py b/sos/report/plugins/rhui.py -new file mode 100644 -index 000000000..7acd3f49e ---- /dev/null -+++ b/sos/report/plugins/rhui.py -@@ -0,0 +1,49 @@ -+# Copyright (C) 2021 Red Hat, Inc., Pavel Moravec -+ -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. -+ -+from sos.report.plugins import Plugin, RedHatPlugin -+ -+ -+class Rhui(Plugin, RedHatPlugin): -+ -+ short_desc = 'Red Hat Update Infrastructure' -+ -+ plugin_name = "rhui" -+ commands = ("rhui-manager",) -+ files = ("/etc/ansible/facts.d/rhui_auth.fact", "/usr/lib/rhui/cds.py") -+ -+ def setup(self): -+ self.add_copy_spec([ -+ "/etc/rhui/rhui-tools.conf", -+ "/etc/rhui/registered_subscriptions.conf", -+ "/etc/pki/rhui/*", -+ "/var/log/rhui-subscription-sync.log", -+ "/var/cache/rhui/*", -+ "/root/.rhui/*", -+ ]) -+ # skip collecting certificate keys -+ self.add_forbidden_path("/etc/pki/rhui/*.key") -+ -+ self.add_cmd_output([ -+ "rhui-manager status", -+ "rhui-manager cert info", -+ "ls -lR /var/lib/rhui/remote_share", -+ ]) -+ -+ def postproc(self): -+ # obfuscate admin_pw and secret_key values -+ for prop in ["admin_pw", "secret_key"]: -+ self.do_path_regex_sub( -+ "/etc/ansible/facts.d/rhui_auth.fact", -+ r"(%s\s*=\s*)(.*)" % prop, -+ r"\1********") -+ -+ -+# vim: set et ts=4 sw=4 : -From bd15dc764c9d4554d8e8f08163228d65ca099985 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 24 Jun 2021 17:53:27 +0200 -Subject: [PATCH 1/4] [plugins] Allow add_forbidden_path to apply glob - recursively - -Add option to apply glob.glob to forbidden path recursively. 
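(A short editorial illustration of what the new flag changes, using the standard library directly; the path is hypothetical:)

import glob

# Default recursive=False: '**' behaves like '*', matching exactly one
# path component, so only keys one level below the directory are found.
glob.glob("/etc/pki/rhui/**/*.key")

# With recursive=True: '**' matches zero or more nested directories,
# so keys at any depth are found and can be forbidden.
glob.glob("/etc/pki/rhui/**/*.key", recursive=True)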
- -Signed-off-by: Pavel Moravec ---- - sos/report/plugins/__init__.py | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py -index 06923300..6fd1a3b2 100644 ---- a/sos/report/plugins/__init__.py -+++ b/sos/report/plugins/__init__.py -@@ -1187,12 +1187,14 @@ class Plugin(object): - 'symlink': "no" - }) - -- def add_forbidden_path(self, forbidden): -+ def add_forbidden_path(self, forbidden, recursive=False): - """Specify a path, or list of paths, to not copy, even if it's part of - an ``add_copy_spec()`` call - - :param forbidden: A filepath to forbid collection from - :type forbidden: ``str`` or a ``list`` of strings -+ -+ :param recursive: Should forbidden glob be applied recursively - """ - if isinstance(forbidden, str): - forbidden = [forbidden] -@@ -1202,7 +1204,7 @@ class Plugin(object): - - for forbid in forbidden: - self._log_info("adding forbidden path '%s'" % forbid) -- for path in glob.glob(forbid): -+ for path in glob.glob(forbid, recursive=recursive): - self.forbidden_paths.append(path) - - def get_all_options(self): --- -2.31.1 - - -From b695201baeb629a6543445d98dbb04f357670621 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 24 Jun 2021 17:57:48 +0200 -Subject: [PATCH 2/4] [pulpcore] improve settings.py parsing - -- deal with /etc/pulp/settings.py as a one-line string -- parse dbname from it as well -- dont collect any *.key file from whole /etc/pki/pulp dir - -Related: #2593 - -Signed-off-by: Pavel Moravec ---- - sos/report/plugins/pulpcore.py | 23 +++++++++++++++-------- - 1 file changed, 15 insertions(+), 8 deletions(-) - -diff --git a/sos/report/plugins/pulpcore.py b/sos/report/plugins/pulpcore.py -index 77ceacb9..be526035 100644 ---- a/sos/report/plugins/pulpcore.py -+++ b/sos/report/plugins/pulpcore.py -@@ -28,9 +28,10 @@ class PulpCore(Plugin, IndependentPlugin): - databases_scope = False - self.dbhost = "localhost" - self.dbport = 5432 -+ self.dbname = "pulpcore" - self.dbpasswd = "" - # TODO: read also redis config (we dont expect much customisations) -- # TODO: read also db user (pulp) and database name (pulpcore) -+ # TODO: read also db user (pulp) - self.staticroot = "/var/lib/pulp/assets" - self.uploaddir = "/var/lib/pulp/media/upload" - -@@ -44,7 +45,10 @@ class PulpCore(Plugin, IndependentPlugin): - return val - - try: -- for line in open("/etc/pulp/settings.py").read().splitlines(): -+ # split the lines to "one option per line" format -+ for line in open("/etc/pulp/settings.py").read() \ -+ .replace(',', ',\n').replace('{', '{\n') \ -+ .replace('}', '\n}').splitlines(): - # skip empty lines and lines with comments - if not line or line[0] == '#': - continue -@@ -53,11 +57,14 @@ class PulpCore(Plugin, IndependentPlugin): - continue - # example HOST line to parse: - # 'HOST': 'localhost', -- if databases_scope and match(r"\s+'HOST'\s*:\s+\S+", line): -+ pattern = r"\s*['|\"]%s['|\"]\s*:\s*\S+" -+ if databases_scope and match(pattern % 'HOST', line): - self.dbhost = separate_value(line) -- if databases_scope and match(r"\s+'PORT'\s*:\s+\S+", line): -+ if databases_scope and match(pattern % 'PORT', line): - self.dbport = separate_value(line) -- if databases_scope and match(r"\s+'PASSWORD'\s*:\s+\S+", line): -+ if databases_scope and match(pattern % 'NAME', line): -+ self.dbname = separate_value(line) -+ if databases_scope and match(pattern % 'PASSWORD', line): - self.dbpasswd = separate_value(line) - # if line contains closing '}' database_scope end - if databases_scope 
and '}' in line: -@@ -82,7 +89,7 @@ class PulpCore(Plugin, IndependentPlugin): - "/etc/pki/pulp/*" - ]) - # skip collecting certificate keys -- self.add_forbidden_path("/etc/pki/pulp/*.key") -+ self.add_forbidden_path("/etc/pki/pulp/**/*.key", recursive=True) - - self.add_cmd_output("rq info -u redis://localhost:6379/8", - env={"LC_ALL": "en_US.UTF-8"}, -@@ -104,8 +111,8 @@ class PulpCore(Plugin, IndependentPlugin): - _query = "select * from %s where pulp_last_updated > NOW() - " \ - "interval '%s days' order by pulp_last_updated" % \ - (table, task_days) -- _cmd = "psql -h %s -p %s -U pulp -d pulpcore -c %s" % \ -- (self.dbhost, self.dbport, quote(_query)) -+ _cmd = "psql -h %s -p %s -U pulp -d %s -c %s" % \ -+ (self.dbhost, self.dbport, self.dbname, quote(_query)) - self.add_cmd_output(_cmd, env=self.env, suggest_filename=table) - - def postproc(self): --- -2.31.1 - - -From 0286034da44bce43ab368dfc6815da7d74d60719 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 24 Jun 2021 17:59:36 +0200 -Subject: [PATCH 3/4] [rhui] call rhui-* commands with proper env and timeout - -rhui-manager commands time out when not being logged in, which -we address by adding a proper command timeout. - -Adding the env. variable ensures a potentially unanswered "RHUI Username:" -prompt is also printed/collected. - -Further, prevent collecting any *.key file from the whole /etc/pki/rhui -dir. - -Related: #2593 - -Signed-off-by: Pavel Moravec ---- - sos/report/plugins/rhui.py | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - -diff --git a/sos/report/plugins/rhui.py b/sos/report/plugins/rhui.py -index 7acd3f49..5a152427 100644 ---- a/sos/report/plugins/rhui.py -+++ b/sos/report/plugins/rhui.py -@@ -29,13 +29,16 @@ class Rhui(Plugin, RedHatPlugin): - "/root/.rhui/*", - ]) - # skip collecting certificate keys -- self.add_forbidden_path("/etc/pki/rhui/*.key") -+ self.add_forbidden_path("/etc/pki/rhui/**/*.key", recursive=True) - -+ # call rhui-manager commands with 1m timeout and -+ # with an env. 
variable ensuring that "RHUI Username:" -+ # even unanswered prompt gets collected - self.add_cmd_output([ - "rhui-manager status", - "rhui-manager cert info", - "ls -lR /var/lib/rhui/remote_share", -- ]) -+ ], timeout=60, env={'PYTHONUNBUFFERED': '1'}) - - def postproc(self): - # obfuscate admin_pw and secret_key values --- -2.31.1 - - -From a656bd239ab86dfd8973f733ae2c0fbd0c57d416 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 24 Jun 2021 18:01:14 +0200 -Subject: [PATCH 4/4] [rhui] fix broken obfuscation - -- /etc/ansible/facts.d/rhui_*.fact must be collected by -the rhui plugin so that files there can be obfuscated -- also obfuscate cookie values that can grant login access - -Resolves: #2593 - -Signed-off-by: Pavel Moravec ---- - sos/report/plugins/ansible.py | 3 +++ - sos/report/plugins/rhui.py | 7 +++++++ - 2 files changed, 10 insertions(+) - -diff --git a/sos/report/plugins/ansible.py b/sos/report/plugins/ansible.py -index 3e5d3d37..5991b786 100644 ---- a/sos/report/plugins/ansible.py -+++ b/sos/report/plugins/ansible.py -@@ -29,4 +29,7 @@ class Ansible(Plugin, RedHatPlugin, UbuntuPlugin): - "ansible --version" - ]) - -+ # let the rhui plugin collect the RHUI specific files -+ self.add_forbidden_path("/etc/ansible/facts.d/rhui_*.fact") -+ - # vim: set et ts=4 sw=4 : -diff --git a/sos/report/plugins/rhui.py b/sos/report/plugins/rhui.py -index 5a152427..1d479f85 100644 ---- a/sos/report/plugins/rhui.py -+++ b/sos/report/plugins/rhui.py -@@ -27,6 +27,7 @@ class Rhui(Plugin, RedHatPlugin): - "/var/log/rhui-subscription-sync.log", - "/var/cache/rhui/*", - "/root/.rhui/*", -+ "/etc/ansible/facts.d/rhui_*.fact", - ]) - # skip collecting certificate keys - self.add_forbidden_path("/etc/pki/rhui/**/*.key", recursive=True) -@@ -47,6 +48,12 @@ class Rhui(Plugin, RedHatPlugin): - "/etc/ansible/facts.d/rhui_auth.fact", - r"(%s\s*=\s*)(.*)" % prop, - r"\1********") -+ # obfuscate twoo cookies for login session -+ for cookie in ["csrftoken", "sessionid"]: -+ self.do_path_regex_sub( -+ r"/root/\.rhui/.*/cookies.txt", -+ r"(%s\s+)(\S+)" % cookie, -+ r"\1********") - - - # vim: set et ts=4 sw=4 : --- -2.31.1 - -From 4e5bebffca9936bcdf4d38aad9989970a15dd72b Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Tue, 3 Aug 2021 21:54:33 +0200 -Subject: [PATCH] [rhui] Update the plugin in several places - -- obfuscate "rhui_manager_password: xxx" in /root/.rhui/answers.yaml* -- no need to collect or obfuscate anything from /etc/ansible/facts.d -- newly detect the plugin via /etc/rhui/rhui-tools.conf file or rhui-manager - command (only) - -Resolves: #2637 - -Signed-off-by: Pavel Moravec ---- - sos/report/plugins/rhui.py | 14 ++++++-------- - 1 file changed, 6 insertions(+), 8 deletions(-) - -diff --git a/sos/report/plugins/rhui.py b/sos/report/plugins/rhui.py -index 1d479f85..52065fb4 100644 ---- a/sos/report/plugins/rhui.py -+++ b/sos/report/plugins/rhui.py -@@ -16,8 +16,8 @@ class Rhui(Plugin, RedHatPlugin): - short_desc = 'Red Hat Update Infrastructure' - - plugin_name = "rhui" -- commands = ("rhui-manager",) -- files = ("/etc/ansible/facts.d/rhui_auth.fact", "/usr/lib/rhui/cds.py") -+ commands = ("rhui-manager", ) -+ files = ("/etc/rhui/rhui-tools.conf", ) - - def setup(self): - self.add_copy_spec([ -@@ -27,7 +27,6 @@ class Rhui(Plugin, RedHatPlugin): - "/var/log/rhui-subscription-sync.log", - "/var/cache/rhui/*", - "/root/.rhui/*", -- "/etc/ansible/facts.d/rhui_*.fact", - ]) - # skip collecting certificate keys - self.add_forbidden_path("/etc/pki/rhui/**/*.key", recursive=True) -@@ -42,11 
+41,10 @@ class Rhui(Plugin, RedHatPlugin): - ], timeout=60, env={'PYTHONUNBUFFERED': '1'}) - - def postproc(self): -- # obfuscate admin_pw and secret_key values -- for prop in ["admin_pw", "secret_key"]: -- self.do_path_regex_sub( -- "/etc/ansible/facts.d/rhui_auth.fact", -- r"(%s\s*=\s*)(.*)" % prop, -+ # hide rhui_manager_password value in (also rotated) answers file -+ self.do_path_regex_sub( -+ r"/root/\.rhui/answers.yaml.*", -+ r"(\s*rhui_manager_password\s*:)\s*(\S+)", - r"\1********") - # obfuscate twoo cookies for login session - for cookie in ["csrftoken", "sessionid"]: --- -2.31.1 - diff --git a/SOURCES/sos-bz1873185-estimate-only-option.patch b/SOURCES/sos-bz1873185-estimate-only-option.patch new file mode 100644 index 0000000..a1a96c4 --- /dev/null +++ b/SOURCES/sos-bz1873185-estimate-only-option.patch @@ -0,0 +1,1316 @@ +From 5b245b1e449c6a05d09034bcb8290bffded79327 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Wed, 8 Sep 2021 17:04:58 +0200 +Subject: [PATCH] [report] Implement --estimate-only + +Add report option --estimate-only to estimate disk space requirements +when running a sos report. + +Resolves: #2673 + +Signed-off-by: Pavel Moravec +--- + man/en/sos-report.1 | 13 +++++++- + sos/report/__init__.py | 74 ++++++++++++++++++++++++++++++++++++++++-- + 2 files changed, 84 insertions(+), 3 deletions(-) + +diff --git a/man/en/sos-report.1 b/man/en/sos-report.1 +index 36b337df..e8efc8f8 100644 +--- a/man/en/sos-report.1 ++++ b/man/en/sos-report.1 +@@ -14,7 +14,7 @@ sos report \- Collect and package diagnostic and support data + [--preset preset] [--add-preset add_preset]\fR + [--del-preset del_preset] [--desc description]\fR + [--batch] [--build] [--debug] [--dry-run]\fR +- [--label label] [--case-id id]\fR ++ [--estimate-only] [--label label] [--case-id id]\fR + [--threads threads]\fR + [--plugin-timeout TIMEOUT]\fR + [--cmd-timeout TIMEOUT]\fR +@@ -317,6 +317,17 @@ output, or string data from the system. The resulting logs may be used + to understand the actions that sos would have taken without the dry run + option. + .TP ++.B \--estimate-only ++Estimate disk space requirements when running sos report. This can be valuable ++to prevent sosreport working dir to consume all free disk space. No plugin data ++is available at the end. ++ ++Plugins will be collected sequentially, size of collected files and commands outputs ++will be calculated and the plugin files will be immediatelly deleted prior execution ++of the next plugin. This still can consume whole free disk space, though. Please note, ++size estimations may not be accurate for highly utilized systems due to changes between ++an estimate and a real execution. ++.TP + .B \--upload + If specified, attempt to upload the resulting archive to a vendor defined location. 
+ +diff --git a/sos/report/__init__.py b/sos/report/__init__.py +index 82484f1d..b033f621 100644 +--- a/sos/report/__init__.py ++++ b/sos/report/__init__.py +@@ -86,6 +86,7 @@ class SoSReport(SoSComponent): + 'desc': '', + 'domains': [], + 'dry_run': False, ++ 'estimate_only': False, + 'experimental': False, + 'enable_plugins': [], + 'keywords': [], +@@ -137,6 +138,7 @@ class SoSReport(SoSComponent): + self._args = args + self.sysroot = "/" + self.preset = None ++ self.estimated_plugsizes = {} + + self.print_header() + self._set_debug() +@@ -223,6 +225,11 @@ class SoSReport(SoSComponent): + help="Description for a new preset",) + report_grp.add_argument("--dry-run", action="store_true", + help="Run plugins but do not collect data") ++ report_grp.add_argument("--estimate-only", action="store_true", ++ help="Approximate disk space requirements for " ++ "a real sos run; disables --clean and " ++ "--collect, sets --threads=1 and " ++ "--no-postproc") + report_grp.add_argument("--experimental", action="store_true", + dest="experimental", default=False, + help="enable experimental plugins") +@@ -700,6 +700,33 @@ class SoSReport(SoSComponent): + self.all_options.append((plugin, plugin_name, optname, + optparm)) + ++ def _set_estimate_only(self): ++ # set estimate-only mode by enforcing some options settings ++ # and return a corresponding log messages string ++ msg = "\nEstimate-only mode enabled" ++ ext_msg = [] ++ if self.opts.threads > 1: ++ ext_msg += ["--threads=%s overriden to 1" % self.opts.threads, ] ++ self.opts.threads = 1 ++ if not self.opts.build: ++ ext_msg += ["--build enabled", ] ++ self.opts.build = True ++ if not self.opts.no_postproc: ++ ext_msg += ["--no-postproc enabled", ] ++ self.opts.no_postproc = True ++ if self.opts.clean: ++ ext_msg += ["--clean disabled", ] ++ self.opts.clean = False ++ if self.opts.upload: ++ ext_msg += ["--upload* options disabled", ] ++ self.opts.upload = False ++ if ext_msg: ++ msg += ", which overrides some options:\n " + "\n ".join(ext_msg) ++ else: ++ msg += "." 
++ msg += "\n\n" ++ return msg ++ + def _report_profiles_and_plugins(self): + self.ui_log.info("") + if len(self.loaded_plugins): +@@ -875,10 +909,12 @@ class SoSReport(SoSComponent): + return True + + def batch(self): ++ msg = self.policy.get_msg() ++ if self.opts.estimate_only: ++ msg += self._set_estimate_only() + if self.opts.batch: +- self.ui_log.info(self.policy.get_msg()) ++ self.ui_log.info(msg) + else: +- msg = self.policy.get_msg() + msg += _("Press ENTER to continue, or CTRL-C to quit.\n") + try: + input(msg) +@@ -1011,6 +1047,22 @@ class SoSReport(SoSComponent): + self.running_plugs.remove(plugin[1]) + self.loaded_plugins[plugin[0]-1][1].set_timeout_hit() + pool._threads.clear() ++ if self.opts.estimate_only: ++ from pathlib import Path ++ tmpdir_path = Path(self.archive.get_tmp_dir()) ++ self.estimated_plugsizes[plugin[1]] = sum( ++ [f.stat().st_size for f in tmpdir_path.glob('**/*') ++ if (os.path.isfile(f) and not os.path.islink(f))]) ++ # remove whole tmp_dir content - including "sos_commands" and ++ # similar dirs that will be re-created on demand by next plugin ++ # if needed; it is less error-prone approach than skipping ++ # deletion of some dirs but deleting their content ++ for f in os.listdir(self.archive.get_tmp_dir()): ++ f = os.path.join(self.archive.get_tmp_dir(), f) ++ if os.path.isdir(f): ++ rmtree(f) ++ else: ++ os.unlink(f) + return True + + def collect_plugin(self, plugin): +@@ -1330,6 +1382,24 @@ class SoSReport(SoSComponent): + self.policy.display_results(archive, directory, checksum, + map_file=map_file) + ++ if self.opts.estimate_only: ++ from sos.utilities import get_human_readable ++ _sum = get_human_readable(sum(self.estimated_plugsizes.values())) ++ self.ui_log.info("Estimated disk space requirement for whole " ++ "uncompressed sos report directory: %s" % _sum) ++ bigplugins = sorted(self.estimated_plugsizes.items(), ++ key=lambda x: x[1], reverse=True)[:3] ++ bp_out = ", ".join("%s: %s" % ++ (p, get_human_readable(v, precision=0)) ++ for p, v in bigplugins) ++ self.ui_log.info("Three biggest plugins: %s" % bp_out) ++ self.ui_log.info("") ++ self.ui_log.info("Please note the estimation is relevant to the " ++ "current options.") ++ self.ui_log.info("Be aware that the real disk space requirements " ++ "might be different.") ++ self.ui_log.info("") ++ + if self.opts.upload or self.opts.upload_url: + if not self.opts.build: + try: +-- +2.31.1 + +From 7ae47e6c0717c0b56c3368008dd99a87f7f436d5 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Wed, 13 Oct 2021 20:21:16 +0200 +Subject: [PATCH] [report] Count with sos_logs and sos_reports in + --estimate-only + +Currently, we estimate just plugins' disk space and ignore sos_logs +or sos_reports directories - although they can occupy nontrivial disk +space as well. 
+ +Resolves: #2723 + +Signed-off-by: Pavel Moravec +--- + sos/report/__init__.py | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/sos/report/__init__.py b/sos/report/__init__.py +index e35c7e8d..7feb31ee 100644 +--- a/sos/report/__init__.py ++++ b/sos/report/__init__.py +@@ -1380,6 +1380,14 @@ class SoSReport(SoSComponent): + + if self.opts.estimate_only: + from sos.utilities import get_human_readable ++ from pathlib import Path ++ # add sos_logs, sos_reports dirs, etc., basically everything ++ # that remained in self.tmpdir after plugins' contents removal ++ # that still will be moved to the sos report final directory path ++ tmpdir_path = Path(self.tmpdir) ++ self.estimated_plugsizes['sos_logs_reports'] = sum( ++ [f.stat().st_size for f in tmpdir_path.glob('**/*')]) ++ + _sum = get_human_readable(sum(self.estimated_plugsizes.values())) + self.ui_log.info("Estimated disk space requirement for whole " + "uncompressed sos report directory: %s" % _sum) +-- +2.31.1 + +From 4293f3317505661e8f32ba94ad87310996fa1626 Mon Sep 17 00:00:00 2001 +From: Eric Desrochers +Date: Tue, 19 Oct 2021 12:18:40 -0400 +Subject: [PATCH] [report] check for symlink before rmtree when opt + estimate-only is used + +Check if the dir is also a symlink before performing the rmtree() +method so that the unlink() method can be used instead. + +Traceback (most recent call last): + File "./bin/sos", line 22, in + sos.execute() + File "/tmp/sos/sos/__init__.py", line 186, in execute + self._component.execute() +OSError: Cannot call rmtree on a symbolic link + +Closes: #2727 + +Signed-off-by: Eric Desrochers +--- + sos/report/__init__.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/report/__init__.py b/sos/report/__init__.py +index 7feb31ee..1b5bc97d 100644 +--- a/sos/report/__init__.py ++++ b/sos/report/__init__.py +@@ -1059,7 +1059,7 @@ class SoSReport(SoSComponent): + # deletion of some dirs but deleting their content + for f in os.listdir(self.archive.get_tmp_dir()): + f = os.path.join(self.archive.get_tmp_dir(), f) +- if os.path.isdir(f): ++ if os.path.isdir(f) and not os.path.islink(f): + rmtree(f) + else: + os.unlink(f) +-- +2.31.1 + +From 589d47c93257b55bc796ef6ac25b88c974ee3d72 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Mon, 8 Nov 2021 16:38:24 +0100 +Subject: [PATCH] [report] Calculate sizes of dirs, symlinks and manifest in + estimate mode + +Enhance --estimate-only mode to also calculate the sizes of: +- symlinks +- directories themselves +- the manifest.json file + +Use the os.lstat() method instead of os.stat() to properly calculate the +sizes (and not, e.g., the destinations of symlinks). + +Print the five biggest plugins instead of three, as sos logs and reports +often stand as one "plugin" in the list. 
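(The stat() versus lstat() distinction driving this change, as an editorial illustration; /etc/localtime is simply a path that is commonly a symlink on Linux:)

import os

path = "/etc/localtime"
if os.path.islink(path):
    os.stat(path).st_size    # follows the link: size of the target file
    os.lstat(path).st_size   # sizes the symlink entry itself

# lstat() also returns a size for directories and never raises on a
# dangling symlink, so summing lstat() over every entry under a tree
# counts the links and directories that stat()-based filtering skipped.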
+ +Resolves: #2752 + +Signed-off-by: Pavel Moravec +--- + sos/report/__init__.py | 56 +++++++++++++++++++++--------------------- + 1 file changed, 28 insertions(+), 28 deletions(-) + +diff --git a/sos/report/__init__.py b/sos/report/__init__.py +index 10952566..a4c92acc 100644 +--- a/sos/report/__init__.py ++++ b/sos/report/__init__.py +@@ -1050,8 +1050,7 @@ class SoSReport(SoSComponent): + from pathlib import Path + tmpdir_path = Path(self.archive.get_tmp_dir()) + self.estimated_plugsizes[plugin[1]] = sum( +- [f.stat().st_size for f in tmpdir_path.glob('**/*') +- if (os.path.isfile(f) and not os.path.islink(f))]) ++ [f.lstat().st_size for f in tmpdir_path.glob('**/*')]) + # remove whole tmp_dir content - including "sos_commands" and + # similar dirs that will be re-created on demand by next plugin + # if needed; it is less error-prone approach than skipping +@@ -1273,6 +1272,33 @@ class SoSReport(SoSComponent): + short_name='manifest.json' + ) + ++ # print results in estimate mode (to include also just added manifest) ++ if self.opts.estimate_only: ++ from sos.utilities import get_human_readable ++ from pathlib import Path ++ # add sos_logs, sos_reports dirs, etc., basically everything ++ # that remained in self.tmpdir after plugins' contents removal ++ # that still will be moved to the sos report final directory path ++ tmpdir_path = Path(self.tmpdir) ++ self.estimated_plugsizes['sos_logs_reports'] = sum( ++ [f.lstat().st_size for f in tmpdir_path.glob('**/*')]) ++ ++ _sum = get_human_readable(sum(self.estimated_plugsizes.values())) ++ self.ui_log.info("Estimated disk space requirement for whole " ++ "uncompressed sos report directory: %s" % _sum) ++ bigplugins = sorted(self.estimated_plugsizes.items(), ++ key=lambda x: x[1], reverse=True)[:5] ++ bp_out = ", ".join("%s: %s" % ++ (p, get_human_readable(v, precision=0)) ++ for p, v in bigplugins) ++ self.ui_log.info("Five biggest plugins: %s" % bp_out) ++ self.ui_log.info("") ++ self.ui_log.info("Please note the estimation is relevant to the " ++ "current options.") ++ self.ui_log.info("Be aware that the real disk space requirements " ++ "might be different.") ++ self.ui_log.info("") ++ + # package up and compress the results + if not self.opts.build: + old_umask = os.umask(0o077) +@@ -1377,32 +1403,6 @@ class SoSReport(SoSComponent): + self.policy.display_results(archive, directory, checksum, + map_file=map_file) + +- if self.opts.estimate_only: +- from sos.utilities import get_human_readable +- from pathlib import Path +- # add sos_logs, sos_reports dirs, etc., basically everything +- # that remained in self.tmpdir after plugins' contents removal +- # that still will be moved to the sos report final directory path +- tmpdir_path = Path(self.tmpdir) +- self.estimated_plugsizes['sos_logs_reports'] = sum( +- [f.stat().st_size for f in tmpdir_path.glob('**/*')]) +- +- _sum = get_human_readable(sum(self.estimated_plugsizes.values())) +- self.ui_log.info("Estimated disk space requirement for whole " +- "uncompressed sos report directory: %s" % _sum) +- bigplugins = sorted(self.estimated_plugsizes.items(), +- key=lambda x: x[1], reverse=True)[:3] +- bp_out = ", ".join("%s: %s" % +- (p, get_human_readable(v, precision=0)) +- for p, v in bigplugins) +- self.ui_log.info("Three biggest plugins: %s" % bp_out) +- self.ui_log.info("") +- self.ui_log.info("Please note the estimation is relevant to the " +- "current options.") +- self.ui_log.info("Be aware that the real disk space requirements " +- "might be different.") +- self.ui_log.info("") +- + if 
self.opts.upload or self.opts.upload_url: + if not self.opts.build: + try: +-- +2.31.1 + +From c6a5bbb8d75aadd5c7f76d3f469929aba2cf8060 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Wed, 5 Jan 2022 10:33:58 +0100 +Subject: [PATCH] [report] Provide better warning about estimate-mode + +As --estimate-only calculates disk usage based on `stat` data that +differs from outputs of other commands like `du`, enhance the warning +about reliability of the calculated estimation. + +Also add a rule-of-thumb recommendation of real disk space requirements. + +Resolves: #2815 + +Signed-off-by: Pavel Moravec +--- + man/en/sos-report.1 | 10 +++++++--- + sos/report/__init__.py | 3 ++- + 2 files changed, 9 insertions(+), 4 deletions(-) + +diff --git a/man/en/sos-report.1 b/man/en/sos-report.1 +index 464a77e54..e34773986 100644 +--- a/man/en/sos-report.1 ++++ b/man/en/sos-report.1 +@@ -343,9 +343,13 @@ is available at the end. + + Plugins will be collected sequentially, size of collected files and commands outputs + will be calculated and the plugin files will be immediatelly deleted prior execution +-of the next plugin. This still can consume whole free disk space, though. Please note, +-size estimations may not be accurate for highly utilized systems due to changes between +-an estimate and a real execution. ++of the next plugin. This still can consume whole free disk space, though. ++ ++Please note, size estimations may not be accurate for highly utilized systems due to ++changes between an estimate and a real execution. Also some difference between ++estimation (using `stat` command) and other commands used (i.e. `du`). ++ ++A rule of thumb is to reserve at least double the estimation. + .TP + .B \--upload + If specified, attempt to upload the resulting archive to a vendor defined location. +diff --git a/sos/report/__init__.py b/sos/report/__init__.py +index ef61fb344..e0617b45e 100644 +--- a/sos/report/__init__.py ++++ b/sos/report/__init__.py +@@ -1330,7 +1330,8 @@ def final_work(self): + self.ui_log.info("Please note the estimation is relevant to the " + "current options.") + self.ui_log.info("Be aware that the real disk space requirements " +- "might be different.") ++ "might be different. A rule of thumb is to " ++ "reserve at least double the estimation.") + self.ui_log.info("") + + # package up and compress the results +From f22efe044f1f0565b57d6aeca2081a5227e0312c Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Mon, 14 Feb 2022 09:37:30 -0500 +Subject: [PATCH] [utilities] Don't try to chroot to / + +With the recent fix for sysroot being `None` to always being (correctly) +`/`, we should guard against situations where `sos_get_command_output()` +would now try to chroot to `/` before running any command. Incidentally, +this would also cause our unittests to fail if they were run by a +non-root user. + +Signed-off-by: Jake Hunsaker +--- + sos/utilities.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/utilities.py b/sos/utilities.py +index 6b13415b..d782123a 100644 +--- a/sos/utilities.py ++++ b/sos/utilities.py +@@ -120,7 +120,7 @@ def sos_get_command_output(command, timeout=TIMEOUT_DEFAULT, stderr=False, + # closure are caught in the parent (chroot and chdir are bound from + # the enclosing scope). 
+ def _child_prep_fn(): +- if (chroot): ++ if chroot and chroot != '/': + os.chroot(chroot) + if (chdir): + os.chdir(chdir) +-- +2.34.1 +From 3d064102f8ca6662fd9602512e1cb05cf8746dfd Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Mon, 27 Sep 2021 19:01:16 -0400 +Subject: [PATCH] [Systemd, Policy] Correct InitSystem chrooting when chroot is + needed + +This commit resolves a situation in which `sos` is being run in a +container but the `SystemdInit` InitSystem would not properly load +information from the host, thus causing the `Plugin.is_service*()` +methods to erroneously fail or return `False`. + +Fix this scenario by pulling the `_container_init()` and related logic +to check for a containerized host sysroot out of the Red Hat specific +policy and into the base `LinuxPolicy` class so that the init system can +be initialized with the correct sysroot, which is now used to chroot the +calls to the relevant `systemctl` commands. + +For now, this does impose the use of looking for the `container` env var +(automatically set by docker, podman, and crio regardless of +distribution) and the use of the `HOST` env var to read where the host's +`/` filesystem is mounted within the container. If desired in the +future, this can be changed to allow policy-specific overrides. For now +however, this extends host collection via an sos container for all +distributions currently shipping sos. + +Note that this issue only affected the `InitSystem` abstraction for +loading information about local services, and did not affect init system +related commands called by plugins as part of those collections. + +Signed-off-by: Jake Hunsaker +--- + sos/policies/distros/__init__.py | 28 ++++++++++++++++++++++++++- + sos/policies/distros/redhat.py | 27 +------------------------- + sos/policies/init_systems/__init__.py | 13 +++++++++++-- + sos/policies/init_systems/systemd.py | 7 ++++--- + 4 files changed, 43 insertions(+), 32 deletions(-) + +diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py +index f5b9fd5b01..c33a356a75 100644 +--- a/sos/policies/distros/__init__.py ++++ b/sos/policies/distros/__init__.py +@@ -29,6 +29,10 @@ + except ImportError: + REQUESTS_LOADED = False + ++# Container environment variables for detecting if we're in a container ++ENV_CONTAINER = 'container' ++ENV_HOST_SYSROOT = 'HOST' ++ + + class LinuxPolicy(Policy): + """This policy is meant to be an abc class that provides common +@@ -69,10 +73,17 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True): + probe_runtime=probe_runtime) + self.init_kernel_modules() + ++ # need to set _host_sysroot before PackageManager() ++ if sysroot: ++ self._container_init() ++ self._host_sysroot = sysroot ++ else: ++ sysroot = self._container_init() ++ + if init is not None: + self.init_system = init + elif os.path.isdir("/run/systemd/system/"): +- self.init_system = SystemdInit() ++ self.init_system = SystemdInit(chroot=sysroot) + else: + self.init_system = InitSystem() + +@@ -130,6 +141,21 @@ def get_local_name(self): + def sanitize_filename(self, name): + return re.sub(r"[^-a-z,A-Z.0-9]", "", name) + ++ def _container_init(self): ++ """Check if sos is running in a container and perform container ++ specific initialisation based on ENV_HOST_SYSROOT. 
++ """ ++ if ENV_CONTAINER in os.environ: ++ if os.environ[ENV_CONTAINER] in ['docker', 'oci', 'podman']: ++ self._in_container = True ++ if ENV_HOST_SYSROOT in os.environ: ++ self._host_sysroot = os.environ[ENV_HOST_SYSROOT] ++ use_sysroot = self._in_container and self._host_sysroot is not None ++ if use_sysroot: ++ host_tmp_dir = os.path.abspath(self._host_sysroot + self._tmp_dir) ++ self._tmp_dir = host_tmp_dir ++ return self._host_sysroot if use_sysroot else None ++ + def init_kernel_modules(self): + """Obtain a list of loaded kernel modules to reference later for plugin + enablement and SoSPredicate checks +diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py +index b3a84336be..3476e21fb2 100644 +--- a/sos/policies/distros/redhat.py ++++ b/sos/policies/distros/redhat.py +@@ -17,7 +17,7 @@ + from sos.presets.redhat import (RHEL_PRESETS, ATOMIC_PRESETS, RHV, RHEL, + CB, RHOSP, RHOCP, RH_CFME, RH_SATELLITE, + ATOMIC) +-from sos.policies.distros import LinuxPolicy ++from sos.policies.distros import LinuxPolicy, ENV_HOST_SYSROOT + from sos.policies.package_managers.rpm import RpmPackageManager + from sos import _sos as _ + +@@ -56,12 +56,6 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, + super(RedHatPolicy, self).__init__(sysroot=sysroot, init=init, + probe_runtime=probe_runtime) + self.usrmove = False +- # need to set _host_sysroot before PackageManager() +- if sysroot: +- self._container_init() +- self._host_sysroot = sysroot +- else: +- sysroot = self._container_init() + + self.package_manager = RpmPackageManager(chroot=sysroot, + remote_exec=remote_exec) +@@ -140,21 +134,6 @@ def transform_path(path): + else: + return files + +- def _container_init(self): +- """Check if sos is running in a container and perform container +- specific initialisation based on ENV_HOST_SYSROOT. +- """ +- if ENV_CONTAINER in os.environ: +- if os.environ[ENV_CONTAINER] in ['docker', 'oci', 'podman']: +- self._in_container = True +- if ENV_HOST_SYSROOT in os.environ: +- self._host_sysroot = os.environ[ENV_HOST_SYSROOT] +- use_sysroot = self._in_container and self._host_sysroot is not None +- if use_sysroot: +- host_tmp_dir = os.path.abspath(self._host_sysroot + self._tmp_dir) +- self._tmp_dir = host_tmp_dir +- return self._host_sysroot if use_sysroot else None +- + def runlevel_by_service(self, name): + from subprocess import Popen, PIPE + ret = [] +@@ -183,10 +162,6 @@ def get_tmp_dir(self, opt_tmp_dir): + return opt_tmp_dir + + +-# Container environment variables on Red Hat systems. +-ENV_CONTAINER = 'container' +-ENV_HOST_SYSROOT = 'HOST' +- + # Legal disclaimer text for Red Hat products + disclaimer_text = """ + Any information provided to %(vendor)s will be treated in \ +diff --git a/sos/policies/init_systems/__init__.py b/sos/policies/init_systems/__init__.py +index dd663e6522..beac44cee3 100644 +--- a/sos/policies/init_systems/__init__.py ++++ b/sos/policies/init_systems/__init__.py +@@ -29,9 +29,14 @@ class InitSystem(): + status of services + :type query_cmd: ``str`` + ++ :param chroot: Location to chroot to for any command execution, i.e. 
the ++ sysroot if we're running in a container ++ :type chroot: ``str`` or ``None`` ++ + """ + +- def __init__(self, init_cmd=None, list_cmd=None, query_cmd=None): ++ def __init__(self, init_cmd=None, list_cmd=None, query_cmd=None, ++ chroot=None): + """Initialize a new InitSystem()""" + + self.services = {} +@@ -39,6 +44,7 @@ def __init__(self, init_cmd=None, list_cmd=None, query_cmd=None): + self.init_cmd = init_cmd + self.list_cmd = "%s %s" % (self.init_cmd, list_cmd) or None + self.query_cmd = "%s %s" % (self.init_cmd, query_cmd) or None ++ self.chroot = chroot + + def is_enabled(self, name): + """Check if given service name is enabled +@@ -108,7 +114,10 @@ def _query_service(self, name): + """Query an individual service""" + if self.query_cmd: + try: +- return sos_get_command_output("%s %s" % (self.query_cmd, name)) ++ return sos_get_command_output( ++ "%s %s" % (self.query_cmd, name), ++ chroot=self.chroot ++ ) + except Exception: + return None + return None +diff --git a/sos/policies/init_systems/systemd.py b/sos/policies/init_systems/systemd.py +index 1b138f97b3..76dc57e27f 100644 +--- a/sos/policies/init_systems/systemd.py ++++ b/sos/policies/init_systems/systemd.py +@@ -15,11 +15,12 @@ + class SystemdInit(InitSystem): + """InitSystem abstraction for SystemD systems""" + +- def __init__(self): ++ def __init__(self, chroot=None): + super(SystemdInit, self).__init__( + init_cmd='systemctl', + list_cmd='list-unit-files --type=service', +- query_cmd='status' ++ query_cmd='status', ++ chroot=chroot + ) + self.load_all_services() + +@@ -30,7 +31,7 @@ def parse_query(self, output): + return 'unknown' + + def load_all_services(self): +- svcs = shell_out(self.list_cmd).splitlines()[1:] ++ svcs = shell_out(self.list_cmd, chroot=self.chroot).splitlines()[1:] + for line in svcs: + try: + name = line.split('.service')[0] +From e869bc84c714bfc2249bbcb84e14908049ee42c4 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Mon, 27 Sep 2021 12:07:08 -0400 +Subject: [PATCH] [Plugin,utilities] Add sysroot wrapper for os.path.join + +Adds a wrapper for `os.path.join()` which accounts for non-/ sysroots, +like we have done previously for other `os.path` methods. Further +updates `Plugin()` to use this wrapper where appropriate. 
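(In effect, the wrapper added to sos/utilities.py in the diff below behaves as follows; the usage lines are ours:)

import os

def path_join(path, *p, sysroot=os.sep):
    # Prefix the leading path with the sysroot unless it already starts
    # with it, then join the remaining components as os.path.join() does.
    if not path.startswith(sysroot):
        path = os.path.join(sysroot, path.lstrip(os.sep))
    return os.path.join(path, *p)

path_join("/etc", "hosts")                   # -> '/etc/hosts'
path_join("/etc", "hosts", sysroot="/host")  # -> '/host/etc/hosts'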
+ +Signed-off-by: Jake Hunsaker +--- + sos/report/plugins/__init__.py | 43 +++++++++++++++++----------------- + sos/utilities.py | 6 +++++ + 2 files changed, 28 insertions(+), 21 deletions(-) + +diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py +index c635b8de9..1f84bca49 100644 +--- a/sos/report/plugins/__init__.py ++++ b/sos/report/plugins/__init__.py +@@ -13,7 +13,7 @@ + from sos.utilities import (sos_get_command_output, import_module, grep, + fileobj, tail, is_executable, TIMEOUT_DEFAULT, + path_exists, path_isdir, path_isfile, path_islink, +- listdir) ++ listdir, path_join) + + import os + import glob +@@ -708,19 +708,6 @@ def _log_info(self, msg): + def _log_debug(self, msg): + self.soslog.debug(self._format_msg(msg)) + +- def join_sysroot(self, path): +- """Join a given path with the configured sysroot +- +- :param path: The filesystem path that needs to be joined +- :type path: ``str`` +- +- :returns: The joined filesystem path +- :rtype: ``str`` +- """ +- if path[0] == os.sep: +- path = path[1:] +- return os.path.join(self.sysroot, path) +- + def strip_sysroot(self, path): + """Remove the configured sysroot from a filesystem path + +@@ -1176,7 +1163,7 @@ def _copy_dir(self, srcpath): + + def _get_dest_for_srcpath(self, srcpath): + if self.use_sysroot(): +- srcpath = self.join_sysroot(srcpath) ++ srcpath = self.path_join(srcpath) + for copied in self.copied_files: + if srcpath == copied["srcpath"]: + return copied["dstpath"] +@@ -1284,7 +1271,7 @@ def add_forbidden_path(self, forbidden, recursive=False): + forbidden = [forbidden] + + if self.use_sysroot(): +- forbidden = [self.join_sysroot(f) for f in forbidden] ++ forbidden = [self.path_join(f) for f in forbidden] + + for forbid in forbidden: + self._log_info("adding forbidden path '%s'" % forbid) +@@ -1438,7 +1425,7 @@ def add_copy_spec(self, copyspecs, sizelimit=None, maxage=None, + since = self.get_option('since') + + logarchive_pattern = re.compile(r'.*((\.(zip|gz|bz2|xz))|[-.][\d]+)$') +- configfile_pattern = re.compile(r"^%s/*" % self.join_sysroot("etc")) ++ configfile_pattern = re.compile(r"^%s/*" % self.path_join("etc")) + + if not self.test_predicate(pred=pred): + self._log_info("skipped copy spec '%s' due to predicate (%s)" % +@@ -1468,7 +1455,7 @@ def add_copy_spec(self, copyspecs, sizelimit=None, maxage=None, + return False + + if self.use_sysroot(): +- copyspec = self.join_sysroot(copyspec) ++ copyspec = self.path_join(copyspec) + + files = self._expand_copy_spec(copyspec) + +@@ -1683,7 +1670,7 @@ def _add_device_cmd(self, cmds, devices, timeout=None, sizelimit=None, + if not _dev_ok: + continue + if prepend_path: +- device = os.path.join(prepend_path, device) ++ device = self.path_join(prepend_path, device) + _cmd = cmd % {'dev': device} + self._add_cmd_output(cmd=_cmd, timeout=timeout, + sizelimit=sizelimit, chroot=chroot, +@@ -2592,7 +2579,7 @@ def __expand(paths): + if self.path_isfile(path) or self.path_islink(path): + found_paths.append(path) + elif self.path_isdir(path) and self.listdir(path): +- found_paths.extend(__expand(os.path.join(path, '*'))) ++ found_paths.extend(__expand(self.path_join(path, '*'))) + else: + found_paths.append(path) + except PermissionError: +@@ -2608,7 +2595,7 @@ def __expand(paths): + if (os.access(copyspec, os.R_OK) and self.path_isdir(copyspec) and + self.listdir(copyspec)): + # the directory exists and is non-empty, recurse through it +- copyspec = os.path.join(copyspec, '*') ++ copyspec = self.path_join(copyspec, '*') + expanded = glob.glob(copyspec, 
recursive=True) + recursed_files = [] + for _path in expanded: +@@ -2877,6 +2864,20 @@ def listdir(self, path): + """ + return listdir(path, self.commons['cmdlineopts'].sysroot) + ++ def path_join(self, path, *p): ++ """Helper to call the sos.utilities wrapper that allows the ++ corresponding `os` call to account for sysroot ++ ++ :param path: The leading path passed to os.path.join() ++ :type path: ``str`` ++ ++ :param p: Following path section(s) to be joined with ``path``, ++ an empty parameter will result in a path that ends with ++ a separator ++ :type p: ``str`` ++ """ ++ return path_join(path, *p, sysroot=self.sysroot) ++ + def postproc(self): + """Perform any postprocessing. To be replaced by a plugin if required. + """ +diff --git a/sos/utilities.py b/sos/utilities.py +index c940e066d..b75751539 100644 +--- a/sos/utilities.py ++++ b/sos/utilities.py +@@ -242,6 +242,12 @@ def listdir(path, sysroot): + return _os_wrapper(path, sysroot, 'listdir', os) + + ++def path_join(path, *p, sysroot=os.sep): ++ if not path.startswith(sysroot): ++ path = os.path.join(sysroot, path.lstrip(os.sep)) ++ return os.path.join(path, *p) ++ ++ + class AsyncReader(threading.Thread): + """Used to limit command output to a given size without deadlocking + sos. +From 9596473d1779b9c48e9923c220aaf2b8d9b3bebf Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Thu, 18 Nov 2021 13:17:14 -0500 +Subject: [PATCH] [global] Align sysroot determination and usage across sos + +The determination of sysroot - being automatic, user-specified, or +controlled via environment variables in a container - has gotten muddied +over time. This has resulted in different parts of the project; +`Policy`, `Plugin`, `SoSComponent`, etc... to not always be in sync when +sysroot is not `/`, thus causing varying and unexpected/unintended +behavior. + +Fix this by only determining sysroot within `Policy()` initialization, +and then using that determination across all aspects of the project that +use or reference sysroot. + +This results in several changes: + +- `PackageManager()` will now (again) correctly reference host package + lists when sos is run in a container. + +- `ContainerRuntime()` is now able to activate when sos is running in a + container. + +- Plugins will now properly use sysroot for _all_ plugin enablement + triggers. + +- Plugins, Policy, and SoSComponents now all reference the + `self.sysroot` variable, rather than changing between `sysroot`. +`_host_sysroot`, and `commons['sysroot']`. `_host_sysroot` has been +removed from `Policy`. 
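(The container detection this consolidation hinges on reduces to two environment checks; a simplified editorial sketch of LinuxPolicy._container_init() as shown in the diff below, with a hypothetical function name:)

import os

ENV_CONTAINER = 'container'    # set by docker, podman and crio
ENV_HOST_SYSROOT = 'HOST'      # where the host's / is mounted inside

def detect_container_sysroot():
    # Return the host sysroot when sos runs in a container with the
    # host filesystem mounted in; otherwise None (sysroot stays '/').
    in_container = os.environ.get(ENV_CONTAINER) in ('docker', 'oci', 'podman')
    host_sysroot = os.environ.get(ENV_HOST_SYSROOT)
    return host_sysroot if in_container and host_sysroot else None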
+ +Signed-off-by: Jake Hunsaker +--- + sos/archive.py | 2 +- + sos/component.py | 2 +- + sos/policies/__init__.py | 11 +---------- + sos/policies/distros/__init__.py | 33 +++++++++++++++++++------------ + sos/policies/distros/debian.py | 2 +- + sos/policies/distros/redhat.py | 3 +-- + sos/policies/runtimes/__init__.py | 15 +++++++++----- + sos/policies/runtimes/docker.py | 4 ++-- + sos/report/__init__.py | 6 ++---- + sos/report/plugins/__init__.py | 22 +++++++++++---------- + sos/report/plugins/unpackaged.py | 7 ++++--- + sos/utilities.py | 13 ++++++++---- + 12 files changed, 64 insertions(+), 56 deletions(-) + +diff --git a/sos/archive.py b/sos/archive.py +index b02b247595..e3c68b7789 100644 +--- a/sos/archive.py ++++ b/sos/archive.py +@@ -153,7 +153,7 @@ def dest_path(self, name): + return (os.path.join(self._archive_root, name)) + + def join_sysroot(self, path): +- if path.startswith(self.sysroot): ++ if not self.sysroot or path.startswith(self.sysroot): + return path + if path[0] == os.sep: + path = path[1:] +diff --git a/sos/component.py b/sos/component.py +index 5ac6e47f4f..dba0aabf2b 100644 +--- a/sos/component.py ++++ b/sos/component.py +@@ -109,7 +109,7 @@ def __init__(self, parser, parsed_args, cmdline_args): + try: + import sos.policies + self.policy = sos.policies.load(sysroot=self.opts.sysroot) +- self.sysroot = self.policy.host_sysroot() ++ self.sysroot = self.policy.sysroot + except KeyboardInterrupt: + self._exit(0) + self._is_root = self.policy.is_root() +diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py +index fb8db1d724..ef9188deb4 100644 +--- a/sos/policies/__init__.py ++++ b/sos/policies/__init__.py +@@ -110,7 +110,6 @@ class Policy(object): + presets = {"": PresetDefaults()} + presets_path = PRESETS_PATH + _in_container = False +- _host_sysroot = '/' + + def __init__(self, sysroot=None, probe_runtime=True): + """Subclasses that choose to override this initializer should call +@@ -124,7 +123,7 @@ def __init__(self, sysroot=None, probe_runtime=True): + self.package_manager = PackageManager() + self.valid_subclasses = [IndependentPlugin] + self.set_exec_path() +- self._host_sysroot = sysroot ++ self.sysroot = sysroot + self.register_presets(GENERIC_PRESETS) + + def check(self, remote=''): +@@ -177,14 +176,6 @@ def in_container(self): + """ + return self._in_container + +- def host_sysroot(self): +- """Get the host's default sysroot +- +- :returns: Host sysroot +- :rtype: ``str`` or ``None`` +- """ +- return self._host_sysroot +- + def dist_version(self): + """ + Return the OS version +diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py +index 7bdc81b852..c69fc1e73c 100644 +--- a/sos/policies/distros/__init__.py ++++ b/sos/policies/distros/__init__.py +@@ -71,19 +71,18 @@ class LinuxPolicy(Policy): + def __init__(self, sysroot=None, init=None, probe_runtime=True): + super(LinuxPolicy, self).__init__(sysroot=sysroot, + probe_runtime=probe_runtime) +- self.init_kernel_modules() + +- # need to set _host_sysroot before PackageManager() + if sysroot: +- self._container_init() +- self._host_sysroot = sysroot ++ self.sysroot = sysroot + else: +- sysroot = self._container_init() ++ self.sysroot = self._container_init() ++ ++ self.init_kernel_modules() + + if init is not None: + self.init_system = init + elif os.path.isdir("/run/systemd/system/"): +- self.init_system = SystemdInit(chroot=sysroot) ++ self.init_system = SystemdInit(chroot=self.sysroot) + else: + self.init_system = InitSystem() + +@@ -149,27 +148,30 @@ def 
_container_init(self): + if os.environ[ENV_CONTAINER] in ['docker', 'oci', 'podman']: + self._in_container = True + if ENV_HOST_SYSROOT in os.environ: +- self._host_sysroot = os.environ[ENV_HOST_SYSROOT] +- use_sysroot = self._in_container and self._host_sysroot is not None ++ _host_sysroot = os.environ[ENV_HOST_SYSROOT] ++ use_sysroot = self._in_container and _host_sysroot is not None + if use_sysroot: +- host_tmp_dir = os.path.abspath(self._host_sysroot + self._tmp_dir) ++ host_tmp_dir = os.path.abspath(_host_sysroot + self._tmp_dir) + self._tmp_dir = host_tmp_dir +- return self._host_sysroot if use_sysroot else None ++ return _host_sysroot if use_sysroot else None + + def init_kernel_modules(self): + """Obtain a list of loaded kernel modules to reference later for plugin + enablement and SoSPredicate checks + """ + self.kernel_mods = [] ++ release = os.uname().release + + # first load modules from lsmod +- lines = shell_out("lsmod", timeout=0).splitlines() ++ lines = shell_out("lsmod", timeout=0, chroot=self.sysroot).splitlines() + self.kernel_mods.extend([ + line.split()[0].strip() for line in lines[1:] + ]) + + # next, include kernel builtins +- builtins = "/usr/lib/modules/%s/modules.builtin" % os.uname().release ++ builtins = self.join_sysroot( ++ "/usr/lib/modules/%s/modules.builtin" % release ++ ) + try: + with open(builtins, "r") as mfile: + for line in mfile: +@@ -186,7 +188,7 @@ def init_kernel_modules(self): + 'dm_mod': 'CONFIG_BLK_DEV_DM' + } + +- booted_config = "/boot/config-%s" % os.uname().release ++ booted_config = self.join_sysroot("/boot/config-%s" % release) + kconfigs = [] + try: + with open(booted_config, "r") as kfile: +@@ -200,6 +202,11 @@ def init_kernel_modules(self): + if config_strings[builtin] in kconfigs: + self.kernel_mods.append(builtin) + ++ def join_sysroot(self, path): ++ if self.sysroot and self.sysroot != '/': ++ path = os.path.join(self.sysroot, path.lstrip('/')) ++ return path ++ + def pre_work(self): + # this method will be called before the gathering begins + +diff --git a/sos/policies/distros/debian.py b/sos/policies/distros/debian.py +index 95b389a65e..639fd5eba3 100644 +--- a/sos/policies/distros/debian.py ++++ b/sos/policies/distros/debian.py +@@ -27,7 +27,7 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, + remote_exec=None): + super(DebianPolicy, self).__init__(sysroot=sysroot, init=init, + probe_runtime=probe_runtime) +- self.package_manager = DpkgPackageManager(chroot=sysroot, ++ self.package_manager = DpkgPackageManager(chroot=self.sysroot, + remote_exec=remote_exec) + self.valid_subclasses += [DebianPlugin] + +diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py +index eb44240736..4b14abaf3a 100644 +--- a/sos/policies/distros/redhat.py ++++ b/sos/policies/distros/redhat.py +@@ -42,7 +42,6 @@ class RedHatPolicy(LinuxPolicy): + _redhat_release = '/etc/redhat-release' + _tmp_dir = "/var/tmp" + _in_container = False +- _host_sysroot = '/' + default_scl_prefix = '/opt/rh' + name_pattern = 'friendly' + upload_url = None +@@ -57,7 +56,7 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, + probe_runtime=probe_runtime) + self.usrmove = False + +- self.package_manager = RpmPackageManager(chroot=sysroot, ++ self.package_manager = RpmPackageManager(chroot=self.sysroot, + remote_exec=remote_exec) + + self.valid_subclasses += [RedHatPlugin] +diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py +index f28d6a1df3..2e60ad2361 100644 +--- 
a/sos/policies/runtimes/__init__.py ++++ b/sos/policies/runtimes/__init__.py +@@ -64,7 +64,7 @@ def check_is_active(self): + :returns: ``True`` if the runtime is active, else ``False`` + :rtype: ``bool`` + """ +- if is_executable(self.binary): ++ if is_executable(self.binary, self.policy.sysroot): + self.active = True + return True + return False +@@ -78,7 +78,7 @@ def get_containers(self, get_all=False): + containers = [] + _cmd = "%s ps %s" % (self.binary, '-a' if get_all else '') + if self.active: +- out = sos_get_command_output(_cmd) ++ out = sos_get_command_output(_cmd, chroot=self.policy.sysroot) + if out['status'] == 0: + for ent in out['output'].splitlines()[1:]: + ent = ent.split() +@@ -112,8 +112,10 @@ def get_images(self): + images = [] + fmt = '{{lower .Repository}}:{{lower .Tag}} {{lower .ID}}' + if self.active: +- out = sos_get_command_output("%s images --format '%s'" +- % (self.binary, fmt)) ++ out = sos_get_command_output( ++ "%s images --format '%s'" % (self.binary, fmt), ++ chroot=self.policy.sysroot ++ ) + if out['status'] == 0: + for ent in out['output'].splitlines(): + ent = ent.split() +@@ -129,7 +131,10 @@ def get_volumes(self): + """ + vols = [] + if self.active: +- out = sos_get_command_output("%s volume ls" % self.binary) ++ out = sos_get_command_output( ++ "%s volume ls" % self.binary, ++ chroot=self.policy.sysroot ++ ) + if out['status'] == 0: + for ent in out['output'].splitlines()[1:]: + ent = ent.split() +diff --git a/sos/policies/runtimes/docker.py b/sos/policies/runtimes/docker.py +index 759dfaf6a0..e81f580ec3 100644 +--- a/sos/policies/runtimes/docker.py ++++ b/sos/policies/runtimes/docker.py +@@ -18,9 +18,9 @@ class DockerContainerRuntime(ContainerRuntime): + name = 'docker' + binary = 'docker' + +- def check_is_active(self): ++ def check_is_active(self, sysroot=None): + # the daemon must be running +- if (is_executable('docker') and ++ if (is_executable('docker', sysroot) and + (self.policy.init_system.is_running('docker') or + self.policy.init_system.is_running('snap.docker.dockerd'))): + self.active = True +diff --git a/sos/report/__init__.py b/sos/report/__init__.py +index a4c92accd3..a6c72778fc 100644 +--- a/sos/report/__init__.py ++++ b/sos/report/__init__.py +@@ -173,14 +173,12 @@ def __init__(self, parser, args, cmdline): + self._set_directories() + + msg = "default" +- host_sysroot = self.policy.host_sysroot() ++ self.sysroot = self.policy.sysroot + # set alternate system root directory + if self.opts.sysroot: + msg = "cmdline" +- self.sysroot = self.opts.sysroot +- elif self.policy.in_container() and host_sysroot != os.sep: ++ elif self.policy.in_container() and self.sysroot != os.sep: + msg = "policy" +- self.sysroot = host_sysroot + self.soslog.debug("set sysroot to '%s' (%s)" % (self.sysroot, msg)) + + if self.opts.chroot not in chroot_modes: +diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py +index 46028bb124..e180ae1727 100644 +--- a/sos/report/plugins/__init__.py ++++ b/sos/report/plugins/__init__.py +@@ -724,7 +724,7 @@ def strip_sysroot(self, path): + """ + if not self.use_sysroot(): + return path +- if path.startswith(self.sysroot): ++ if self.sysroot and path.startswith(self.sysroot): + return path[len(self.sysroot):] + return path + +@@ -743,8 +743,10 @@ def tmp_in_sysroot(self): + ``False`` + :rtype: ``bool`` + """ +- paths = [self.sysroot, self.archive.get_tmp_dir()] +- return os.path.commonprefix(paths) == self.sysroot ++ # if sysroot is still None, that implies '/' ++ _sysroot = self.sysroot or '/' ++ 
paths = [_sysroot, self.archive.get_tmp_dir()] ++ return os.path.commonprefix(paths) == _sysroot + + def is_installed(self, package_name): + """Is the package $package_name installed? +@@ -2621,7 +2623,7 @@ def __expand(paths): + return list(set(expanded)) + + def _collect_copy_specs(self): +- for path in self.copy_paths: ++ for path in sorted(self.copy_paths, reverse=True): + self._log_info("collecting path '%s'" % path) + self._do_copy_path(path) + self.generate_copyspec_tags() +@@ -2749,7 +2751,7 @@ def _check_plugin_triggers(self, files, packages, commands, services, + + return ((any(self.path_exists(fname) for fname in files) or + any(self.is_installed(pkg) for pkg in packages) or +- any(is_executable(cmd) for cmd in commands) or ++ any(is_executable(cmd, self.sysroot) for cmd in commands) or + any(self.is_module_loaded(mod) for mod in self.kernel_mods) or + any(self.is_service(svc) for svc in services) or + any(self.container_exists(cntr) for cntr in containers)) and +@@ -2817,7 +2819,7 @@ def path_exists(self, path): + :returns: True if the path exists in sysroot, else False + :rtype: ``bool`` + """ +- return path_exists(path, self.commons['cmdlineopts'].sysroot) ++ return path_exists(path, self.sysroot) + + def path_isdir(self, path): + """Helper to call the sos.utilities wrapper that allows the +@@ -2830,7 +2832,7 @@ def path_isdir(self, path): + :returns: True if the path is a dir, else False + :rtype: ``bool`` + """ +- return path_isdir(path, self.commons['cmdlineopts'].sysroot) ++ return path_isdir(path, self.sysroot) + + def path_isfile(self, path): + """Helper to call the sos.utilities wrapper that allows the +@@ -2843,7 +2845,7 @@ def path_isfile(self, path): + :returns: True if the path is a file, else False + :rtype: ``bool`` + """ +- return path_isfile(path, self.commons['cmdlineopts'].sysroot) ++ return path_isfile(path, self.sysroot) + + def path_islink(self, path): + """Helper to call the sos.utilities wrapper that allows the +@@ -2856,7 +2858,7 @@ def path_islink(self, path): + :returns: True if the path is a link, else False + :rtype: ``bool`` + """ +- return path_islink(path, self.commons['cmdlineopts'].sysroot) ++ return path_islink(path, self.sysroot) + + def listdir(self, path): + """Helper to call the sos.utilities wrapper that allows the +@@ -2869,7 +2871,7 @@ def listdir(self, path): + :returns: Contents of path, if it is a directory + :rtype: ``list`` + """ +- return listdir(path, self.commons['cmdlineopts'].sysroot) ++ return listdir(path, self.sysroot) + + def path_join(self, path, *p): + """Helper to call the sos.utilities wrapper that allows the +diff --git a/sos/report/plugins/unpackaged.py b/sos/report/plugins/unpackaged.py +index 772b1d1fbb..24203c4b13 100644 +--- a/sos/report/plugins/unpackaged.py ++++ b/sos/report/plugins/unpackaged.py +@@ -58,10 +58,11 @@ def format_output(files): + """ + expanded = [] + for f in files: +- if self.path_islink(f): +- expanded.append("{} -> {}".format(f, os.readlink(f))) ++ fp = self.path_join(f) ++ if self.path_islink(fp): ++ expanded.append("{} -> {}".format(fp, os.readlink(fp))) + else: +- expanded.append(f) ++ expanded.append(fp) + return expanded + + # Check command predicate to avoid costly processing +diff --git a/sos/utilities.py b/sos/utilities.py +index b757515397..d66309334b 100644 +--- a/sos/utilities.py ++++ b/sos/utilities.py +@@ -96,11 +96,15 @@ def grep(pattern, *files_or_paths): + return matches + + +-def is_executable(command): ++def is_executable(command, sysroot=None): + """Returns if a command 
matches an executable on the PATH""" + + paths = os.environ.get("PATH", "").split(os.path.pathsep) + candidates = [command] + [os.path.join(p, command) for p in paths] ++ if sysroot: ++ candidates += [ ++ os.path.join(sysroot, c.lstrip('/')) for c in candidates ++ ] + return any(os.access(path, os.X_OK) for path in candidates) + + +@@ -216,8 +220,9 @@ def get_human_readable(size, precision=2): + + + def _os_wrapper(path, sysroot, method, module=os.path): +- if sysroot not in [None, '/']: +- path = os.path.join(sysroot, path.lstrip('/')) ++ if sysroot and sysroot != os.sep: ++ if not path.startswith(sysroot): ++ path = os.path.join(sysroot, path.lstrip('/')) + _meth = getattr(module, method) + return _meth(path) + +@@ -243,7 +248,7 @@ def listdir(path, sysroot): + + + def path_join(path, *p, sysroot=os.sep): +- if not path.startswith(sysroot): ++ if sysroot and not path.startswith(sysroot): + path = os.path.join(sysroot, path.lstrip(os.sep)) + return os.path.join(path, *p) + +From a43124e1f6217107838eed4d70339d100cbbc77a Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Wed, 9 Feb 2022 19:45:27 +0100 +Subject: [PATCH] [policies] Set fallback to None sysroot + +Commit 9596473 introduced a regression that allows sysroot to be set to None +when running sos report on a regular system (outside a container). In +such a case, we need to fall back to the '/' sysroot. + +Resolves: #2846 + +Signed-off-by: Pavel Moravec +--- + sos/policies/distros/__init__.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py +index f3c1de11..9048f1c4 100644 +--- a/sos/policies/distros/__init__.py ++++ b/sos/policies/distros/__init__.py +@@ -78,7 +78,7 @@ class LinuxPolicy(Policy): + if sysroot: + self.sysroot = sysroot + else: +- self.sysroot = self._container_init() ++ self.sysroot = self._container_init() or '/' + + self.init_kernel_modules() + +-- +2.34.1 + diff --git a/SOURCES/sos-bz1886711-enhance-tc-hw-offload.patch b/SOURCES/sos-bz1886711-enhance-tc-hw-offload.patch deleted file mode 100644 index ed1088b..0000000 --- a/SOURCES/sos-bz1886711-enhance-tc-hw-offload.patch +++ /dev/null @@ -1,32 +0,0 @@ -From bbb7f8bf522960a8ca7625f539e9e5d109abb704 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Wed, 19 May 2021 08:31:45 +0200 -Subject: [PATCH] [networking] collect also tc filter show ingress - -Both "tc -s filter show dev %eth [|ingress]" commands are required, as -they provide different output. 
- -Resolves: #2550 - -Signed-off-by: Pavel Moravec ---- - sos/report/plugins/networking.py | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/sos/report/plugins/networking.py b/sos/report/plugins/networking.py -index acfa027f..35646268 100644 ---- a/sos/report/plugins/networking.py -+++ b/sos/report/plugins/networking.py -@@ -156,7 +156,8 @@ class Networking(Plugin): - "ethtool --phy-statistics " + eth, - "ethtool --show-priv-flags " + eth, - "ethtool --show-eee " + eth, -- "tc -s filter show dev " + eth -+ "tc -s filter show dev " + eth, -+ "tc -s filter show dev " + eth + " ingress", - ], tags=eth) - - # skip EEPROM collection by default, as it might hang or --- -2.26.3 - diff --git a/SOURCES/sos-bz1923938-sos-log-effective-options.patch b/SOURCES/sos-bz1923938-sos-log-effective-options.patch deleted file mode 100644 index 120df02..0000000 --- a/SOURCES/sos-bz1923938-sos-log-effective-options.patch +++ /dev/null @@ -1,284 +0,0 @@ -From 00d12ad3cf24dcc6c73e9bcf63db1d3f17e58bb1 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Thu, 1 Jul 2021 10:50:54 -0400 -Subject: [PATCH] [sosnode] Properly format skip-commands and skip-files on - nodes - -Fixes an issue where options provided for `skip-commands` and -`skip-files` were not properly formatted, thus causing an exception -during the finalization of the node's sos command. - -Signed-off-by: Jake Hunsaker ---- - sos/collector/sosnode.py | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index 6597d236..426edcba 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -734,11 +734,12 @@ class SosNode(): - if self.check_sos_version('4.1'): - if self.opts.skip_commands: - sos_opts.append( -- '--skip-commands=%s' % (quote(self.opts.skip_commands)) -+ '--skip-commands=%s' % ( -+ quote(','.join(self.opts.skip_commands))) - ) - if self.opts.skip_files: - sos_opts.append( -- '--skip-files=%s' % (quote(self.opts.skip_files)) -+ '--skip-files=%s' % (quote(','.join(self.opts.skip_files))) - ) - - if self.check_sos_version('4.2'): --- -2.31.1 - -From de7edce3f92ed50abcb28dd0dbcbeb104dc7c679 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Fri, 2 Jul 2021 09:52:11 +0200 -Subject: [PATCH] [collector] fix a typo in --plugin-option - -Sos report uses --plugin-option or --plugopts. - -Relevant: #2606 - -Signed-off-by: Pavel Moravec ---- - sos/collector/__init__.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index 6d96d692..f072287e 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -272,7 +272,7 @@ class SoSCollector(SoSComponent): - help="chroot executed commands to SYSROOT") - sos_grp.add_argument('-e', '--enable-plugins', action="extend", - help='Enable specific plugins for sosreport') -- sos_grp.add_argument('-k', '--plugin-options', action="extend", -+ sos_grp.add_argument('-k', '--plugin-option', action="extend", - help='Plugin option as plugname.option=value') - sos_grp.add_argument('--log-size', default=0, type=int, - help='Limit the size of individual logs (in MiB)') --- -2.31.1 - -From 24a79ae8df8f29276f6139c68d4ba9b05114f951 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Fri, 2 Jul 2021 09:53:47 +0200 -Subject: [PATCH] [options] allow variant option names in config file - -While cmdline allows --plugin-option as well as --plugopts, -it stores the value under `plugopts` key. 
Therefore, parsing the -config file ignores --plugin-option. - -Similarly for --name/--label and --profile/--profiles. - -When processing the config file, we must unify those potentially duplicated -keys. - -Resolves: #2606 - -Signed-off-by: Pavel Moravec ---- - sos/options.py | 9 +++++++++ - 1 file changed, 9 insertions(+) - -diff --git a/sos/options.py b/sos/options.py -index 1eda55d6..a014a022 100644 ---- a/sos/options.py -+++ b/sos/options.py -@@ -186,9 +186,18 @@ class SoSOptions(): - if 'verbose' in odict.keys(): - odict['verbosity'] = int(odict.pop('verbose')) - # convert options names -+ # unify some of them if multiple variants of the -+ # cmdoption exist -+ rename_opts = { -+ 'name': 'label', -+ 'plugin_option': 'plugopts', -+ 'profile': 'profiles' -+ } - for key in list(odict): - if '-' in key: - odict[key.replace('-', '_')] = odict.pop(key) -+ if key in rename_opts: -+ odict[rename_opts[key]] = odict.pop(key) - # set the values according to the config file - for key, val in odict.items(): - if isinstance(val, str): --- -2.31.1 - -From c7d3644c0c64e9e5439806250592a55c8e2de26f Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 1 Jul 2021 08:11:15 +0200 -Subject: [PATCH] [report,collect] unify --map-file arguments - -Unify --map[-file] argument among report/collect/clean. - -Resolves: #2602 - -Signed-off-by: Pavel Moravec ---- - sos/cleaner/__init__.py | 2 +- - sos/collector/__init__.py | 2 +- - sos/report/__init__.py | 2 +- - 3 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py -index 7414b55e0..4c9837826 100644 ---- a/sos/cleaner/__init__.py -+++ b/sos/cleaner/__init__.py -@@ -192,7 +192,7 @@ def add_parser_options(cls, parser): - 'file for obfuscation')) - clean_grp.add_argument('--no-update', dest='no_update', default=False, - action='store_true', -- help='Do not update the --map file with new ' -+ help='Do not update the --map-file with new ' - 'mappings from this run') - clean_grp.add_argument('--keep-binary-files', default=False, - action='store_true', -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index 7b8cfcf72..6d96d6923 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -427,7 +427,7 @@ def add_parser_options(cls, parser): - cleaner_grp.add_argument('--no-update', action='store_true', - default=False, dest='no_update', - help='Do not update the default cleaner map') -- cleaner_grp.add_argument('--map', dest='map_file', -+ cleaner_grp.add_argument('--map-file', dest='map_file', - default='/etc/sos/cleaner/default_mapping', - help=('Provide a previously generated mapping' - ' file for obfuscation')) -diff --git a/sos/report/__init__.py b/sos/report/__init__.py -index 7ad2d24a4..411c4eb03 100644 ---- a/sos/report/__init__.py -+++ b/sos/report/__init__.py -@@ -341,7 +341,7 @@ def add_parser_options(cls, parser): - cleaner_grp.add_argument('--no-update', action='store_true', - default=False, dest='no_update', - help='Do not update the default cleaner map') -- cleaner_grp.add_argument('--map', dest='map_file', -+ cleaner_grp.add_argument('--map-file', dest='map_file', - default='/etc/sos/cleaner/default_mapping', - help=('Provide a previously generated mapping' - ' file for obfuscation')) -From fd75745e7a5a6c5def8e6d23190227872b9912c3 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 11 Aug 2021 10:48:41 -0400 -Subject: [PATCH] [sosnode] Fix passing of plugin options when using - `--only-plugins` - -Fixes the handling of plugin options passed by `sos collect` to 
each -node by first aligning the SoSOption name to those of `report` -(`plugopts`), and second re-arranges the handling of plugin options and -preset options passed by the user when also using `--only-plugins` so -that the former are preserved and passed only with the `--only-plugins` -option value. - -Resolves: #2641 - -Signed-off-by: Jake Hunsaker ---- - sos/collector/__init__.py | 5 +++-- - sos/collector/sosnode.py | 34 +++++++++++++++++----------------- - 2 files changed, 20 insertions(+), 19 deletions(-) - -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index 57ef074e..70b7a69e 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -84,7 +84,7 @@ class SoSCollector(SoSComponent): - 'only_plugins': [], - 'password': False, - 'password_per_node': False, -- 'plugin_options': [], -+ 'plugopts': [], - 'plugin_timeout': None, - 'cmd_timeout': None, - 'preset': '', -@@ -273,7 +273,8 @@ class SoSCollector(SoSComponent): - help="chroot executed commands to SYSROOT") - sos_grp.add_argument('-e', '--enable-plugins', action="extend", - help='Enable specific plugins for sosreport') -- sos_grp.add_argument('-k', '--plugin-option', action="extend", -+ sos_grp.add_argument('-k', '--plugin-option', '--plugopts', -+ action="extend", dest='plugopts', - help='Plugin option as plugname.option=value') - sos_grp.add_argument('--log-size', default=0, type=int, - help='Limit the size of individual logs (in MiB)') -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index 426edcba..5d05c297 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -667,10 +667,10 @@ class SosNode(): - - if self.cluster.sos_plugin_options: - for opt in self.cluster.sos_plugin_options: -- if not any(opt in o for o in self.plugin_options): -+ if not any(opt in o for o in self.plugopts): - option = '%s=%s' % (opt, - self.cluster.sos_plugin_options[opt]) -- self.plugin_options.append(option) -+ self.plugopts.append(option) - - # set master-only options - if self.cluster.check_node_is_master(self): -@@ -688,7 +688,7 @@ class SosNode(): - self.only_plugins = list(self.opts.only_plugins) - self.skip_plugins = list(self.opts.skip_plugins) - self.enable_plugins = list(self.opts.enable_plugins) -- self.plugin_options = list(self.opts.plugin_options) -+ self.plugopts = list(self.opts.plugopts) - self.preset = list(self.opts.preset) - - def finalize_sos_cmd(self): -@@ -754,6 +754,20 @@ class SosNode(): - os.path.join(self.host.sos_bin_path, self.sos_bin) - ) - -+ if self.plugopts: -+ opts = [o for o in self.plugopts -+ if self._plugin_exists(o.split('.')[0]) -+ and self._plugin_option_exists(o.split('=')[0])] -+ if opts: -+ sos_opts.append('-k %s' % quote(','.join(o for o in opts))) -+ -+ if self.preset: -+ if self._preset_exists(self.preset): -+ sos_opts.append('--preset=%s' % quote(self.preset)) -+ else: -+ self.log_debug('Requested to enable preset %s but preset does ' -+ 'not exist on node' % self.preset) -+ - if self.only_plugins: - plugs = [o for o in self.only_plugins if self._plugin_exists(o)] - if len(plugs) != len(self.only_plugins): -@@ -792,20 +806,6 @@ class SosNode(): - if enable: - sos_opts.append('--enable-plugins=%s' % quote(enable)) - -- if self.plugin_options: -- opts = [o for o in self.plugin_options -- if self._plugin_exists(o.split('.')[0]) -- and self._plugin_option_exists(o.split('=')[0])] -- if opts: -- sos_opts.append('-k %s' % quote(','.join(o for o in opts))) -- -- if self.preset: -- if self._preset_exists(self.preset): -- 
sos_opts.append('--preset=%s' % quote(self.preset)) -- else: -- self.log_debug('Requested to enable preset %s but preset does ' -- 'not exist on node' % self.preset) -- - self.sos_cmd = "%s %s" % (sos_cmd, ' '.join(sos_opts)) - self.log_info('Final sos command set to %s' % self.sos_cmd) - self.manifest.add_field('final_sos_command', self.sos_cmd) --- -2.31.1 - diff --git a/SOURCES/sos-bz1925419-all-gluster-files.patch b/SOURCES/sos-bz1925419-all-gluster-files.patch deleted file mode 100644 index ab24429..0000000 --- a/SOURCES/sos-bz1925419-all-gluster-files.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 4fb834ec862228afb276ccbd45aa86c66044ea66 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Mon, 15 Mar 2021 09:09:51 +0100 -Subject: [PATCH] [gluster] collect public keys from the right dir - -Collection of the glusterfind dir is already achieved via /var/lib/glusterd, -so it does not need to be collected explicitly. - -The /var/lib/glusterd/glusterfind/.keys/ subdir is required to be -explicitly collected, as add_copy_spec uses glob.glob(), which skips -hidden files. - -Resolves: #2451 - -Signed-off-by: Pavel Moravec -Signed-off-by: Jake Hunsaker ---- - sos/report/plugins/gluster.py | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - -diff --git a/sos/report/plugins/gluster.py b/sos/report/plugins/gluster.py -index e1a89df2..952cab63 100644 ---- a/sos/report/plugins/gluster.py -+++ b/sos/report/plugins/gluster.py -@@ -76,9 +76,8 @@ class Gluster(Plugin, RedHatPlugin): - "/var/lib/glusterd/", - # collect nfs-ganesha related configuration - "/run/gluster/shared_storage/nfs-ganesha/", -- # collect status files and public ssh keys -- "/var/lib/glusterd/.keys/", -- "/var/lib/glusterd/glusterfind/" -+ # collect public ssh keys (a_s_c skips implicit hidden files) -+ "/var/lib/glusterd/glusterfind/.keys/", - ] + glob.glob('/run/gluster/*tier-dht/*')) - - if not self.get_option("all_logs"): --- -2.26.3 - diff --git a/SOURCES/sos-bz1930181-collect-cleaning-consistency.patch b/SOURCES/sos-bz1930181-collect-cleaning-consistency.patch deleted file mode 100644 index 0ded10a..0000000 --- a/SOURCES/sos-bz1930181-collect-cleaning-consistency.patch +++ /dev/null @@ -1,243 +0,0 @@ -From fc0218638f3e865c4315823e72aef2f46d012d07 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 14 Apr 2021 11:55:03 -0400 -Subject: [PATCH 1/2] [clean] Load maps from all archives before obfuscation - loop - -Previously, maps were being prepped from each archive after extraction. This -reduced the amount of file IO being done, but meant that necessary -obfuscations from later archives in a series were missing from -the archives processed before those later archives were extracted. - -Fix this by extracting the map prep files into memory for each archive -to prep the maps before we enter the obfuscation loop entirely. - -Closes: #2490 -Related: RHBZ#1930181 -Resolves: #2492 - -Signed-off-by: Jake Hunsaker ---- - sos/cleaner/__init__.py | 69 +++++++++++++++----------- - sos/cleaner/parsers/username_parser.py | 13 +++-- - 2 files changed, 45 insertions(+), 37 deletions(-) - -diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py -index b9eb61ef..d10cdc55 100644 ---- a/sos/cleaner/__init__.py -+++ b/sos/cleaner/__init__.py -@@ -292,6 +292,7 @@ third party. - - # we have at least one valid target to obfuscate - self.completed_reports = [] -+ self.preload_all_archives_into_maps() - self.obfuscate_report_paths() - - if not self.completed_reports: -@@ -473,6 +474,44 @@ third party. 
- self.ui_log.info("Exiting on user cancel") - os._exit(130) - -+ def preload_all_archives_into_maps(self): -+ """Before doing the actual obfuscation, if we have multiple archives -+ to obfuscate then we need to preload each of them into the mappings -+ to ensure that node1 is obfuscated in node2 as well as node2 being -+ obfuscated in node1's archive. -+ """ -+ self.log_info("Pre-loading multiple archives into obfuscation maps") -+ for _arc in self.report_paths: -+ is_dir = os.path.isdir(_arc) -+ if is_dir: -+ _arc_name = _arc -+ else: -+ archive = tarfile.open(_arc) -+ _arc_name = _arc.split('/')[-1].split('.tar')[0] -+ # for each parser, load the map_prep_file into memory, and then -+ # send that for obfuscation. We don't actually obfuscate the file -+ # here, do that in the normal archive loop -+ for _parser in self.parsers: -+ if not _parser.prep_map_file: -+ continue -+ _arc_path = os.path.join(_arc_name, _parser.prep_map_file) -+ try: -+ if is_dir: -+ _pfile = open(_arc_path, 'r') -+ content = _pfile.read() -+ else: -+ _pfile = archive.extractfile(_arc_path) -+ content = _pfile.read().decode('utf-8') -+ _pfile.close() -+ if isinstance(_parser, SoSUsernameParser): -+ _parser.load_usernames_into_map(content) -+ for line in content.splitlines(): -+ if isinstance(_parser, SoSHostnameParser): -+ _parser.load_hostname_into_map(line) -+ self.obfuscate_line(line, _parser.prep_map_file) -+ except Exception as err: -+ self.log_debug("Could not prep %s: %s" % (_arc_path, err)) -+ - def obfuscate_report(self, report): - """Individually handle each archive or directory we've discovered by - running through each file therein. -@@ -493,7 +532,6 @@ third party. - start_time = datetime.now() - arc_md.add_field('start_time', start_time) - archive.extract() -- self.prep_maps_from_archive(archive) - archive.report_msg("Beginning obfuscation...") - - file_list = archive.get_file_list() -@@ -542,35 +580,6 @@ third party. - self.ui_log.info("Exception while processing %s: %s" - % (report, err)) - -- def prep_maps_from_archive(self, archive): -- """Open specific files from an archive and try to load those values -- into our mappings before iterating through the entire archive. -- -- Positional arguments: -- -- :param archive SoSObfuscationArchive: An open archive object -- """ -- for parser in self.parsers: -- if not parser.prep_map_file: -- continue -- prep_file = archive.get_file_path(parser.prep_map_file) -- if not prep_file: -- self.log_debug("Could not prepare %s: %s does not exist" -- % (parser.name, parser.prep_map_file), -- caller=archive.archive_name) -- continue -- # this is a bit clunky, but we need to load this particular -- # parser in a different way due to how hostnames are validated for -- # obfuscation -- if isinstance(parser, SoSHostnameParser): -- with open(prep_file, 'r') as host_file: -- hostname = host_file.readline().strip() -- parser.load_hostname_into_map(hostname) -- if isinstance(parser, SoSUsernameParser): -- parser.load_usernames_into_map(prep_file) -- self.obfuscate_file(prep_file, parser.prep_map_file, -- archive.archive_name) -- - def obfuscate_file(self, filename, short_name=None, arc_name=None): - """Obfuscate and individual file, line by line. 
- -diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py -index 5223c018..2bb6c7f3 100644 ---- a/sos/cleaner/parsers/username_parser.py -+++ b/sos/cleaner/parsers/username_parser.py -@@ -39,16 +39,15 @@ class SoSUsernameParser(SoSCleanerParser): - super(SoSUsernameParser, self).__init__(conf_file) - self.mapping.load_names_from_options(opt_names) - -- def load_usernames_into_map(self, fname): -+ def load_usernames_into_map(self, content): - """Since we don't get the list of usernames from a straight regex for - this parser, we need to override the initial parser prepping here. - """ -- with open(fname, 'r') as lastfile: -- for line in lastfile.read().splitlines()[1:]: -- user = line.split()[0] -- if user in self.skip_list: -- continue -- self.mapping.get(user) -+ for line in content.splitlines()[1:]: -+ user = line.split()[0] -+ if user in self.skip_list: -+ continue -+ self.mapping.get(user) - - def parse_line(self, line): - count = 0 --- -2.26.3 - - -From b713f458bfa92427147de754ea36054bfde53d71 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 14 Apr 2021 12:22:28 -0400 -Subject: [PATCH 2/2] [clean] Remove duplicate file skipping within - obfuscate_line() - -A redundant file skipping check was being executed within -`obfuscate_line()` that would cause subsequent archives being obfuscated -to skip line obfuscation within a file, despite iterating through the -entire file. - -Remove this redundant check, thus allowing proper obfuscation. - -Closes: #2490 -Related: RHBZ#1930181 -Resolves: #2492 - -Signed-off-by: Jake Hunsaker ---- - sos/cleaner/__init__.py | 11 +++-------- - sos/cleaner/obfuscation_archive.py | 2 -- - 2 files changed, 3 insertions(+), 10 deletions(-) - -diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py -index d10cdc55..bdd24f95 100644 ---- a/sos/cleaner/__init__.py -+++ b/sos/cleaner/__init__.py -@@ -508,7 +508,7 @@ third party. - for line in content.splitlines(): - if isinstance(_parser, SoSHostnameParser): - _parser.load_hostname_into_map(line) -- self.obfuscate_line(line, _parser.prep_map_file) -+ self.obfuscate_line(line) - except Exception as err: - self.log_debug("Could not prep %s: %s" % (_arc_path, err)) - -@@ -606,7 +606,7 @@ third party. - if not line.strip(): - continue - try: -- line, count = self.obfuscate_line(line, short_name) -+ line, count = self.obfuscate_line(line) - subs += count - tfile.write(line) - except Exception as err: -@@ -631,7 +631,7 @@ third party. - pass - return string_data - -- def obfuscate_line(self, line, filename): -+ def obfuscate_line(self, line): - """Run a line through each of the obfuscation parsers, keeping a - cumulative total of substitutions done on that particular line. - -@@ -639,16 +639,11 @@ third party. 
- - :param line str: The raw line as read from the file being - processed -- :param filename str: Filename the line was read from - - Returns the fully obfuscated line and the number of substitutions made - """ - count = 0 - for parser in self.parsers: -- if filename and any([ -- re.match(_s, filename) for _s in parser.skip_files -- ]): -- continue - try: - line, _count = parser.parse_line(line) - count += _count -diff --git a/sos/cleaner/obfuscation_archive.py b/sos/cleaner/obfuscation_archive.py -index 84ca30cd..c64ab13b 100644 ---- a/sos/cleaner/obfuscation_archive.py -+++ b/sos/cleaner/obfuscation_archive.py -@@ -219,8 +219,6 @@ class SoSObfuscationArchive(): - :param filename str: Filename relative to the extracted - archive root - """ -- if filename in self.file_sub_list: -- return True - - if not os.path.isfile(self.get_file_path(filename)): - return True --- -2.26.3 - diff --git a/SOURCES/sos-bz1935603-manpages-see-also.patch b/SOURCES/sos-bz1935603-manpages-see-also.patch deleted file mode 100644 index 6486b48..0000000 --- a/SOURCES/sos-bz1935603-manpages-see-also.patch +++ /dev/null @@ -1,99 +0,0 @@ -From 3b439fb64d8d65b0c09aa8452bf0181ec20f8bcf Mon Sep 17 00:00:00 2001 -From: Jose Castillo -Date: Wed, 3 Mar 2021 13:03:16 +0100 -Subject: [PATCH] [man] Multiple fixes in man pages - -This patch fixes references to sosreport, to the -preferred 'sos report'. Also adds "SEE ALSO" consistently -for all man pages, and fixes a MAINTAINER line. - -Resolves: #2432 - -Signed-off-by: Jose Castillo -Signed-off-by: Jake Hunsaker ---- - man/en/sos-clean.1 | 5 +++++ - man/en/sos-collect.1 | 1 + - man/en/sos-report.1 | 22 ++++++++++++++-------- - 3 files changed, 20 insertions(+), 8 deletions(-) - -diff --git a/man/en/sos-clean.1 b/man/en/sos-clean.1 -index 0c62ed07..d64a0ec7 100644 ---- a/man/en/sos-clean.1 -+++ b/man/en/sos-clean.1 -@@ -77,6 +77,11 @@ Default: 4 - .TP - .B \-\-no-update - Do not write the mapping file contents to /etc/sos/cleaner/default_mapping -+.SH SEE ALSO -+.BR sos (1) -+.BR sos-report (1) -+.BR sos-collect (1) -+ - .SH MAINTAINER - .nf - Jake Hunsaker -diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1 -index d4e5e648..da36542d 100644 ---- a/man/en/sos-collect.1 -+++ b/man/en/sos-collect.1 -@@ -330,6 +330,7 @@ Sosreport option. Override the default compression type. - .SH SEE ALSO - .BR sos (1) - .BR sos-report (1) -+.BR sos-clean (1) - - .SH MAINTAINER - Jake Hunsaker -diff --git a/man/en/sos-report.1 b/man/en/sos-report.1 -index e7fae97b..81005959 100644 ---- a/man/en/sos-report.1 -+++ b/man/en/sos-report.1 -@@ -38,11 +38,12 @@ sosreport \- Collect and package diagnostic and support data - [-h|--help]\fR - - .SH DESCRIPTION --\fBsosreport\fR generates an archive of configuration and diagnostic --information from the running system. The archive may be stored locally --or centrally for recording or tracking purposes or may be sent to --technical support representatives, developers or system administrators --to assist with technical fault-finding and debugging. -+\fBreport\fR is an sos subcommand that generates an archive of -+configuration and diagnostic information from the running system. -+The archive may be stored locally or centrally for recording or -+tracking purposes or may be sent to technical support representatives, -+developers or system administrators to assist with technical -+fault-finding and debugging. - .LP - Sos is modular in design and is able to collect data from a wide - range of subsystems and packages that may be installed. 
An -@@ -110,8 +111,8 @@ User defined presets are saved under /var/lib/sos/presets as JSON-formatted file - .B \--add-preset ADD_PRESET [options] - Add a preset with name ADD_PRESET that enables [options] when called. - --For example, 'sosreport --add-preset mypreset --log-size=50 -n logs' will enable --a user to run 'sosreport --preset mypreset' that sets the maximum log size to -+For example, 'sos report --add-preset mypreset --log-size=50 -n logs' will enable -+a user to run 'sos report --preset mypreset' that sets the maximum log size to - 50 and disables the logs plugin. - - Note: to set a description for the preset that is displayed with \fB--list-presets\fR, -@@ -343,9 +344,14 @@ been tested for this port or may still be under active development. - .TP - .B \--help - Display usage message. -+.SH SEE ALSO -+.BR sos (1) -+.BR sos-clean (1) -+.BR sos-collect (1) -+ - .SH MAINTAINER - .nf --Bryn M. Reeves -+Jake Hunsaker - .fi - .SH AUTHORS & CONTRIBUTORS - See \fBAUTHORS\fR file in the package documentation. --- -2.26.3 - diff --git a/SOURCES/sos-bz1937298-ds-mask-password-in-ldif.patch b/SOURCES/sos-bz1937298-ds-mask-password-in-ldif.patch deleted file mode 100644 index 48aa77a..0000000 --- a/SOURCES/sos-bz1937298-ds-mask-password-in-ldif.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 153c0154050a111fd7e5bcf4a685f906a1dea737 Mon Sep 17 00:00:00 2001 -From: Jose Castillo -Date: Wed, 10 Mar 2021 15:33:50 +0100 -Subject: [PATCH] [ds] Mask password and encryption keys in ldif files - -Both /etc/dirsrv/slapd*/dse.ldif{,.startOK} files contain -sensitive information: -- all the nsSymmetricKey entries: the symmetric encryption key -- nsslapd-rootpw: the admin password's hash - -This patch masks these entries in the files we collect. - -Resolves: #2442 - -Signed-off-by: Jose Castillo -Signed-off-by: Jake Hunsaker ---- - sos/report/plugins/ds.py | 18 ++++++++++++++++++ - 1 file changed, 18 insertions(+) - -diff --git a/sos/report/plugins/ds.py b/sos/report/plugins/ds.py -index f4d68d6e..d467dc89 100644 ---- a/sos/report/plugins/ds.py -+++ b/sos/report/plugins/ds.py -@@ -74,4 +74,22 @@ class DirectoryServer(Plugin, RedHatPlugin): - - self.add_cmd_output("ls -l /var/lib/dirsrv/slapd-*/db/*") - -+ def postproc(self): -+ # Example for scrubbing rootpw hash -+ # -+ # nsslapd-rootpw: AAAAB3NzaC1yc2EAAAADAQABAAABAQDeXYA3juyPqaUuyfWV2HuIM -+ # v3gebb/5cvx9ehEAFF2yIKvsQN2EJGTV+hBM1DEOB4eyy/H11NqcNwm/2QsagDB3PVwYp -+ # 9VKN3BdhQjlhuoYKhLwgtYUMiGL8AX5g1qxjirIkTRJwjbXkSNuQaXig7wVjmvXnB2o7B -+ # zLtu99DiL1AizfVeZTYA+OVowYKYaXYljVmVKS+g3t29Obaom54ZLpfuoGMmyO64AJrWs -+ # -+ # to -+ # -+ # nsslapd-rootpw:******** -+ -+ regexppass = r"(nsslapd-rootpw(\s)*:(\s)*)(\S+)([\r\n]\s.*)*\n" -+ regexpkey = r"(nsSymmetricKey(\s)*::(\s)*)(\S+)([\r\n]\s.*)*\n" -+ repl = r"\1********\n" -+ self.do_path_regex_sub('/etc/dirsrv/*', regexppass, repl) -+ self.do_path_regex_sub('/etc/dirsrv/*', regexpkey, repl) -+ - # vim: set et ts=4 sw=4 : --- -2.26.3 - diff --git a/SOURCES/sos-bz1937418-add-cmd-timeout.patch b/SOURCES/sos-bz1937418-add-cmd-timeout.patch deleted file mode 100644 index db84839..0000000 --- a/SOURCES/sos-bz1937418-add-cmd-timeout.patch +++ /dev/null @@ -1,315 +0,0 @@ -From 90b6b709e9f4002376b656b155d00d85382f1828 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Mon, 29 Mar 2021 16:23:01 +0200 -Subject: [PATCH] [report] add --cmd-timeout option - -Add a --cmd-timeout option to configure the command timeout. Plugin-specific -option of the same name (e.g. -k logs.cmd-timeout=60) can control the -timeout per plugin. 
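A minimal standalone sketch of that global-versus-plugin precedence may help here; it is not sos code itself, the function name effective_timeout and the literal values are invented for illustration, and it simply mirrors the timeout_from_options() helper added later in this patch:

def effective_timeout(global_opt, plugin_opt, default=300):
    # A per-plugin value of -1 defers to the global option; if the
    # result is still unset or negative, the known-safe default wins.
    if global_opt is None:
        resolved = plugin_opt
    elif plugin_opt == -1:
        resolved = int(global_opt)
    else:
        resolved = plugin_opt
    return resolved if resolved is not None and resolved > -1 else default

print(effective_timeout(60, 600))   # 600: '-k logs.cmd-timeout=600' wins
print(effective_timeout(60, -1))    # 60: the plugin defers to '--cmd-timeout=60'
print(effective_timeout(None, -1))  # 300: neither set, fall back to the default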
- -Option defaults and global/plugin-specific option preference follows the ---plugin-timeout rules. - -Resolves: #2466 - -Signed-off-by: Pavel Moravec -Signed-off-by: Jake Hunsaker ---- - man/en/sos-report.1 | 18 +++++++++- - sos/collector/__init__.py | 3 ++ - sos/collector/sosnode.py | 5 +++ - sos/options.py | 3 +- - sos/report/__init__.py | 5 ++- - sos/report/plugins/__init__.py | 63 ++++++++++++++++++++++++---------- - 6 files changed, 76 insertions(+), 21 deletions(-) - -diff --git a/man/en/sos-report.1 b/man/en/sos-report.1 -index 81005959..51cf3436 100644 ---- a/man/en/sos-report.1 -+++ b/man/en/sos-report.1 -@@ -17,6 +17,7 @@ sosreport \- Collect and package diagnostic and support data - [--label label] [--case-id id]\fR - [--threads threads]\fR - [--plugin-timeout TIMEOUT]\fR -+ [--cmd-timeout TIMEOUT]\fR - [-s|--sysroot SYSROOT]\fR - [-c|--chroot {auto|always|never}\fR - [--tmp-dir directory]\fR -@@ -247,7 +248,7 @@ Specify a timeout in seconds to allow each plugin to run for. A value of 0 - means no timeout will be set. A value of -1 is used to indicate the default - timeout of 300 seconds. - --Note that this options sets the timeout for all plugins. If you want to set -+Note that this option sets the timeout for all plugins. If you want to set - a timeout for a specific plugin, use the 'timeout' plugin option available to - all plugins - e.g. '-k logs.timeout=600'. - -@@ -255,6 +256,21 @@ The plugin-specific timeout option will override this option. For example, using - \'--plugin-timeout=60 -k logs.timeout=600\' will set a timeout of 600 seconds for - the logs plugin and 60 seconds for all other enabled plugins. - .TP -+.B \--cmd-timeout TIMEOUT -+Specify a timeout limit in seconds for a command execution. Same defaults logic -+from --plugin-timeout applies here. -+ -+This option sets the command timeout for all plugins. If you want to set a cmd -+timeout for a specific plugin, use the 'cmd-timeout' plugin option available to -+all plugins - e.g. '-k logs.cmd-timeout=600'. -+ -+Again, the same plugin/global precedence logic as for --plugin-timeout applies -+here. -+ -+Note that setting --cmd-timeout (or -k logs.cmd-timeout) high should be followed -+by increasing the --plugin-timeout equivalent, otherwise the plugin can easily -+timeout on slow commands execution. -+.TP - .B \--case-id NUMBER - Specify a case identifier to associate with the archive. - Identifiers may include alphanumeric characters, commas and periods ('.'). -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index 406c8f35..1ae73508 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -82,6 +82,7 @@ class SoSCollector(SoSComponent): - 'password_per_node': False, - 'plugin_options': [], - 'plugin_timeout': None, -+ 'cmd_timeout': None, - 'preset': '', - 'save_group': '', - 'since': '', -@@ -276,6 +277,8 @@ class SoSCollector(SoSComponent): - help='Do not collect env vars in sosreports') - sos_grp.add_argument('--plugin-timeout', type=int, default=None, - help='Set the global plugin timeout value') -+ sos_grp.add_argument('--cmd-timeout', type=int, default=None, -+ help='Set the global command timeout value') - sos_grp.add_argument('--since', default=None, - help=('Escapes archived files older than date. ' - 'This will also affect --all-logs. 
' -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index a1679655..dbbee12e 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -664,6 +664,11 @@ class SosNode(): - '--skip-files=%s' % (quote(self.opts.skip_files)) - ) - -+ if self.check_sos_version('4.2'): -+ if self.opts.cmd_timeout: -+ sos_opts.append('--cmd-timeout=%s' -+ % quote(str(self.opts.cmd_timeout))) -+ - sos_cmd = sos_cmd.replace( - 'sosreport', - os.path.join(self.host.sos_bin_path, self.sos_bin) -diff --git a/sos/options.py b/sos/options.py -index b82a7d36..1eda55d6 100644 ---- a/sos/options.py -+++ b/sos/options.py -@@ -283,7 +283,8 @@ class SoSOptions(): - if name in ("add_preset", "del_preset", "desc", "note"): - return False - # Exception list for options that still need to be reported when 0 -- if name in ['log_size', 'plugin_timeout'] and value == 0: -+ if name in ['log_size', 'plugin_timeout', 'cmd_timeout'] \ -+ and value == 0: - return True - return has_value(name, value) - -diff --git a/sos/report/__init__.py b/sos/report/__init__.py -index 25478ba7..945d0fc1 100644 ---- a/sos/report/__init__.py -+++ b/sos/report/__init__.py -@@ -107,6 +107,7 @@ class SoSReport(SoSComponent): - 'only_plugins': [], - 'preset': 'auto', - 'plugin_timeout': 300, -+ 'cmd_timeout': 300, - 'profiles': [], - 'since': None, - 'verify': False, -@@ -266,6 +267,8 @@ class SoSReport(SoSComponent): - help="A preset identifier", default="auto") - report_grp.add_argument("--plugin-timeout", default=None, - help="set a timeout for all plugins") -+ report_grp.add_argument("--cmd-timeout", default=None, -+ help="set a command timeout for all plugins") - report_grp.add_argument("-p", "--profile", "--profiles", - action="extend", dest="profiles", type=str, - default=[], -@@ -709,7 +712,7 @@ class SoSReport(SoSComponent): - - self.ui_log.info(_("The following plugin options are available:")) - for (plug, plugname, optname, optparm) in self.all_options: -- if optname in ('timeout', 'postproc'): -+ if optname in ('timeout', 'postproc', 'cmd-timeout'): - continue - # format option value based on its type (int or bool) - if type(optparm["enabled"]) == bool: -diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py -index 02625eb1..779119af 100644 ---- a/sos/report/plugins/__init__.py -+++ b/sos/report/plugins/__init__.py -@@ -472,6 +472,9 @@ class Plugin(object): - _default_plug_opts = [ - ('timeout', 'Timeout in seconds for plugin. The default value (-1) ' + - 'defers to the general plugin timeout, 300 seconds', 'fast', -1), -+ ('cmd-timeout', 'Timeout in seconds for a command execution. The ' + -+ 'default value (-1) defers to the general cmd timeout, 300 ' + -+ 'seconds', 'fast', -1), - ('postproc', 'Enable post-processing collected plugin data', 'fast', - True) - ] -@@ -532,16 +535,15 @@ class Plugin(object): - self.manifest.add_list('commands', []) - self.manifest.add_list('files', []) - -- @property -- def timeout(self): -- """Returns either the default plugin timeout value, the value as -- provided on the commandline via -k plugin.timeout=value, or the value -- of the global --plugin-timeout option. -+ def timeout_from_options(self, optname, plugoptname, default_timeout): -+ """Returns either the default [plugin|cmd] timeout value, the value as -+ provided on the commandline via -k plugin.[|cmd-]timeout=value, or the -+ value of the global --[plugin|cmd]-timeout option. 
- """ - _timeout = None - try: -- opt_timeout = self.get_option('plugin_timeout') -- own_timeout = int(self.get_option('timeout')) -+ opt_timeout = self.get_option(optname) -+ own_timeout = int(self.get_option(plugoptname)) - if opt_timeout is None: - _timeout = own_timeout - elif opt_timeout is not None and own_timeout == -1: -@@ -551,10 +553,30 @@ class Plugin(object): - else: - return None - except ValueError: -- return self.plugin_timeout # Default to known safe value -+ return default_timeout # Default to known safe value - if _timeout is not None and _timeout > -1: - return _timeout -- return self.plugin_timeout -+ return default_timeout -+ -+ @property -+ def timeout(self): -+ """Returns either the default plugin timeout value, the value as -+ provided on the commandline via -k plugin.timeout=value, or the value -+ of the global --plugin-timeout option. -+ """ -+ _timeout = self.timeout_from_options('plugin_timeout', 'timeout', -+ self.plugin_timeout) -+ return _timeout -+ -+ @property -+ def cmdtimeout(self): -+ """Returns either the default command timeout value, the value as -+ provided on the commandline via -k plugin.cmd-timeout=value, or the -+ value of the global --cmd-timeout option. -+ """ -+ _cmdtimeout = self.timeout_from_options('cmd_timeout', 'cmd-timeout', -+ self.cmd_timeout) -+ return _cmdtimeout - - def set_timeout_hit(self): - self._timeout_hit = True -@@ -1235,8 +1257,8 @@ class Plugin(object): - """ - - global_options = ( -- 'all_logs', 'allow_system_changes', 'log_size', 'plugin_timeout', -- 'since', 'verify' -+ 'all_logs', 'allow_system_changes', 'cmd_timeout', 'log_size', -+ 'plugin_timeout', 'since', 'verify' - ) - - if optionname in global_options: -@@ -1505,7 +1527,7 @@ class Plugin(object): - 'tags': _spec_tags - }) - -- def add_blockdev_cmd(self, cmds, devices='block', timeout=300, -+ def add_blockdev_cmd(self, cmds, devices='block', timeout=None, - sizelimit=None, chroot=True, runat=None, env=None, - binary=False, prepend_path=None, whitelist=[], - blacklist=[], tags=[]): -@@ -1569,7 +1591,7 @@ class Plugin(object): - whitelist=whitelist, blacklist=blacklist, - tags=_dev_tags) - -- def _add_device_cmd(self, cmds, devices, timeout=300, sizelimit=None, -+ def _add_device_cmd(self, cmds, devices, timeout=None, sizelimit=None, - chroot=True, runat=None, env=None, binary=False, - prepend_path=None, whitelist=[], blacklist=[], - tags=[]): -@@ -1627,7 +1649,7 @@ class Plugin(object): - changes=soscmd.changes) - - def add_cmd_output(self, cmds, suggest_filename=None, -- root_symlink=None, timeout=cmd_timeout, stderr=True, -+ root_symlink=None, timeout=None, stderr=True, - chroot=True, runat=None, env=None, binary=False, - sizelimit=None, pred=None, subdir=None, - changes=False, foreground=False, tags=[]): -@@ -1849,7 +1871,7 @@ class Plugin(object): - self._log_debug("added string ...'%s' as '%s'" % (summary, filename)) - - def _collect_cmd_output(self, cmd, suggest_filename=None, -- root_symlink=False, timeout=cmd_timeout, -+ root_symlink=False, timeout=None, - stderr=True, chroot=True, runat=None, env=None, - binary=False, sizelimit=None, subdir=None, - changes=False, foreground=False, tags=[]): -@@ -1883,6 +1905,8 @@ class Plugin(object): - if self._timeout_hit: - return - -+ if timeout is None: -+ timeout = self.cmdtimeout - _tags = [] - - if isinstance(tags, str): -@@ -1975,7 +1999,7 @@ class Plugin(object): - return result - - def collect_cmd_output(self, cmd, suggest_filename=None, -- root_symlink=False, timeout=cmd_timeout, -+ root_symlink=False, 
timeout=None, - stderr=True, chroot=True, runat=None, env=None, - binary=False, sizelimit=None, pred=None, - subdir=None, tags=[]): -@@ -2044,7 +2068,7 @@ class Plugin(object): - tags=tags - ) - -- def exec_cmd(self, cmd, timeout=cmd_timeout, stderr=True, chroot=True, -+ def exec_cmd(self, cmd, timeout=None, stderr=True, chroot=True, - runat=None, env=None, binary=False, pred=None, - foreground=False, container=False, quotecmd=False): - """Execute a command right now and return the output and status, but -@@ -2095,6 +2119,9 @@ class Plugin(object): - if not self.test_predicate(cmd=True, pred=pred): - return _default - -+ if timeout is None: -+ timeout = self.cmdtimeout -+ - if chroot or self.commons['cmdlineopts'].chroot == 'always': - root = self.sysroot - else: -@@ -2331,7 +2358,7 @@ class Plugin(object): - - def add_journal(self, units=None, boot=None, since=None, until=None, - lines=None, allfields=False, output=None, -- timeout=cmd_timeout, identifier=None, catalog=None, -+ timeout=None, identifier=None, catalog=None, - sizelimit=None, pred=None, tags=[]): - """Collect journald logs from one or more units. - --- -2.26.3 - diff --git a/SOURCES/sos-bz1939963-gather-cups-browsed-logs.patch b/SOURCES/sos-bz1939963-gather-cups-browsed-logs.patch deleted file mode 100644 index 3e6c393..0000000 --- a/SOURCES/sos-bz1939963-gather-cups-browsed-logs.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 0d56e43299009ffa91f665d85b5a08ba76da9c1f Mon Sep 17 00:00:00 2001 -From: Jose Castillo -Date: Wed, 17 Mar 2021 13:10:36 +0100 -Subject: [PATCH] [cups] Add gathering cups-browsed logs - -Gather logs from the cups-browsed service sent -to the journal. - -Resolves: #2452 - -Signed-off-by: Jose Castillo -Signed-off-by: Bryan Quigley ---- - sos/report/plugins/cups.py | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/sos/report/plugins/cups.py b/sos/report/plugins/cups.py -index 29a903e8..ab7b6b70 100644 ---- a/sos/report/plugins/cups.py -+++ b/sos/report/plugins/cups.py -@@ -40,5 +40,6 @@ class Cups(Plugin, IndependentPlugin): - ]) - - self.add_journal(units="cups") -+ self.add_journal(units="cups-browsed") - - # vim: set et ts=4 sw=4 : --- -2.26.3 - diff --git a/SOURCES/sos-bz1940502-sssd-memcache-and-logs.patch b/SOURCES/sos-bz1940502-sssd-memcache-and-logs.patch deleted file mode 100644 index ebc7578..0000000 --- a/SOURCES/sos-bz1940502-sssd-memcache-and-logs.patch +++ /dev/null @@ -1,62 +0,0 @@ -From d03c2fa4439c87783293c922b2825cf86e8818bd Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pawe=C5=82=20Po=C5=82awski?= -Date: Fri, 12 Mar 2021 12:42:30 +0100 -Subject: [PATCH] [sssd] Enable collecting SSSD memory cache -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -The SSSD plugin by default collects only logs and configuration. -This patch enables collecting the memory cache maintained -by the SSSD daemon. The cache does not contain any sensitive client -data, so it can be safely included in the sos report. 
- -Resolves: #2444 - -Signed-off-by: Paweł Poławski -Signed-off-by: Jake Hunsaker ---- - sos/report/plugins/sssd.py | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - -diff --git a/sos/report/plugins/sssd.py b/sos/report/plugins/sssd.py -index 9469c41c..aeb68c4f 100644 ---- a/sos/report/plugins/sssd.py -+++ b/sos/report/plugins/sssd.py -@@ -10,6 +10,7 @@ - - from sos.report.plugins import (Plugin, RedHatPlugin, DebianPlugin, - UbuntuPlugin, SoSPredicate) -+from glob import glob - - - class Sssd(Plugin): -@@ -22,11 +23,22 @@ class Sssd(Plugin): - - def setup(self): - self.add_copy_spec([ -+ # main config file - "/etc/sssd/sssd.conf", -- "/var/log/sssd/*", -- "/var/lib/sss/pubconf/krb5.include.d/*", - # SSSD 1.14 -- "/etc/sssd/conf.d/*.conf" -+ "/etc/sssd/conf.d/*.conf", -+ # dynamic Kerberos configuration -+ "/var/lib/sss/pubconf/krb5.include.d/*" -+ ]) -+ -+ # add individual log files -+ self.add_copy_spec(glob("/var/log/sssd/*log*")) -+ -+ # add memory cache -+ self.add_copy_spec([ -+ "/var/lib/sss/mc/passwd", -+ "/var/lib/sss/mc/group", -+ "/var/lib/sss/mc/initgroups" - ]) - - # call sssctl commands only when sssd service is running, --- -2.26.3 - diff --git a/SOURCES/sos-bz1942276-ibmvNIC-dynamic-debugs.patch b/SOURCES/sos-bz1942276-ibmvNIC-dynamic-debugs.patch deleted file mode 100644 index 7bb7fd7..0000000 --- a/SOURCES/sos-bz1942276-ibmvNIC-dynamic-debugs.patch +++ /dev/null @@ -1,29 +0,0 @@ -From dddabb07a88d398ed7b8a878e95acfd968af6698 Mon Sep 17 00:00:00 2001 -From: Mamatha Inamdar -Date: Tue, 23 Mar 2021 17:58:30 +0530 -Subject: [PATCH] This patch updates the kernel plugin to collect - dynamic_debug log files for ibmvNIC - -Resolves: #2458 - -Signed-off-by: Mamatha Inamdar -Signed-off-by: Bryan Quigley ---- - sos/report/plugins/kernel.py | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/sos/report/plugins/kernel.py b/sos/report/plugins/kernel.py -index febe2ad0..dd7b6939 100644 ---- a/sos/report/plugins/kernel.py -+++ b/sos/report/plugins/kernel.py -@@ -106,6 +106,7 @@ class Kernel(Plugin, IndependentPlugin): - "/proc/misc", - "/var/log/dmesg", - "/sys/fs/pstore", -+ "/sys/kernel/debug/dynamic_debug/control", - clocksource_path + "available_clocksource", - clocksource_path + "current_clocksource" - ]) --- -2.26.3 - diff --git a/SOURCES/sos-bz1956673-pulpcore-plugin.patch b/SOURCES/sos-bz1956673-pulpcore-plugin.patch deleted file mode 100644 index e60a494..0000000 --- a/SOURCES/sos-bz1956673-pulpcore-plugin.patch +++ /dev/null @@ -1,147 +0,0 @@ -From 808d9f35ac504a58c337ffed14b39119a591808f Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Tue, 27 Apr 2021 22:16:08 +0200 -Subject: [PATCH] [pulpcore] add plugin for pulp-3 - -Pulp-3 / pulpcore, a complete redesign of pulp-2, needs a separate -plugin, since the two plugins have nothing in common and there might -be deployments where both pulp-2 and pulp-3 are active. 
- -Resolves: #2278 - -Signed-off-by: Pavel Moravec -Signed-off-by: Jake Hunsaker ---- - sos/report/plugins/pulpcore.py | 120 +++++++++++++++++++++++++++++++++ - 1 file changed, 120 insertions(+) - create mode 100644 sos/report/plugins/pulpcore.py - -diff --git a/sos/report/plugins/pulpcore.py b/sos/report/plugins/pulpcore.py -new file mode 100644 -index 00000000..20403814 ---- /dev/null -+++ b/sos/report/plugins/pulpcore.py -@@ -0,0 +1,120 @@ -+# Copyright (C) 2021 Red Hat, Inc., Pavel Moravec -+ -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. -+ -+from sos.report.plugins import Plugin, IndependentPlugin -+from pipes import quote -+from re import match -+ -+ -+class PulpCore(Plugin, IndependentPlugin): -+ -+ short_desc = 'Pulp-3 aka pulpcore' -+ -+ plugin_name = "pulpcore" -+ commands = ("pulpcore-manager",) -+ files = ("/etc/pulp/settings.py",) -+ option_list = [ -+ ('task-days', 'days of tasks history', 'fast', 7) -+ ] -+ -+ def parse_settings_config(self): -+ databases_scope = False -+ self.dbhost = "localhost" -+ self.dbport = 5432 -+ self.dbpasswd = "" -+ # TODO: read also redis config (we dont expect much customisations) -+ # TODO: read also db user (pulp) and database name (pulpcore) -+ self.staticroot = "/var/lib/pulp/assets" -+ self.uploaddir = "/var/lib/pulp/media/upload" -+ -+ def separate_value(line, sep=':'): -+ # an auxiliary method to parse values from lines like: -+ # 'HOST': 'localhost', -+ val = line.split(sep)[1].lstrip().rstrip(',') -+ if (val.startswith('"') and val.endswith('"')) or \ -+ (val.startswith('\'') and val.endswith('\'')): -+ val = val[1:-1] -+ return val -+ -+ try: -+ for line in open("/etc/pulp/settings.py").read().splitlines(): -+ # skip empty lines and lines with comments -+ if not line or line[0] == '#': -+ continue -+ if line.startswith("DATABASES"): -+ databases_scope = True -+ continue -+ # example HOST line to parse: -+ # 'HOST': 'localhost', -+ if databases_scope and match(r"\s+'HOST'\s*:\s+\S+", line): -+ self.dbhost = separate_value(line) -+ if databases_scope and match(r"\s+'PORT'\s*:\s+\S+", line): -+ self.dbport = separate_value(line) -+ if databases_scope and match(r"\s+'PASSWORD'\s*:\s+\S+", line): -+ self.dbpasswd = separate_value(line) -+ # if line contains closing '}' database_scope end -+ if databases_scope and '}' in line: -+ databases_scope = False -+ if line.startswith("STATIC_ROOT = "): -+ self.staticroot = separate_value(line, sep='=') -+ if line.startswith("CHUNKED_UPLOAD_DIR = "): -+ self.uploaddir = separate_value(line, sep='=') -+ except IOError: -+ # fallback when the cfg file is not accessible -+ pass -+ # set the password to os.environ when calling psql commands to prevent -+ # printing it in sos logs -+ # we can't set os.environ directly now: other plugins can overwrite it -+ self.env = {"PGPASSWORD": self.dbpasswd} -+ -+ def setup(self): -+ self.parse_settings_config() -+ -+ self.add_copy_spec("/etc/pulp/settings.py") -+ -+ self.add_cmd_output("rq info -u redis://localhost:6379/8", -+ env={"LC_ALL": "en_US.UTF-8"}, -+ suggest_filename="rq_info") -+ self.add_cmd_output("curl -ks https://localhost/pulp/api/v3/status/", -+ suggest_filename="pulp_status") -+ dynaconf_env = {"LC_ALL": "en_US.UTF-8", -+ "PULP_SETTINGS": 
"/etc/pulp/settings.py", -+ "DJANGO_SETTINGS_MODULE": "pulpcore.app.settings"} -+ self.add_cmd_output("dynaconf list", env=dynaconf_env) -+ for _dir in [self.staticroot, self.uploaddir]: -+ self.add_cmd_output("ls -l %s" % _dir) -+ -+ task_days = self.get_option('task-days') -+ for table in ['core_task', 'core_taskgroup', -+ 'core_reservedresourcerecord', -+ 'core_taskreservedresourcerecord', -+ 'core_groupprogressreport', 'core_progressreport']: -+ _query = "select * from %s where pulp_last_updated > NOW() - " \ -+ "interval '%s days' order by pulp_last_updated" % \ -+ (table, task_days) -+ _cmd = "psql -h %s -p %s -U pulp -d pulpcore -c %s" % \ -+ (self.dbhost, self.dbport, quote(_query)) -+ self.add_cmd_output(_cmd, env=self.env, suggest_filename=table) -+ -+ def postproc(self): -+ # TODO obfuscate from /etc/pulp/settings.py : -+ # SECRET_KEY = "eKfeDkTnvss7p5WFqYdGPWxXfHnsbDBx" -+ # 'PASSWORD': 'tGrag2DmtLqKLTWTQ6U68f6MAhbqZVQj', -+ self.do_path_regex_sub( -+ "/etc/pulp/settings.py", -+ r"(SECRET_KEY\s*=\s*)(.*)", -+ r"\1********") -+ self.do_path_regex_sub( -+ "/etc/pulp/settings.py", -+ r"(PASSWORD\S*\s*:\s*)(.*)", -+ r"\1********") -+ -+ -+# vim: set et ts=4 sw=4 : --- -2.26.3 - diff --git a/SOURCES/sos-bz1959413-saphana-traceback.patch b/SOURCES/sos-bz1959413-saphana-traceback.patch deleted file mode 100644 index 4b784dc..0000000 --- a/SOURCES/sos-bz1959413-saphana-traceback.patch +++ /dev/null @@ -1,30 +0,0 @@ -From c998ea8c1c950586f91fc9728ee66590740968a5 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Tue, 11 May 2021 15:59:40 +0200 -Subject: [PATCH] [saphana] remove redundant unused argument of get_inst_info - -get_inst_info does not use and isnt called with 'prefix' argument - -Resolves: #2535 - -Signed-off-by: Pavel Moravec ---- - sos/report/plugins/saphana.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/report/plugins/saphana.py b/sos/report/plugins/saphana.py -index 82c497b4..00e84b59 100644 ---- a/sos/report/plugins/saphana.py -+++ b/sos/report/plugins/saphana.py -@@ -51,7 +51,7 @@ class saphana(Plugin, RedHatPlugin): - inst = inst.strip()[-2:] - self.get_inst_info(sid, sidadm, inst) - -- def get_inst_info(self, prefix, sid, sidadm, inst): -+ def get_inst_info(self, sid, sidadm, inst): - proc_cmd = 'su - %s -c "sapcontrol -nr %s -function GetProcessList"' - status_fname = "%s_%s_status" % (sid, inst) - self.add_cmd_output( --- -2.26.3 - diff --git a/SOURCES/sos-bz1959598-conversions-and-upgrades.patch b/SOURCES/sos-bz1959598-conversions-and-upgrades.patch deleted file mode 100644 index a39f839..0000000 --- a/SOURCES/sos-bz1959598-conversions-and-upgrades.patch +++ /dev/null @@ -1,50 +0,0 @@ -From ee5d9d017b0a1bfeaebee9c21c17e89ef1f909a8 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Mon, 26 Jul 2021 13:30:09 +0200 -Subject: [PATCH] [MigrationResults] collect info about conversions and - upgrades - -A new tiny plugin independent on leapp and convert2rhel is proposed. - -It should collect /etc/migration-results with info about RHEL -conversions and upgrades, whenever the file is present. 
-
-Resolves: #2627
-Relevant to: rhbz#1959598
-
-Signed-off-by: Pavel Moravec
----
- sos/report/plugins/migration_results.py | 21 +++++++++++++++++++++
- 1 file changed, 21 insertions(+)
- create mode 100644 sos/report/plugins/migration_results.py
-
-diff --git a/sos/report/plugins/migration_results.py b/sos/report/plugins/migration_results.py
-new file mode 100644
-index 00000000..b67480ba
---- /dev/null
-+++ b/sos/report/plugins/migration_results.py
-@@ -0,0 +1,21 @@
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+from sos.report.plugins import Plugin, RedHatPlugin
-+
-+
-+class MigrationResults(Plugin, RedHatPlugin):
-+
-+    short_desc = 'Information about conversions and upgrades'
-+
-+    plugin_name = 'migration_results'
-+    profiles = ('system',)
-+
-+    files = ('/etc/migration-results',)
-+
-+# vim: et ts=4 sw=4
---
-2.31.1
-
diff --git a/SOURCES/sos-bz1961229-snapper-plugin-and-allocation-failures.patch b/SOURCES/sos-bz1961229-snapper-plugin-and-allocation-failures.patch
deleted file mode 100644
index e33a89e..0000000
--- a/SOURCES/sos-bz1961229-snapper-plugin-and-allocation-failures.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From 60105e0705f3483b9a3e8e98dafd6f0e1e277ab7 Mon Sep 17 00:00:00 2001
-From: Mamatha Inamdar
-Date: Mon, 19 Apr 2021 16:55:52 +0530
-Subject: [PATCH 1/3] [block] Patch to update the block plugin to collect disk
- info
-
-This patch updates the block plugin to collect the
-state of sd* devices.
-
-Resolves: #2504
-
-Signed-off-by: Mamatha Inamdar
----
- sos/report/plugins/block.py | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/sos/report/plugins/block.py b/sos/report/plugins/block.py
-index f93b3231..c959d667 100644
---- a/sos/report/plugins/block.py
-+++ b/sos/report/plugins/block.py
-@@ -38,7 +38,8 @@ class Block(Plugin, IndependentPlugin):
-             "/run/blkid/blkid.tab",
-             "/proc/partitions",
-             "/proc/diskstats",
--            "/sys/block/*/queue/"
-+            "/sys/block/*/queue/",
-+            "/sys/block/sd*/device/state",
-         ])
-
-         cmds = [
---
-2.26.3
-
-
-From c6e0fe5cebd0d9581950db75fa2d234713b7e15a Mon Sep 17 00:00:00 2001
-From: Mamatha Inamdar
-Date: Mon, 26 Apr 2021 23:09:19 +0530
-Subject: [PATCH 2/3] [snapper] Patch to introduce the snapper plugin to collect
- snapper info
-
-This patch introduces the snapper plugin to collect
-/usr/lib/snapper/ information, in order to check the
-executable permission of the installation-helper command.
-
-Resolves: #2504
-
-Signed-off-by: Mamatha Inamdar
----
- sos/report/plugins/snapper.py | 27 +++++++++++++++++++++++++++
- 1 file changed, 27 insertions(+)
- create mode 100644 sos/report/plugins/snapper.py
-
-diff --git a/sos/report/plugins/snapper.py b/sos/report/plugins/snapper.py
-new file mode 100644
-index 00000000..9ef5fec2
---- /dev/null
-+++ b/sos/report/plugins/snapper.py
-@@ -0,0 +1,27 @@
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+ -+from sos.report.plugins import Plugin, IndependentPlugin -+ -+ -+class Snapper(Plugin, IndependentPlugin): -+ -+ short_desc = 'System snapper' -+ -+ plugin_name = 'snapper' -+ commands = ("snapper",) -+ -+ def setup(self): -+ -+ self.add_cmd_output([ -+ "ls -la /usr/lib/snapper/", -+ "snapper --version", -+ "snapper list" -+ ]) -+ -+# vim: set et ts=4 sw=4 : --- -2.26.3 - - -From 61ff5ce165e654a02fe80b9de5ec8e49ed808ec9 Mon Sep 17 00:00:00 2001 -From: Mamatha Inamdar -Date: Mon, 19 Apr 2021 17:49:08 +0530 -Subject: [PATCH 3/3] [kernel]:Patch to update kernel plugin to collect debug - info - -This patch is to update kernel plugin to collect -page allocation failure info - -Resolves: #2504 - -Signed-off-by: Mamatha Inamdar ---- - sos/report/plugins/kernel.py | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/sos/report/plugins/kernel.py b/sos/report/plugins/kernel.py -index dd7b6939..9d53ca03 100644 ---- a/sos/report/plugins/kernel.py -+++ b/sos/report/plugins/kernel.py -@@ -107,6 +107,8 @@ class Kernel(Plugin, IndependentPlugin): - "/var/log/dmesg", - "/sys/fs/pstore", - "/sys/kernel/debug/dynamic_debug/control", -+ "/sys/kernel/debug/extfrag/unusable_index", -+ "/sys/kernel/debug/extfrag/extfrag_index", - clocksource_path + "available_clocksource", - clocksource_path + "current_clocksource" - ]) --- -2.26.3 - diff --git a/SOURCES/sos-bz1961458-collect-nstat.patch b/SOURCES/sos-bz1961458-collect-nstat.patch deleted file mode 100644 index 75b7d29..0000000 --- a/SOURCES/sos-bz1961458-collect-nstat.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 575ddeddf2f6e1d6a639922f9ccc51c7e46fbe12 Mon Sep 17 00:00:00 2001 -From: Seiichi Ikarashi -Date: Fri, 14 May 2021 09:49:33 +0900 -Subject: [PATCH] [networking] Add nstat command support - -As netstat command is being deprecated, -we need nstat as an alternative to "netstat -s". - -Signed-off-by: Seiichi Ikarashi ---- - sos/report/plugins/networking.py | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/sos/report/plugins/networking.py b/sos/report/plugins/networking.py -index 8b4614bb..acfa027f 100644 ---- a/sos/report/plugins/networking.py -+++ b/sos/report/plugins/networking.py -@@ -87,6 +87,7 @@ class Networking(Plugin): - root_symlink="netstat") - - self.add_cmd_output([ -+ "nstat -zas", - "netstat -s", - "netstat %s -agn" % self.ns_wide, - "ip route show table all", -@@ -198,6 +199,7 @@ class Networking(Plugin): - ns_cmd_prefix + "netstat %s -neopa" % self.ns_wide, - ns_cmd_prefix + "netstat -s", - ns_cmd_prefix + "netstat %s -agn" % self.ns_wide, -+ ns_cmd_prefix + "nstat -zas", - ]) - - ss_cmd = ns_cmd_prefix + "ss -peaonmi" --- -2.26.3 - diff --git a/SOURCES/sos-bz1964499-obfuscate-fqdn-from-dnf-log.patch b/SOURCES/sos-bz1964499-obfuscate-fqdn-from-dnf-log.patch deleted file mode 100644 index 07e005b..0000000 --- a/SOURCES/sos-bz1964499-obfuscate-fqdn-from-dnf-log.patch +++ /dev/null @@ -1,78 +0,0 @@ -From b27140a9126ea82efb517d60bf1b8455aaf4f5a6 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Fri, 26 Mar 2021 11:12:33 -0400 -Subject: [PATCH] [cleaner] Only skip packaging-based files for the IP parser - -Files primarily containing package information, e.g. `installed-rpms` or -`installed-debs`, were previously being skipped by all parsers. In -reality, we only need to skip these for the IP parser due to the fact -that version numbers often generate a match for IP address regexes. 
-
-This will also fix a problem where if a system was the build host for
-certain packages, the hostname would remain in these files as the
-hostname parser was previously not checking these files.
-
-Closes: #2400
-Resolves: #2464
-
-Signed-off-by: Jake Hunsaker
----
- sos/cleaner/obfuscation_archive.py | 10 ----------
- sos/cleaner/parsers/ip_parser.py   | 16 ++++++++++++++++
- 2 files changed, 16 insertions(+), 10 deletions(-)
-
-diff --git a/sos/cleaner/obfuscation_archive.py b/sos/cleaner/obfuscation_archive.py
-index 981cc05f..84ca30cd 100644
---- a/sos/cleaner/obfuscation_archive.py
-+++ b/sos/cleaner/obfuscation_archive.py
-@@ -59,20 +59,10 @@ class SoSObfuscationArchive():
-         Returns: list of files and file regexes
-         """
-         return [
--            'installed-debs',
--            'installed-rpms',
--            'sos_commands/dpkg',
--            'sos_commands/python/pip_list',
--            'sos_commands/rpm',
--            'sos_commands/yum/.*list.*',
--            'sos_commands/snappy/snap_list_--all',
--            'sos_commands/snappy/snap_--version',
--            'sos_commands/vulkan/vulkaninfo',
-             'sys/firmware',
-             'sys/fs',
-             'sys/kernel/debug',
-             'sys/module',
--            'var/log/.*dnf.*',
-             r'.*\.tar$',  # TODO: support archive unpacking
-             # Be explicit with these tar matches to avoid matching commands
-             r'.*\.tar\.xz',
-diff --git a/sos/cleaner/parsers/ip_parser.py b/sos/cleaner/parsers/ip_parser.py
-index 3ea7f865..08d1cd05 100644
---- a/sos/cleaner/parsers/ip_parser.py
-+++ b/sos/cleaner/parsers/ip_parser.py
-@@ -24,6 +24,22 @@ class SoSIPParser(SoSCleanerParser):
-         # don't match package versions recorded in journals
-         r'.*dnf\[.*\]:'
-     ]
-+
-+    skip_files = [
-+        # skip these as version numbers will frequently look like IP addresses
-+        # when using regex matching
-+        'installed-debs',
-+        'installed-rpms',
-+        'sos_commands/dpkg',
-+        'sos_commands/python/pip_list',
-+        'sos_commands/rpm',
-+        'sos_commands/yum/.*list.*',
-+        'sos_commands/snappy/snap_list_--all',
-+        'sos_commands/snappy/snap_--version',
-+        'sos_commands/vulkan/vulkaninfo',
-+        'var/log/.*dnf.*'
-+    ]
-+
-     map_file_key = 'ip_map'
-     prep_map_file = 'sos_commands/networking/ip_-o_addr'
-
---
-2.26.3
-
diff --git a/SOURCES/sos-bz1965001-fix-avc-copystating-proc-sys.patch b/SOURCES/sos-bz1965001-fix-avc-copystating-proc-sys.patch
deleted file mode 100644
index 1b62a1a..0000000
--- a/SOURCES/sos-bz1965001-fix-avc-copystating-proc-sys.patch
+++ /dev/null
@@ -1,135 +0,0 @@
-From 206d65618f20995b168dcc63090d1e6871450e90 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec
-Date: Wed, 26 May 2021 15:45:26 +0200
-Subject: [PATCH] [archive] skip copying SELinux context for /proc and /sys
- every time
-
-A supplement to the #1399 fix, now also applied when adding strings or
-special device files.
-
-Also adding a (vendor) test case for it.
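
For context, the distinction the fix relies on (an illustrative sketch; the
paths are examples only): shutil.copystat() copies permission bits,
timestamps and, where possible, extended attributes such as the SELinux
label, while shutil.copymode() copies the permission bits alone.

    import shutil

    src = "/proc/sys/kernel/hostname"
    dest = "/tmp/hostname_copy"

    shutil.copyfile(src, dest)    # content only
    shutil.copymode(src, dest)    # mode bits only - safe for /proc and /sys
    # shutil.copystat(src, dest)  # would also copy xattrs (SELinux context);
    #                             # for /proc and /sys sources this is what
    #                             # triggers the AVC denial under SELinux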
-
-Resolves: #2560
-
-Signed-off-by: Pavel Moravec
----
- sos/archive.py                           | 35 +++++++++++----------
- tests/vendor_tests/redhat/rhbz1965001.py | 39 ++++++++++++++++++++++++
- 2 files changed, 56 insertions(+), 18 deletions(-)
- create mode 100644 tests/vendor_tests/redhat/rhbz1965001.py
-
-diff --git a/sos/archive.py b/sos/archive.py
-index 4dd31d75..b02b2475 100644
---- a/sos/archive.py
-+++ b/sos/archive.py
-@@ -326,6 +326,20 @@ class FileCacheArchive(Archive):
-                 return None
-         return dest
-
-+    def _copy_attributes(self, src, dest):
-+        # copy file attributes, skip SELinux xattrs for /sys and /proc
-+        try:
-+            stat = os.stat(src)
-+            if src.startswith("/sys/") or src.startswith("/proc/"):
-+                shutil.copymode(src, dest)
-+                os.utime(dest, ns=(stat.st_atime_ns, stat.st_mtime_ns))
-+            else:
-+                shutil.copystat(src, dest)
-+                os.chown(dest, stat.st_uid, stat.st_gid)
-+        except Exception as e:
-+            self.log_debug("caught '%s' setting attributes of '%s'"
-+                           % (e, dest))
-+
-     def add_file(self, src, dest=None):
-         with self._path_lock:
-             if not dest:
-@@ -348,18 +362,7 @@ class FileCacheArchive(Archive):
-                     else:
-                         self.log_info("File %s not collected: '%s'" % (src, e))
-
--            # copy file attributes, skip SELinux xattrs for /sys and /proc
--            try:
--                stat = os.stat(src)
--                if src.startswith("/sys/") or src.startswith("/proc/"):
--                    shutil.copymode(src, dest)
--                    os.utime(dest, ns=(stat.st_atime_ns, stat.st_mtime_ns))
--                else:
--                    shutil.copystat(src, dest)
--                    os.chown(dest, stat.st_uid, stat.st_gid)
--            except Exception as e:
--                self.log_debug("caught '%s' setting attributes of '%s'"
--                               % (e, dest))
-+            self._copy_attributes(src, dest)
-             file_name = "'%s'" % src
-         else:
-             # Open file case: first rewind the file to obtain
-@@ -388,11 +391,7 @@ class FileCacheArchive(Archive):
-                 content = content.decode('utf8', 'ignore')
-             f.write(content)
-             if os.path.exists(src):
--                try:
--                    shutil.copystat(src, dest)
--                except OSError as e:
--                    self.log_error("Unable to add '%s' to archive: %s" %
--                                   (dest, e))
-+                self._copy_attributes(src, dest)
-         self.log_debug("added string at '%s' to FileCacheArchive '%s'"
-                        % (src, self._archive_root))
-
-@@ -501,7 +500,7 @@ class FileCacheArchive(Archive):
-                     self.log_info("add_node: %s - mknod '%s'" % (msg, dest))
-                     return
-                 raise e
--        shutil.copystat(path, dest)
-+        self._copy_attributes(path, dest)
-
-     def name_max(self):
-         if 'PC_NAME_MAX' in os.pathconf_names:
-diff --git a/tests/vendor_tests/redhat/rhbz1965001.py b/tests/vendor_tests/redhat/rhbz1965001.py
-new file mode 100644
-index 00000000..aa16ba81
---- /dev/null
-+++ b/tests/vendor_tests/redhat/rhbz1965001.py
-@@ -0,0 +1,39 @@
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+
-+import tempfile
-+import shutil
-+from sos_tests import StageOneReportTest
-+
-+
-+class rhbz1965001(StageOneReportTest):
-+    """
-+    Copying /proc/sys/vm/{compact_memory,drop_caches} must ignore SELinux
-+    context, otherwise an attempt to set the context on files under some
-+    directories like /tmp raises an AVC denial, and an ERROR
-+    "Unable to add '...' to archive: [Errno 13] Permission denied: '...'"
-+    is raised.
-+
-+    https://bugzilla.redhat.com/show_bug.cgi?id=1965001
-+
-+    :avocado: enable
-+    :avocado: tags=stageone
-+    """
-+
-+    sos_cmd = '-o system'
-+    # it is crucial to run the test case with --tmp-dir=/tmp/... as that is
-+    # (an example of) a directory exhibiting the relabel permission denial.
-+    # the /var/tmp directory allows those relabels.
-+    #
-+    # the directory shouldn't exist at this moment, otherwise
-+    # "check to prevent multiple setUp() runs" in sos_tests.py would fail
-+    _tmpdir = '/tmp/rhbz1965001_avocado_test'
-+
-+    def test_no_permission_denied(self):
-+        self.assertSosLogNotContains("Permission denied")
---
-2.26.3
-
diff --git a/SOURCES/sos-bz1967613-sssd-common.patch b/SOURCES/sos-bz1967613-sssd-common.patch
deleted file mode 100644
index 9937972..0000000
--- a/SOURCES/sos-bz1967613-sssd-common.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From 630dfbee936050698d33b59abd1e243c44e50af8 Mon Sep 17 00:00:00 2001
-From: Jan Jansky
-Date: Thu, 3 Jun 2021 15:04:57 +0200
-Subject: [PATCH] [sssd] enable the sssd plugin when only sssd-common is
- installed
-
-We have reports that sssd logs are not
-collected. When we investigated, we found
-that support associates want to collect
-sssd-related logs also when only the
-sssd-common package is installed.
-
-We got this confirmed by sbr-idm.
-
-Resolves: #2571
-
-Signed-off-by: Jan Jansky
----
- sos/report/plugins/sssd.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/report/plugins/sssd.py b/sos/report/plugins/sssd.py
-index 17933935..6f98e90c 100644
---- a/sos/report/plugins/sssd.py
-+++ b/sos/report/plugins/sssd.py
-@@ -19,7 +19,7 @@ class Sssd(Plugin):
-
-     plugin_name = "sssd"
-     profiles = ('services', 'security', 'identity')
--    packages = ('sssd',)
-+    packages = ('sssd', 'sssd-common')
-
-     def setup(self):
-         self.add_copy_spec([
---
-2.26.3
-
diff --git a/SOURCES/sos-bz1973675-ocp-cluster-cleaner.patch b/SOURCES/sos-bz1973675-ocp-cluster-cleaner.patch
deleted file mode 100644
index 205152f..0000000
--- a/SOURCES/sos-bz1973675-ocp-cluster-cleaner.patch
+++ /dev/null
@@ -1,2156 +0,0 @@
-From 29afda6e4ff90385d34bc61315542e7cb4baaf8d Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker
-Date: Fri, 9 Apr 2021 11:32:14 -0400
-Subject: [PATCH] [cleaner] Do not break iteration of parse_string_for_keys on
- first match
-
-Previously, `parse_string_for_keys()`, called by `obfuscate_string()`
-for non-regex based obfuscations, would return on the first match in the
-string found for each parser.
-
-Instead, continue iterating over all items in each parser's dataset
-before returning the (now fully) obfuscated string.
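
The behaviour change is easiest to see in isolation (a self-contained sketch
with made-up mapping data, not the cleaner's real dataset):

    mapping = {'host1.example.com': 'obfuscatedhost1',
               'host2.example.com': 'obfuscatedhost2'}

    def parse_old(s):
        for key, val in mapping.items():
            if key in s:
                return s.replace(key, val)  # stops after the first key
        return s

    def parse_new(s):
        for key, val in mapping.items():
            if key in s:
                s = s.replace(key, val)     # keeps substituting
        return s

    line = 'host1.example.com syncs to host2.example.com'
    assert 'host2.example.com' in parse_old(line)      # second key leaked
    assert 'host2.example.com' not in parse_new(line)  # fully obfuscated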
- -Resolves: #2480 - -Signed-off-by: Jake Hunsaker ---- - sos/cleaner/parsers/__init__.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/cleaner/parsers/__init__.py b/sos/cleaner/parsers/__init__.py -index dd0451df..c77300aa 100644 ---- a/sos/cleaner/parsers/__init__.py -+++ b/sos/cleaner/parsers/__init__.py -@@ -104,7 +104,7 @@ class SoSCleanerParser(): - """ - for key, val in self.mapping.dataset.items(): - if key in string_data: -- return string_data.replace(key, val) -+ string_data = string_data.replace(key, val) - return string_data - - def get_map_contents(self): --- -2.26.3 - -From 52e6b2ae17e128f17a84ee83b7718c2901bcd5bd Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 12 May 2021 12:39:48 -0400 -Subject: [PATCH] [collect] Add options to provide registry auth for pulling - images - -Adds options that allow a user to specify registry authentication, -either via username/password or an authfile, to allow pulling an image -that exists on a non-public registry. - -If a username/password is provided, that will be used. If not, we will -attempt to use an authfile - either provided by the user or by a cluster -profile. - -Also adds an option to forcibly pull a new(er) version of the specified -image, to alleviate conditions where a too-old version of the image -already exists on the host. - -Closes: #2534 - -Signed-off-by: Jake Hunsaker ---- - man/en/sos-collect.1 | 30 +++++++++++++++++++++++ - sos/collector/__init__.py | 17 +++++++++++++ - sos/collector/sosnode.py | 40 +++++++++++++++++++++++++++---- - sos/policies/distros/__init__.py | 16 ++++++++++++- - sos/policies/distros/redhat.py | 25 ++++++++++++------- - sos/policies/runtimes/__init__.py | 25 +++++++++++++++++++ - 6 files changed, 140 insertions(+), 13 deletions(-) - -diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1 -index 286bfe71..cdbc3257 100644 ---- a/man/en/sos-collect.1 -+++ b/man/en/sos-collect.1 -@@ -26,6 +26,11 @@ sos collect \- Collect sosreports from multiple (cluster) nodes - [\-\-no\-pkg\-check] - [\-\-no\-local] - [\-\-master MASTER] -+ [\-\-image IMAGE] -+ [\-\-force-pull-image] -+ [\-\-registry-user USER] -+ [\-\-registry-password PASSWORD] -+ [\-\-registry-authfile FILE] - [\-o ONLY_PLUGINS] - [\-p SSH_PORT] - [\-\-password] -@@ -245,6 +250,31 @@ Specify a master node for the cluster. - If provided, then sos collect will check the master node, not localhost, for determining - the type of cluster in use. - .TP -+\fB\-\-image IMAGE\fR -+Specify an image to use for the temporary container created for collections on -+containerized host, if you do not want to use the default image specifed by the -+host's policy. Note that this should include the registry. -+.TP -+\fB\-\-force-pull-image\fR -+Use this option to force the container runtime to pull the specified image (even -+if it is the policy default image) even if the image already exists on the host. -+This may be useful to update an older container image on containerized hosts. -+.TP -+\fB\-\-registry-user USER\fR -+Specify the username to authenticate to the registry with in order to pull the container -+image -+.TP -+\fB\-\-registry-password PASSWORD\fR -+Specify the password to authenticate to the registry with in order to pull the container -+image. If no password is required, leave this blank. -+.TP -+\fB\-\-registry-authfile FILE\fR -+Specify the filename to use for providing authentication credentials to the registry -+to pull the container image. 
-+ -+Note that this file must exist on the node(s) performing the pull operations, not the -+node from which \fBsos collect\fR was run. -+.TP - \fB\-o\fR ONLY_PLUGINS, \fB\-\-only\-plugins\fR ONLY_PLUGINS - Sosreport option. Run ONLY the plugins listed. - -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index 1c742cf5..0624caad 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -63,6 +63,7 @@ class SoSCollector(SoSComponent): - 'encrypt_pass': '', - 'group': None, - 'image': '', -+ 'force_pull_image': False, - 'jobs': 4, - 'keywords': [], - 'keyword_file': None, -@@ -84,6 +85,9 @@ class SoSCollector(SoSComponent): - 'plugin_timeout': None, - 'cmd_timeout': None, - 'preset': '', -+ 'registry_user': None, -+ 'registry_password': None, -+ 'registry_authfile': None, - 'save_group': '', - 'since': '', - 'skip_commands': [], -@@ -319,6 +323,19 @@ class SoSCollector(SoSComponent): - collect_grp.add_argument('--image', - help=('Specify the container image to use for' - ' containerized hosts.')) -+ collect_grp.add_argument('--force-pull-image', '--pull', default=False, -+ action='store_true', -+ help='Force pull the container image even if ' -+ 'it already exists on the host') -+ collect_grp.add_argument('--registry-user', default=None, -+ help='Username to authenticate to the ' -+ 'registry with for pulling an image') -+ collect_grp.add_argument('--registry-password', default=None, -+ help='Password to authenticate to the ' -+ 'registry with for pulling an image') -+ collect_grp.add_argument('--registry-authfile', default=None, -+ help='Use this authfile to provide registry ' -+ 'authentication when pulling an image') - collect_grp.add_argument('-i', '--ssh-key', help='Specify an ssh key') - collect_grp.add_argument('-j', '--jobs', default=4, type=int, - help='Number of concurrent nodes to collect') -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index 48693342..d1c11824 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -134,9 +134,27 @@ class SosNode(): - """If the host is containerized, create the container we'll be using - """ - if self.host.containerized: -- res = self.run_command(self.host.create_sos_container(), -- need_root=True) -- if res['status'] in [0, 125]: # 125 means container exists -+ cmd = self.host.create_sos_container( -+ image=self.opts.image, -+ auth=self.get_container_auth(), -+ force_pull=self.opts.force_pull_image -+ ) -+ res = self.run_command(cmd, need_root=True) -+ if res['status'] in [0, 125]: -+ if res['status'] == 125: -+ if 'unable to retrieve auth token' in res['stdout']: -+ self.log_error( -+ "Could not pull image. Provide either a username " -+ "and password or authfile" -+ ) -+ raise Exception -+ elif 'unknown: Not found' in res['stdout']: -+ self.log_error('Specified image not found on registry') -+ raise Exception -+ # 'name exists' with code 125 means the container was -+ # created successfully, so ignore it. 
-+ # initial creations leads to an exited container, restarting it -+ # here will keep it alive for us to exec through - ret = self.run_command(self.host.restart_sos_container(), - need_root=True) - if ret['status'] == 0: -@@ -152,6 +170,20 @@ class SosNode(): - % res['stdout']) - raise Exception - -+ def get_container_auth(self): -+ """Determine what the auth string should be to pull the image used to -+ deploy our temporary container -+ """ -+ if self.opts.registry_user: -+ return self.host.runtimes['default'].fmt_registry_credentials( -+ self.opts.registry_user, -+ self.opts.registry_password -+ ) -+ else: -+ return self.host.runtimes['default'].fmt_registry_authfile( -+ self.opts.registry_authfile or self.host.container_authfile -+ ) -+ - def file_exists(self, fname): - """Checks for the presence of fname on the remote node""" - if not self.local: -@@ -343,7 +375,7 @@ class SosNode(): - % self.commons['policy'].distro) - return self.commons['policy'] - host = load(cache={}, sysroot=self.opts.sysroot, init=InitSystem(), -- probe_runtime=False, remote_exec=self.ssh_cmd, -+ probe_runtime=True, remote_exec=self.ssh_cmd, - remote_check=self.read_file('/etc/os-release')) - if host: - self.log_info("loaded policy %s for host" % host.distro) -diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py -index 9fe31513..f5b9fd5b 100644 ---- a/sos/policies/distros/__init__.py -+++ b/sos/policies/distros/__init__.py -@@ -62,6 +62,7 @@ class LinuxPolicy(Policy): - sos_bin_path = '/usr/bin' - sos_container_name = 'sos-collector-tmp' - container_version_command = None -+ container_authfile = None - - def __init__(self, sysroot=None, init=None, probe_runtime=True): - super(LinuxPolicy, self).__init__(sysroot=sysroot, -@@ -626,13 +627,26 @@ class LinuxPolicy(Policy): - """ - return '' - -- def create_sos_container(self): -+ def create_sos_container(self, image=None, auth=None, force_pull=False): - """Returns the command that will create the container that will be - used for running commands inside a container on hosts that require it. - - This will use the container runtime defined for the host type to - launch a container. From there, we use the defined runtime to exec into - the container's namespace. -+ -+ :param image: The name of the image if not using the policy default -+ :type image: ``str`` or ``None`` -+ -+ :param auth: The auth string required by the runtime to pull an -+ image from the registry -+ :type auth: ``str`` or ``None`` -+ -+ :param force_pull: Should the runtime forcibly pull the image -+ :type force_pull: ``bool`` -+ -+ :returns: The command to execute to launch the temp container -+ :rtype: ``str`` - """ - return '' - -diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py -index 241d3f13..20afbcc4 100644 ---- a/sos/policies/distros/redhat.py -+++ b/sos/policies/distros/redhat.py -@@ -452,15 +452,19 @@ support representative. 
- - return self.find_preset(ATOMIC) - -- def create_sos_container(self): -+ def create_sos_container(self, image=None, auth=None, force_pull=False): - _cmd = ("{runtime} run -di --name {name} --privileged --ipc=host" - " --net=host --pid=host -e HOST=/host -e NAME={name} -e " -- "IMAGE={image} -v /run:/run -v /var/log:/var/log -v " -+ "IMAGE={image} {pull} -v /run:/run -v /var/log:/var/log -v " - "/etc/machine-id:/etc/machine-id -v " -- "/etc/localtime:/etc/localtime -v /:/host {image}") -+ "/etc/localtime:/etc/localtime -v /:/host {auth} {image}") -+ _image = image or self.container_image -+ _pull = '--pull=always' if force_pull else '' - return _cmd.format(runtime=self.container_runtime, - name=self.sos_container_name, -- image=self.container_image) -+ image=_image, -+ pull=_pull, -+ auth=auth or '') - - def set_cleanup_cmd(self): - return 'docker rm --force sos-collector-tmp' -@@ -482,6 +486,7 @@ support representative. - container_image = 'registry.redhat.io/rhel8/support-tools' - sos_path_strip = '/host' - container_version_command = 'rpm -q sos' -+ container_authfile = '/var/lib/kubelet/config.json' - - def __init__(self, sysroot=None, init=None, probe_runtime=True, - remote_exec=None): -@@ -511,15 +516,19 @@ support representative. - # RH OCP environments. - return self.find_preset(RHOCP) - -- def create_sos_container(self): -+ def create_sos_container(self, image=None, auth=None, force_pull=False): - _cmd = ("{runtime} run -di --name {name} --privileged --ipc=host" - " --net=host --pid=host -e HOST=/host -e NAME={name} -e " -- "IMAGE={image} -v /run:/run -v /var/log:/var/log -v " -+ "IMAGE={image} {pull} -v /run:/run -v /var/log:/var/log -v " - "/etc/machine-id:/etc/machine-id -v " -- "/etc/localtime:/etc/localtime -v /:/host {image}") -+ "/etc/localtime:/etc/localtime -v /:/host {auth} {image}") -+ _image = image or self.container_image -+ _pull = '--pull=always' if force_pull else '' - return _cmd.format(runtime=self.container_runtime, - name=self.sos_container_name, -- image=self.container_image) -+ image=_image, -+ pull=_pull, -+ auth=auth or '') - - def set_cleanup_cmd(self): - return 'podman rm --force %s' % self.sos_container_name -diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py -index 1a61b644..f28d6a1d 100644 ---- a/sos/policies/runtimes/__init__.py -+++ b/sos/policies/runtimes/__init__.py -@@ -157,6 +157,31 @@ class ContainerRuntime(): - quoted_cmd = cmd - return "%s %s %s" % (self.run_cmd, container, quoted_cmd) - -+ def fmt_registry_credentials(self, username, password): -+ """Format a string to pass to the 'run' command of the runtime to -+ enable authorization for pulling the image during `sos collect`, if -+ needed using username and optional password creds -+ -+ :param username: The name of the registry user -+ :type username: ``str`` -+ -+ :param password: The password of the registry user -+ :type password: ``str`` or ``None`` -+ -+ :returns: The string to use to enable a run command to pull the image -+ :rtype: ``str`` -+ """ -+ return "--creds=%s%s" % (username, ':' + password if password else '') -+ -+ def fmt_registry_authfile(self, authfile): -+ """Format a string to pass to the 'run' command of the runtime to -+ enable authorization for pulling the image during `sos collect`, if -+ needed using an authfile. 
-+ """ -+ if authfile: -+ return "--authfile %s" % authfile -+ return '' -+ - def get_logs_command(self, container): - """Get the command string used to dump container logs from the - runtime --- -2.26.3 - -From 3cbbd7df6f0700609eeef3210d7388298b9e0c21 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 12 May 2021 13:26:45 -0400 -Subject: [PATCH] [sosnode] Allow clusters to set options only for master nodes - -Adds a method the `Cluster` that allows a profile to set sos options -specifically for master nodes. - -Signed-off-by: Jake Hunsaker ---- - sos/collector/clusters/__init__.py | 21 +++++++++++++++++++++ - sos/collector/sosnode.py | 6 ++++++ - 2 files changed, 27 insertions(+) - -diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py -index 5c002bae..bfa3aad3 100644 ---- a/sos/collector/clusters/__init__.py -+++ b/sos/collector/clusters/__init__.py -@@ -137,6 +137,27 @@ class Cluster(): - """ - self.cluster_ssh_key = key - -+ def set_master_options(self, node): -+ """If there is a need to set specific options in the sos command being -+ run on the cluster's master nodes, override this method in the cluster -+ profile and do that here. -+ -+ :param node: The master node -+ :type node: ``SoSNode`` -+ """ -+ pass -+ -+ def check_node_is_master(self, node): -+ """In the event there are multiple masters, or if the collect command -+ is being run from a system that is technically capable of enumerating -+ nodes but the cluster profiles needs to specify master-specific options -+ for other nodes, override this method in the cluster profile -+ -+ :param node: The node for the cluster to check -+ :type node: ``SoSNode`` -+ """ -+ return node.address == self.master.address -+ - def exec_master_cmd(self, cmd, need_root=False): - """Used to retrieve command output from a (master) node in a cluster - -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index d1c11824..62666635 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -647,6 +647,10 @@ class SosNode(): - self.cluster.sos_plugin_options[opt]) - self.opts.plugin_options.append(option) - -+ # set master-only options -+ if self.cluster.check_node_is_master(self): -+ self.cluster.set_master_options(self) -+ - def finalize_sos_cmd(self): - """Use host facts and compare to the cluster type to modify the sos - command if needed""" -@@ -707,6 +711,8 @@ class SosNode(): - os.path.join(self.host.sos_bin_path, self.sos_bin) - ) - -+ self.update_cmd_from_cluster() -+ - if self.opts.only_plugins: - plugs = [o for o in self.opts.only_plugins - if self._plugin_exists(o)] --- -2.26.3 - -From cae9dd79a59107aa92db5f90aed356e093985bd9 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 12 May 2021 16:06:29 -0400 -Subject: [PATCH] [sosnode] Don't fail on sos-less bastion nodes used for node - lists - -If the master node is determined to not have sos installed, that is not -necessarily a fatal error for scenarios where the 'master' node is only -being used to enumerate node lists and is not actually part of the -cluster. This can happen when a user is using a bastion node to -enumerate and connect to the cluster environment, or if the local host -is being used to enumerate nodes via cluster client tooling. 
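
The intended decision can be modelled in miniature (hypothetical types and
hostnames, purely illustrative):

    from dataclasses import dataclass

    @dataclass
    class Node:
        address: str
        sos_version: str = None  # None means sos is not installed

    def missing_sos_is_fatal(node, master_address):
        """A node without sos is only a problem if we collect from it;
        a bastion that merely enumerates the node list is fine."""
        return node.sos_version is None and node.address != master_address

    assert not missing_sos_is_fatal(Node('bastion.example.com'),
                                    'bastion.example.com')
    assert missing_sos_is_fatal(Node('worker1.example.com'),
                                'bastion.example.com')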
- -Signed-off-by: Jake Hunsaker ---- - sos/collector/sosnode.py | 17 ++++++++++++----- - 1 file changed, 12 insertions(+), 5 deletions(-) - -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index 62666635..7e56483d 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -287,13 +287,20 @@ class SosNode(): - # use the containerized policy's command - pkgs = self.run_command(self.host.container_version_command, - use_container=True, need_root=True) -- ver = pkgs['stdout'].strip().split('-')[1] -- if ver: -- self.sos_info['version'] = ver -- if 'version' in self.sos_info: -+ if pkgs['status'] == 0: -+ ver = pkgs['stdout'].strip().split('-')[1] -+ if ver: -+ self.sos_info['version'] = ver -+ else: -+ self.sos_info['version'] = None -+ if self.sos_info['version']: - self.log_info('sos version is %s' % self.sos_info['version']) - else: -- self.log_error('sos is not installed on this node') -+ if not self.address == self.opts.master: -+ # in the case where the 'master' enumerates nodes but is not -+ # intended for collection (bastions), don't worry about sos not -+ # being present -+ self.log_error('sos is not installed on this node') - self.connected = False - return False - cmd = 'sosreport -l' --- -2.26.3 - -From cc5abe563d855dea9ac25f56de2e493228b48bf7 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 12 May 2021 18:26:09 -0400 -Subject: [PATCH] [sosnode] Mark sos commands as explicitly needing root for - containers - -Fixes an issue where the sos inspection commands were not properly -marked as needing to be run as root (either directly or via sudo) for -containerized hosts, which would lead to incorrect sos command -formatting. - -Mark those commands, and the final container removal command, as -explicitly needing root permissions. - -Signed-off-by: Jake Hunsaker ---- - sos/collector/sosnode.py | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index 7e56483d..1fc03076 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -304,7 +304,7 @@ class SosNode(): - self.connected = False - return False - cmd = 'sosreport -l' -- sosinfo = self.run_command(cmd, use_container=True) -+ sosinfo = self.run_command(cmd, use_container=True, need_root=True) - if sosinfo['status'] == 0: - self._load_sos_plugins(sosinfo['stdout']) - if self.check_sos_version('3.6'): -@@ -312,7 +312,7 @@ class SosNode(): - - def _load_sos_presets(self): - cmd = 'sosreport --list-presets' -- res = self.run_command(cmd, use_container=True) -+ res = self.run_command(cmd, use_container=True, need_root=True) - if res['status'] == 0: - for line in res['stdout'].splitlines(): - if line.strip().startswith('name:'): -@@ -996,7 +996,7 @@ class SosNode(): - self.remove_file(self.sos_path + '.md5') - cleanup = self.host.set_cleanup_cmd() - if cleanup: -- self.run_command(cleanup) -+ self.run_command(cleanup, need_root=True) - - def collect_extra_cmd(self, filenames): - """Collect the file created by a cluster outside of sos""" --- -2.26.3 - -From 55e77ad4c7e90ba14b10c5fdf18b65aa5d6b9cf8 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 12 May 2021 18:55:31 -0400 -Subject: [PATCH] [ocp] Add cluster profile for OCP4 - -Removes the previous OCP cluster profile and replaces it with an updated -one for OCP4 which is entirely separated from the kubernetes profile. 
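
As a usage sketch (the label values are invented), the profile's
colon-delimited options are folded into a single label selector before node
enumeration:

    from pipes import quote

    label_opt = 'node-role.kubernetes.io/worker:zone=a'
    labels = ','.join(label_opt.split(':'))
    cmd = "get nodes -o wide -l %s" % quote(labels)
    # -> "get nodes -o wide -l node-role.kubernetes.io/worker,zone=a"
    # (quote() only adds shell quoting when the selector needs it); the
    # profile's fmt_oc_cmd() then prefixes 'oc', plus --config if set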
-
-Resolves: #2544
-
-Signed-off-by: Jake Hunsaker
----
- sos/collector/clusters/kubernetes.py |   8 --
- sos/collector/clusters/ocp.py        | 109 +++++++++++++++++++++++++++
- 2 files changed, 109 insertions(+), 8 deletions(-)
- create mode 100644 sos/collector/clusters/ocp.py
-
-diff --git a/sos/collector/clusters/kubernetes.py b/sos/collector/clusters/kubernetes.py
-index 6a867e31..08fd9554 100644
---- a/sos/collector/clusters/kubernetes.py
-+++ b/sos/collector/clusters/kubernetes.py
-@@ -44,11 +44,3 @@ class kubernetes(Cluster):
-             return nodes
-         else:
-             raise Exception('Node enumeration did not return usable output')
--
--
--class openshift(kubernetes):
--
--    cluster_name = 'OpenShift Container Platform'
--    packages = ('atomic-openshift',)
--    sos_preset = 'ocp'
--    cmd = 'oc'
-diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
-new file mode 100644
-index 00000000..283fcfd1
---- /dev/null
-+++ b/sos/collector/clusters/ocp.py
-@@ -0,0 +1,109 @@
-+# Copyright Red Hat 2021, Jake Hunsaker
-+
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+from pipes import quote
-+from sos.collector.clusters import Cluster
-+
-+
-+class ocp(Cluster):
-+    """OpenShift Container Platform v4"""
-+
-+    cluster_name = 'OpenShift Container Platform v4'
-+    packages = ('openshift-hyperkube', 'openshift-clients')
-+
-+    option_list = [
-+        ('label', '', 'Colon delimited list of labels to select nodes with'),
-+        ('role', '', 'Colon delimited list of roles to select nodes with'),
-+        ('kubeconfig', '', 'Path to the kubeconfig file')
-+    ]
-+
-+    def fmt_oc_cmd(self, cmd):
-+        """Format the oc command to optionally include the kubeconfig file if
-+        one is specified
-+        """
-+        if self.get_option('kubeconfig'):
-+            return "oc --config %s %s" % (self.get_option('kubeconfig'), cmd)
-+        return "oc %s" % cmd
-+
-+    def check_enabled(self):
-+        if super(ocp, self).check_enabled():
-+            return True
-+        _who = self.fmt_oc_cmd('whoami')
-+        return self.exec_master_cmd(_who)['status'] == 0
-+
-+    def _build_dict(self, nodelist):
-+        """From the output of get_nodes(), construct an easier-to-reference
-+        dict of nodes that will be used in determining labels, master status,
-+        etc...
-+ -+ :param nodelist: The split output of `oc get nodes` -+ :type nodelist: ``list`` -+ -+ :returns: A dict of nodes with `get nodes` columns as keys -+ :rtype: ``dict`` -+ """ -+ nodes = {} -+ if 'NAME' in nodelist[0]: -+ # get the index of the fields -+ statline = nodelist.pop(0).split() -+ idx = {} -+ for state in ['status', 'roles', 'version', 'os-image']: -+ try: -+ idx[state] = statline.index(state.upper()) -+ except Exception: -+ pass -+ for node in nodelist: -+ _node = node.split() -+ nodes[_node[0]] = {} -+ for column in idx: -+ nodes[_node[0]][column] = _node[idx[column]] -+ return nodes -+ -+ def get_nodes(self): -+ nodes = [] -+ self.node_dict = {} -+ cmd = 'get nodes -o wide' -+ if self.get_option('label'): -+ labels = ','.join(self.get_option('label').split(':')) -+ cmd += " -l %s" % quote(labels) -+ res = self.exec_master_cmd(self.fmt_oc_cmd(cmd)) -+ if res['status'] == 0: -+ roles = [r for r in self.get_option('role').split(':')] -+ self.node_dict = self._build_dict(res['stdout'].splitlines()) -+ for node in self.node_dict: -+ if roles: -+ for role in roles: -+ if role in node: -+ nodes.append(node) -+ else: -+ nodes.append(node) -+ else: -+ msg = "'oc' command failed" -+ if 'Missing or incomplete' in res['stdout']: -+ msg = ("'oc' failed due to missing kubeconfig on master node." -+ " Specify one via '-c ocp.kubeconfig='") -+ raise Exception(msg) -+ return nodes -+ -+ def set_node_label(self, node): -+ if node.address not in self.node_dict: -+ return '' -+ for label in ['master', 'worker']: -+ if label in self.node_dict[node.address]['roles']: -+ return label -+ return '' -+ -+ def check_node_is_master(self, sosnode): -+ if sosnode.address not in self.node_dict: -+ return False -+ return 'master' in self.node_dict[sosnode.address]['roles'] -+ -+ def set_master_options(self, node): -+ node.opts.enable_plugins.append('openshift') --- -2.26.3 - -From a3c1caad21160545eda87ea1fde93e972a6fbf88 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 26 May 2021 11:55:24 -0400 -Subject: [PATCH] [cleaner] Don't strip empty lines from substituted files - -Fixes an issue where empty lines would be stripped from files that have -other obfuscations in them. Those empty lines may be important for file -structure and/or readability, so we should instead simply not pass empty -lines to the parsers rather than skipping them wholesale in the flow of -writing obfuscations to a temp file before replacing the source file -with a potentially changed temp file. - -Resolves: #2562 - -Signed-off-by: Jake Hunsaker ---- - sos/cleaner/__init__.py | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py -index bdd24f95..55465b85 100644 ---- a/sos/cleaner/__init__.py -+++ b/sos/cleaner/__init__.py -@@ -603,8 +603,6 @@ third party. - tfile = tempfile.NamedTemporaryFile(mode='w', dir=self.tmpdir) - with open(filename, 'r') as fname: - for line in fname: -- if not line.strip(): -- continue - try: - line, count = self.obfuscate_line(line) - subs += count -@@ -642,7 +640,11 @@ third party. 
- - Returns the fully obfuscated line and the number of substitutions made - """ -+ # don't iterate over blank lines, but still write them to the tempfile -+ # to maintain the same structure when we write a scrubbed file back - count = 0 -+ if not line.strip(): -+ return line, count - for parser in self.parsers: - try: - line, _count = parser.parse_line(line) --- -2.26.3 - -From 892bbd8114703f5a4d23aa77ba5829b7ba59446f Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 5 May 2021 17:02:04 -0400 -Subject: [PATCH] [cleaner] Remove binary files by default - -Binary files generally speaking cannot be obfuscated, and as such we -should remove them from archives being obfuscated by default so that -sensitive data is not mistakenly included in an obfuscated archive. - -This commits adds a new `--keep-binary-files` option that if used will -keep any encountered binary files in the final archive. The default -option of `false` will ensure that encountered binary files are removed. - -The number of removed binary files per archive is reported when -obfuscation is completed for that archive. - -Closes: #2478 -Resolves: #2524 - -Signed-off-by: Jake Hunsaker ---- - man/en/sos-clean.1 | 12 ++++ - sos/cleaner/__init__.py | 21 +++++- - sos/cleaner/obfuscation_archive.py | 67 ++++++++++++++++++-- - sos/collector/__init__.py | 5 ++ - sos/report/__init__.py | 6 ++ - 8 files changed, 167 insertions(+), 7 deletions(-) - -diff --git a/man/en/sos-clean.1 b/man/en/sos-clean.1 -index 4856b43b..b77bc63c 100644 ---- a/man/en/sos-clean.1 -+++ b/man/en/sos-clean.1 -@@ -9,6 +9,7 @@ sos clean - Obfuscate sensitive data from one or more sosreports - [\-\-map-file] - [\-\-jobs] - [\-\-no-update] -+ [\-\-keep-binary-files] - - .SH DESCRIPTION - \fBsos clean\fR or \fBsos mask\fR is an sos subcommand used to obfuscate sensitive information from -@@ -77,6 +78,17 @@ Default: 4 - .TP - .B \-\-no-update - Do not write the mapping file contents to /etc/sos/cleaner/default_mapping -+.TP -+.B \-\-keep-binary-files -+Keep unprocessable binary files in the archive, rather than removing them. -+ -+Note that binary files cannot be obfuscated, and thus keeping them in the archive -+may result in otherwise sensitive information being included in the final archive. -+Users should review any archive that keeps binary files in place before sending to -+a third party. -+ -+Default: False (remove encountered binary files) -+ - .SH SEE ALSO - .BR sos (1) - .BR sos-report (1) -diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py -index 55465b85..f88ff8a0 100644 ---- a/sos/cleaner/__init__.py -+++ b/sos/cleaner/__init__.py -@@ -47,6 +47,7 @@ class SoSCleaner(SoSComponent): - 'keyword_file': None, - 'map_file': '/etc/sos/cleaner/default_mapping', - 'no_update': False, -+ 'keep_binary_files': False, - 'target': '', - 'usernames': [] - } -@@ -183,6 +184,11 @@ third party. - action='store_true', - help='Do not update the --map file with new ' - 'mappings from this run') -+ clean_grp.add_argument('--keep-binary-files', default=False, -+ action='store_true', -+ dest='keep_binary_files', -+ help='Keep unprocessable binary files in the ' -+ 'archive instead of removing them') - clean_grp.add_argument('--usernames', dest='usernames', default=[], - action='extend', - help='List of usernames to obfuscate') -@@ -467,6 +473,11 @@ third party. 
- "%s concurrently\n" - % (len(self.report_paths), self.opts.jobs)) - self.ui_log.info(msg) -+ if self.opts.keep_binary_files: -+ self.ui_log.warning( -+ "WARNING: binary files that potentially contain sensitive " -+ "information will NOT be removed from the final archive\n" -+ ) - pool = ThreadPoolExecutor(self.opts.jobs) - pool.map(self.obfuscate_report, self.report_paths, chunksize=1) - pool.shutdown(wait=True) -@@ -539,6 +550,10 @@ third party. - short_name = fname.split(archive.archive_name + '/')[1] - if archive.should_skip_file(short_name): - continue -+ if (not self.opts.keep_binary_files and -+ archive.should_remove_file(short_name)): -+ archive.remove_file(short_name) -+ continue - try: - count = self.obfuscate_file(fname, short_name, - archive.archive_name) -@@ -574,7 +589,11 @@ third party. - arc_md.add_field('files_obfuscated', len(archive.file_sub_list)) - arc_md.add_field('total_substitutions', archive.total_sub_count) - self.completed_reports.append(archive) -- archive.report_msg("Obfuscation completed") -+ rmsg = '' -+ if archive.removed_file_count: -+ rmsg = " [removed %s unprocessable files]" -+ rmsg = rmsg % archive.removed_file_count -+ archive.report_msg("Obfuscation completed%s" % rmsg) - - except Exception as err: - self.ui_log.info("Exception while processing %s: %s" -diff --git a/sos/cleaner/obfuscation_archive.py b/sos/cleaner/obfuscation_archive.py -index c64ab13b..76841b51 100644 ---- a/sos/cleaner/obfuscation_archive.py -+++ b/sos/cleaner/obfuscation_archive.py -@@ -28,6 +28,7 @@ class SoSObfuscationArchive(): - - file_sub_list = [] - total_sub_count = 0 -+ removed_file_count = 0 - - def __init__(self, archive_path, tmpdir): - self.archive_path = archive_path -@@ -62,11 +63,7 @@ class SoSObfuscationArchive(): - 'sys/firmware', - 'sys/fs', - 'sys/kernel/debug', -- 'sys/module', -- r'.*\.tar$', # TODO: support archive unpacking -- # Be explicit with these tar matches to avoid matching commands -- r'.*\.tar\.xz', -- '.*.gz' -+ 'sys/module' - ] - - @property -@@ -76,6 +73,17 @@ class SoSObfuscationArchive(): - except Exception: - return False - -+ def remove_file(self, fname): -+ """Remove a file from the archive. This is used when cleaner encounters -+ a binary file, which we cannot reliably obfuscate. -+ """ -+ full_fname = self.get_file_path(fname) -+ # don't call a blank remove() here -+ if full_fname: -+ self.log_info("Removing binary file '%s' from archive" % fname) -+ os.remove(full_fname) -+ self.removed_file_count += 1 -+ - def extract(self): - if self.is_tarfile: - self.report_msg("Extracting...") -@@ -227,3 +235,52 @@ class SoSObfuscationArchive(): - if filename.startswith(_skip) or re.match(_skip, filename): - return True - return False -+ -+ def should_remove_file(self, fname): -+ """Determine if the file should be removed or not, due to an inability -+ to reliably obfuscate that file based on the filename. 
-+ -+ :param fname: Filename relative to the extracted archive root -+ :type fname: ``str`` -+ -+ :returns: ``True`` if the file cannot be reliably obfuscated -+ :rtype: ``bool`` -+ """ -+ obvious_removes = [ -+ r'.*\.gz', # TODO: support flat gz/xz extraction -+ r'.*\.xz', -+ r'.*\.bzip2', -+ r'.*\.tar\..*', # TODO: support archive unpacking -+ r'.*\.txz$', -+ r'.*\.tgz$', -+ r'.*\.bin', -+ r'.*\.journal', -+ r'.*\~$' -+ ] -+ -+ # if the filename matches, it is obvious we can remove them without -+ # doing the read test -+ for _arc_reg in obvious_removes: -+ if re.match(_arc_reg, fname): -+ return True -+ -+ return self.file_is_binary(fname) -+ -+ def file_is_binary(self, fname): -+ """Determine if the file is a binary file or not. -+ -+ -+ :param fname: Filename relative to the extracted archive root -+ :type fname: ``str`` -+ -+ :returns: ``True`` if file is binary, else ``False`` -+ :rtype: ``bool`` -+ """ -+ with open(self.get_file_path(fname), 'tr') as tfile: -+ try: -+ # when opened as above (tr), reading binary content will raise -+ # an exception -+ tfile.read(1) -+ return False -+ except UnicodeDecodeError: -+ return True -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index 9884836c..469db60d 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -67,6 +67,7 @@ class SoSCollector(SoSComponent): - 'jobs': 4, - 'keywords': [], - 'keyword_file': None, -+ 'keep_binary_files': False, - 'label': '', - 'list_options': False, - 'log_size': 0, -@@ -410,6 +411,10 @@ class SoSCollector(SoSComponent): - dest='clean', - default=False, action='store_true', - help='Obfuscate sensistive information') -+ cleaner_grp.add_argument('--keep-binary-files', default=False, -+ action='store_true', dest='keep_binary_files', -+ help='Keep unprocessable binary files in the ' -+ 'archive instead of removing them') - cleaner_grp.add_argument('--domains', dest='domains', default=[], - action='extend', - help='Additional domain names to obfuscate') -diff --git a/sos/report/__init__.py b/sos/report/__init__.py -index d4345409..2cedc76e 100644 ---- a/sos/report/__init__.py -+++ b/sos/report/__init__.py -@@ -82,6 +82,7 @@ class SoSReport(SoSComponent): - 'case_id': '', - 'chroot': 'auto', - 'clean': False, -+ 'keep_binary_files': False, - 'desc': '', - 'domains': [], - 'dry_run': False, -@@ -344,6 +345,11 @@ class SoSReport(SoSComponent): - default='/etc/sos/cleaner/default_mapping', - help=('Provide a previously generated mapping' - ' file for obfuscation')) -+ cleaner_grp.add_argument('--keep-binary-files', default=False, -+ action='store_true', -+ dest='keep_binary_files', -+ help='Keep unprocessable binary files in the ' -+ 'archive instead of removing them') - cleaner_grp.add_argument('--usernames', dest='usernames', default=[], - action='extend', - help='List of usernames to obfuscate') - -From aed0102a1d6ef9a030c9e5349f092b51b9d1f22d Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Fri, 11 Jun 2021 23:20:59 -0400 -Subject: [PATCH 01/10] [SoSNode] Allow individually setting node options - -Like we now do for primary nodes, add the ability to individually set -node options via a new `set_node_options()` method for when blanket -setting options across all nodes via the options class attrs is not -sufficient. 
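
A hypothetical cluster profile using the new hook might look like the
following (a sketch only; the profile, package and plugin names are
invented, and the option attributes are simplified):

    from sos.collector.clusters import Cluster

    class examplecluster(Cluster):

        cluster_name = 'Example Cluster'
        packages = ('examplecluster-common',)

        def set_node_options(self, node):
            # thin out collection on the non-primary nodes only
            if 'networking' not in node.opts.skip_plugins:
                node.opts.skip_plugins.append('networking')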
- -Signed-off-by: Jake Hunsaker ---- - sos/collector/clusters/__init__.py | 10 ++++++++++ - sos/collector/sosnode.py | 6 ++++-- - 2 files changed, 14 insertions(+), 2 deletions(-) - -diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py -index 90e62d79..c4da1ab8 100644 ---- a/sos/collector/clusters/__init__.py -+++ b/sos/collector/clusters/__init__.py -@@ -137,6 +137,16 @@ class Cluster(): - """ - self.cluster_ssh_key = key - -+ def set_node_options(self, node): -+ """If there is a need to set specific options on ONLY the non-primary -+ nodes in a collection, override this method in the cluster profile -+ and do that here. -+ -+ :param node: The non-primary node -+ :type node: ``SoSNode`` -+ """ -+ pass -+ - def set_master_options(self, node): - """If there is a need to set specific options in the sos command being - run on the cluster's master nodes, override this method in the cluster -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index 1fc03076..7e784aa1 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -657,6 +657,8 @@ class SosNode(): - # set master-only options - if self.cluster.check_node_is_master(self): - self.cluster.set_master_options(self) -+ else: -+ self.cluster.set_node_options(self) - - def finalize_sos_cmd(self): - """Use host facts and compare to the cluster type to modify the sos -@@ -713,13 +715,13 @@ class SosNode(): - sos_opts.append('--cmd-timeout=%s' - % quote(str(self.opts.cmd_timeout))) - -+ self.update_cmd_from_cluster() -+ - sos_cmd = sos_cmd.replace( - 'sosreport', - os.path.join(self.host.sos_bin_path, self.sos_bin) - ) - -- self.update_cmd_from_cluster() -- - if self.opts.only_plugins: - plugs = [o for o in self.opts.only_plugins - if self._plugin_exists(o)] --- -2.26.3 - - -From 96f166699d12704cc7cf73cb8b13278675f68730 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Sat, 12 Jun 2021 00:02:36 -0400 -Subject: [PATCH 02/10] [sosnode] Support passing env vars to `run_command()` - -Updates `run_command()` to support passing new environment variables to -the command being run, for that command alone. This parameter takes a -dict, and if set we will first copy the existing set of env vars on the -node and then update that set of variables using the passed dict. - -Additionally, `execute_sos_command()` will now try to pass a new -`sos_env_vars` dict (default empty) so that clusters may set environment -variables specifically for the sos command being run, without having to -modify the actual sos command being executed. 
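
The copy-then-update pattern described above, in isolation (a minimal
sketch; the TMPDIR value is only an example):

    import os

    def merged_env(extra=None):
        # copy the current environment instead of mutating os.environ,
        # so the extra variables apply to this one command only
        env = os.environ.copy()
        if extra:
            env.update(extra)
        return env

    before = dict(os.environ)
    cmd_env = merged_env({'TMPDIR': '/var/tmp'})
    assert cmd_env['TMPDIR'] == '/var/tmp'
    assert dict(os.environ) == before  # global environment left untouched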
-
-Signed-off-by: Jake Hunsaker
----
- sos/collector/sosnode.py | 27 ++++++++++++++++++++++++---
- 1 file changed, 24 insertions(+), 3 deletions(-)
-
-diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
-index 7e784aa1..40472a4e 100644
---- a/sos/collector/sosnode.py
-+++ b/sos/collector/sosnode.py
-@@ -45,6 +45,8 @@ class SosNode():
-         self.host = None
-         self.cluster = None
-         self.hostname = None
-+        self.sos_env_vars = {}
-+        self._env_vars = {}
-         self._password = password or self.opts.password
-         if not self.opts.nopasswd_sudo and not self.opts.sudo_pw:
-             self.opts.sudo_pw = self._password
-@@ -109,6 +111,21 @@ class SosNode():
-     def _fmt_msg(self, msg):
-         return '{:<{}} : {}'.format(self._hostname, self.hostlen + 1, msg)
-
-+    @property
-+    def env_vars(self):
-+        if not self._env_vars:
-+            if self.local:
-+                self._env_vars = os.environ.copy()
-+            else:
-+                ret = self.run_command("env --null")
-+                if ret['status'] == 0:
-+                    for ln in ret['output'].split('\x00'):
-+                        if not ln:
-+                            continue
-+                        _val = ln.split('=')
-+                        self._env_vars[_val[0]] = _val[1]
-+        return self._env_vars
-+
-     def set_node_manifest(self, manifest):
-         """Set the manifest section that this node will write to
-         """
-@@ -404,7 +421,7 @@ class SosNode():
-         return self.host.package_manager.pkg_by_name(pkg) is not None
-
-     def run_command(self, cmd, timeout=180, get_pty=False, need_root=False,
--                    force_local=False, use_container=False):
-+                    force_local=False, use_container=False, env=None):
-         """Runs a given cmd, either via the SSH session or locally
-
-         Arguments:
-@@ -446,7 +463,10 @@ class SosNode():
-             else:
-                 if get_pty:
-                     cmd = "/bin/bash -c %s" % quote(cmd)
--                res = pexpect.spawn(cmd, encoding='utf-8')
-+                if env:
-+                    _cmd_env = self.env_vars
-+                    _cmd_env.update(env)
-+                res = pexpect.spawn(cmd, encoding='utf-8', env=_cmd_env)
-                 if need_root:
-                     if self.need_sudo:
-                         res.sendline(self.opts.sudo_pw)
-@@ -830,7 +850,8 @@ class SosNode():
-             res = self.run_command(self.sos_cmd,
-                                    timeout=self.opts.timeout,
-                                    get_pty=True, need_root=True,
--                                   use_container=True)
-+                                   use_container=True,
-+                                   env=self.sos_env_vars)
-             if res['status'] == 0:
-                 for line in res['stdout'].splitlines():
-                     if fnmatch.fnmatch(line, '*sosreport-*tar*'):
---
-2.26.3
-
-
-From a9e1632113406a646bdd7525982b699cf790aedb Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker
-Date: Tue, 15 Jun 2021 12:43:27 -0400
-Subject: [PATCH 03/10] [collect|sosnode] Avoiding clobbering sos options
- between nodes
-
-This commit overhauls the function of `finalize_sos_cmd()` in several
-ways.
-
-First, assign the sos report plugin-related options directly to private
-copies of those values for each node, so that the shared cluster profile
-does not clobber options between nodes.
-
-Second, provide a default Lock mechanism for clusters that need to
-perform some node-comparison logic when assigning options based on node
-role.
-
-Finally, finalize the sos command for each node _prior_ to the call to
-`SosNode.sosreport()` so that we can be sure that clusters are able to
-appropriately compare and assign sos options across nodes before some
-nodes have already started and/or finished their own sos report
-collections.
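
Why private copies are needed comes down to list aliasing (illustrative
values; `_assign_config_opts()` in the diff below is what makes the copies):

    shared = ['networking']   # an option list shared by every node

    node_a = shared           # alias: node A mutates the shared list
    node_b = list(shared)     # private copy, as _assign_config_opts() does

    node_a.append('podman')
    assert shared == ['networking', 'podman']  # clobbered for everyone
    assert node_b == ['networking']            # isolated per node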
- -Signed-off-by: Jake Hunsaker ---- - sos/collector/__init__.py | 14 +++++ - sos/collector/clusters/__init__.py | 2 + - sos/collector/sosnode.py | 89 +++++++++++++++++------------- - 3 files changed, 67 insertions(+), 38 deletions(-) - -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index 469db60d..7b8cfcf7 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -1186,6 +1186,10 @@ this utility or remote systems that it connects to. - "concurrently\n" - % (self.report_num, self.opts.jobs)) - -+ npool = ThreadPoolExecutor(self.opts.jobs) -+ npool.map(self._finalize_sos_cmd, self.client_list, chunksize=1) -+ npool.shutdown(wait=True) -+ - pool = ThreadPoolExecutor(self.opts.jobs) - pool.map(self._collect, self.client_list, chunksize=1) - pool.shutdown(wait=True) -@@ -1217,6 +1221,16 @@ this utility or remote systems that it connects to. - except Exception as err: - self.ui_log.error("Upload attempt failed: %s" % err) - -+ def _finalize_sos_cmd(self, client): -+ """Calls finalize_sos_cmd() on each node so that we have the final -+ command before we thread out the actual execution of sos -+ """ -+ try: -+ client.finalize_sos_cmd() -+ except Exception as err: -+ self.log_error("Could not finalize sos command for %s: %s" -+ % (client.address, err)) -+ - def _collect(self, client): - """Runs sosreport on each node""" - try: -diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py -index c4da1ab8..bb728bc0 100644 ---- a/sos/collector/clusters/__init__.py -+++ b/sos/collector/clusters/__init__.py -@@ -11,6 +11,7 @@ - import logging - - from sos.options import ClusterOption -+from threading import Lock - - - class Cluster(): -@@ -66,6 +67,7 @@ class Cluster(): - if cls.__name__ != 'Cluster': - self.cluster_type.append(cls.__name__) - self.node_list = None -+ self.lock = Lock() - self.soslog = logging.getLogger('sos') - self.ui_log = logging.getLogger('sos_ui') - self.options = [] -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index 40472a4e..1c25cc34 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -38,6 +38,7 @@ class SosNode(): - self.address = address.strip() - self.commons = commons - self.opts = commons['cmdlineopts'] -+ self._assign_config_opts() - self.tmpdir = commons['tmpdir'] - self.hostlen = commons['hostlen'] - self.need_sudo = commons['need_sudo'] -@@ -465,8 +466,8 @@ class SosNode(): - cmd = "/bin/bash -c %s" % quote(cmd) - if env: - _cmd_env = self.env_vars -- _cmd_env.update(env) -- res = pexpect.spawn(cmd, encoding='utf-8', env=_cmd_env) -+ env = _cmd_env.update(env) -+ res = pexpect.spawn(cmd, encoding='utf-8', env=env) - if need_root: - if self.need_sudo: - res.sendline(self.opts.sudo_pw) -@@ -484,9 +485,6 @@ class SosNode(): - - def sosreport(self): - """Run a sosreport on the node, then collect it""" -- self.sos_cmd = self.finalize_sos_cmd() -- self.log_info('Final sos command set to %s' % self.sos_cmd) -- self.manifest.add_field('final_sos_command', self.sos_cmd) - try: - path = self.execute_sos_command() - if path: -@@ -656,29 +654,42 @@ class SosNode(): - This will NOT override user supplied options. - """ - if self.cluster.sos_preset: -- if not self.opts.preset: -- self.opts.preset = self.cluster.sos_preset -+ if not self.preset: -+ self.preset = self.cluster.sos_preset - else: - self.log_info('Cluster specified preset %s but user has also ' - 'defined a preset. Using user specification.' 
- % self.cluster.sos_preset) - if self.cluster.sos_plugins: - for plug in self.cluster.sos_plugins: -- if plug not in self.opts.enable_plugins: -- self.opts.enable_plugins.append(plug) -+ if plug not in self.enable_plugins: -+ self.enable_plugins.append(plug) - - if self.cluster.sos_plugin_options: - for opt in self.cluster.sos_plugin_options: -- if not any(opt in o for o in self.opts.plugin_options): -+ if not any(opt in o for o in self.plugin_options): - option = '%s=%s' % (opt, - self.cluster.sos_plugin_options[opt]) -- self.opts.plugin_options.append(option) -+ self.plugin_options.append(option) - - # set master-only options - if self.cluster.check_node_is_master(self): -- self.cluster.set_master_options(self) -+ with self.cluster.lock: -+ self.cluster.set_master_options(self) - else: -- self.cluster.set_node_options(self) -+ with self.cluster.lock: -+ self.cluster.set_node_options(self) -+ -+ def _assign_config_opts(self): -+ """From the global opts configuration, assign those values locally -+ to this node so that they may be acted on individually. -+ """ -+ # assign these to new, private copies -+ self.only_plugins = list(self.opts.only_plugins) -+ self.skip_plugins = list(self.opts.skip_plugins) -+ self.enable_plugins = list(self.opts.enable_plugins) -+ self.plugin_options = list(self.opts.plugin_options) -+ self.preset = list(self.opts.preset) - - def finalize_sos_cmd(self): - """Use host facts and compare to the cluster type to modify the sos -@@ -742,59 +753,61 @@ class SosNode(): - os.path.join(self.host.sos_bin_path, self.sos_bin) - ) - -- if self.opts.only_plugins: -- plugs = [o for o in self.opts.only_plugins -- if self._plugin_exists(o)] -- if len(plugs) != len(self.opts.only_plugins): -- not_only = list(set(self.opts.only_plugins) - set(plugs)) -+ if self.only_plugins: -+ plugs = [o for o in self.only_plugins if self._plugin_exists(o)] -+ if len(plugs) != len(self.only_plugins): -+ not_only = list(set(self.only_plugins) - set(plugs)) - self.log_debug('Requested plugins %s were requested to be ' - 'enabled but do not exist' % not_only) -- only = self._fmt_sos_opt_list(self.opts.only_plugins) -+ only = self._fmt_sos_opt_list(self.only_plugins) - if only: - sos_opts.append('--only-plugins=%s' % quote(only)) -- return "%s %s" % (sos_cmd, ' '.join(sos_opts)) -+ self.sos_cmd = "%s %s" % (sos_cmd, ' '.join(sos_opts)) -+ self.log_info('Final sos command set to %s' % self.sos_cmd) -+ self.manifest.add_field('final_sos_command', self.sos_cmd) -+ return - -- if self.opts.skip_plugins: -+ if self.skip_plugins: - # only run skip-plugins for plugins that are enabled -- skip = [o for o in self.opts.skip_plugins -- if self._check_enabled(o)] -- if len(skip) != len(self.opts.skip_plugins): -- not_skip = list(set(self.opts.skip_plugins) - set(skip)) -+ skip = [o for o in self.skip_plugins if self._check_enabled(o)] -+ if len(skip) != len(self.skip_plugins): -+ not_skip = list(set(self.skip_plugins) - set(skip)) - self.log_debug('Requested to skip plugins %s, but plugins are ' - 'already not enabled' % not_skip) - skipln = self._fmt_sos_opt_list(skip) - if skipln: - sos_opts.append('--skip-plugins=%s' % quote(skipln)) - -- if self.opts.enable_plugins: -+ if self.enable_plugins: - # only run enable for plugins that are disabled -- opts = [o for o in self.opts.enable_plugins -- if o not in self.opts.skip_plugins -+ opts = [o for o in self.enable_plugins -+ if o not in self.skip_plugins - and self._check_disabled(o) and self._plugin_exists(o)] -- if len(opts) != len(self.opts.enable_plugins): 
-- not_on = list(set(self.opts.enable_plugins) - set(opts))
-+ if len(opts) != len(self.enable_plugins):
-+ not_on = list(set(self.enable_plugins) - set(opts))
- self.log_debug('Requested to enable plugins %s, but plugins '
- 'are already enabled or do not exist' % not_on)
- enable = self._fmt_sos_opt_list(opts)
- if enable:
- sos_opts.append('--enable-plugins=%s' % quote(enable))
-
-- if self.opts.plugin_options:
-- opts = [o for o in self.opts.plugin_options
-+ if self.plugin_options:
-+ opts = [o for o in self.plugin_options
- if self._plugin_exists(o.split('.')[0])
- and self._plugin_option_exists(o.split('=')[0])]
- if opts:
- sos_opts.append('-k %s' % quote(','.join(o for o in opts)))
-
-- if self.opts.preset:
-- if self._preset_exists(self.opts.preset):
-- sos_opts.append('--preset=%s' % quote(self.opts.preset))
-+ if self.preset:
-+ if self._preset_exists(self.preset):
-+ sos_opts.append('--preset=%s' % quote(self.preset))
- else:
- self.log_debug('Requested to enable preset %s but preset does '
-- 'not exist on node' % self.opts.preset)
-+ 'not exist on node' % self.preset)
-
-- _sos_cmd = "%s %s" % (sos_cmd, ' '.join(sos_opts))
-- return _sos_cmd
-+ self.sos_cmd = "%s %s" % (sos_cmd, ' '.join(sos_opts))
-+ self.log_info('Final sos command set to %s' % self.sos_cmd)
-+ self.manifest.add_field('final_sos_command', self.sos_cmd)
-
- def determine_sos_label(self):
- """Determine what, if any, label should be added to the sosreport"""
---
-2.26.3
-
-
-From 7e6c078e51143f7064190b316a251ddd8d431495 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker
-Date: Tue, 15 Jun 2021 18:38:34 -0400
-Subject: [PATCH 04/10] [cleaner] Improve handling of symlink obfuscation
-
-Improves handling of symlink obfuscation by only performing the
-obfuscation on the ultimate target of any symlinks encountered. Now,
-when a symlink is encountered, clean will obfuscate the link name and
-re-write it in the archive, pointing to the (potentially obfuscated)
-target name.
-
-Signed-off-by: Jake Hunsaker
----
- sos/cleaner/__init__.py | 65 +++++++++++++++++++++++++++++------------
- 1 file changed, 46 insertions(+), 19 deletions(-)
-
-diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py
-index abfb684b..b38c8dfc 100644
---- a/sos/cleaner/__init__.py
-+++ b/sos/cleaner/__init__.py
-@@ -612,28 +612,55 @@ third party.
- if not filename:
- # the requested file doesn't exist in the archive
- return
-- self.log_debug("Obfuscating %s" % short_name or filename,
-- caller=arc_name)
- subs = 0
-- tfile = tempfile.NamedTemporaryFile(mode='w', dir=self.tmpdir)
-- with open(filename, 'r') as fname:
-- for line in fname:
-- try:
-- line, count = self.obfuscate_line(line)
-- subs += count
-- tfile.write(line)
-- except Exception as err:
-- self.log_debug("Unable to obfuscate %s: %s"
-- % (short_name, err), caller=arc_name)
-- tfile.seek(0)
-- if subs:
-- shutil.copy(tfile.name, filename)
-- tfile.close()
-- _ob_filename = self.obfuscate_string(short_name)
-- if _ob_filename != short_name:
-+ if not os.path.islink(filename):
-+ # don't run the obfuscation on the link, but on the actual file
-+ # at some other point.
-+ self.log_debug("Obfuscating %s" % short_name or filename, -+ caller=arc_name) -+ tfile = tempfile.NamedTemporaryFile(mode='w', dir=self.tmpdir) -+ with open(filename, 'r') as fname: -+ for line in fname: -+ try: -+ line, count = self.obfuscate_line(line) -+ subs += count -+ tfile.write(line) -+ except Exception as err: -+ self.log_debug("Unable to obfuscate %s: %s" -+ % (short_name, err), caller=arc_name) -+ tfile.seek(0) -+ if subs: -+ shutil.copy(tfile.name, filename) -+ tfile.close() -+ -+ _ob_short_name = self.obfuscate_string(short_name.split('/')[-1]) -+ _ob_filename = short_name.replace(short_name.split('/')[-1], -+ _ob_short_name) -+ _sym_changed = False -+ if os.path.islink(filename): -+ _link = os.readlink(filename) -+ _ob_link = self.obfuscate_string(_link) -+ if _ob_link != _link: -+ _sym_changed = True -+ -+ if (_ob_filename != short_name) or _sym_changed: - arc_path = filename.split(short_name)[0] - _ob_path = os.path.join(arc_path, _ob_filename) -- os.rename(filename, _ob_path) -+ # ensure that any plugin subdirs that contain obfuscated strings -+ # get created with obfuscated counterparts -+ if not os.path.islink(filename): -+ os.rename(filename, _ob_path) -+ else: -+ # generate the obfuscated name of the link target -+ _target_ob = self.obfuscate_string(os.readlink(filename)) -+ # remove the unobfuscated original symlink first, in case the -+ # symlink name hasn't changed but the target has -+ os.remove(filename) -+ # create the newly obfuscated symlink, pointing to the -+ # obfuscated target name, which may not exist just yet, but -+ # when the actual file is obfuscated, will be created -+ os.symlink(_target_ob, _ob_path) -+ - return subs - - def obfuscate_string(self, string_data): --- -2.26.3 - - -From b5d166ac9ff79bc3740c5e66f16d60762f9a0ac0 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Tue, 15 Jun 2021 22:56:19 -0400 -Subject: [PATCH 05/10] [cleaner] Iterate over matches with most precise match - first - -When matching strings in parsers to do obfuscation, we should be using -the most precise matches found first, rather than matching in the order -a match is hit. This ensures that we correctly obfuscate an entire -string, rather than potentially only partial substring(s) that exist -within the entire match. 
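-
-A short standalone example of the idea (the strings and mapping below
-are invented for illustration):
-
-    mapping = {'node1.example.com': 'host0.obfuscateddomain0.com',
-               'node1': 'host0'}
-    line = 'node1.example.com is reachable from node1'
-    for match in sorted(mapping, key=len, reverse=True):
-        line = line.replace(match, mapping[match])
-    # replacing the shorter 'node1' first would mangle the FQDN;
-    # longest-first yields
-    # 'host0.obfuscateddomain0.com is reachable from host0'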
-
-Signed-off-by: Jake Hunsaker
----
- sos/cleaner/parsers/__init__.py | 10 +++++++---
- sos/cleaner/parsers/keyword_parser.py | 2 +-
- sos/cleaner/parsers/username_parser.py | 2 +-
- 3 files changed, 9 insertions(+), 5 deletions(-)
-
-diff --git a/sos/cleaner/parsers/__init__.py b/sos/cleaner/parsers/__init__.py
-index c77300aa..cfa20b95 100644
---- a/sos/cleaner/parsers/__init__.py
-+++ b/sos/cleaner/parsers/__init__.py
-@@ -82,10 +82,12 @@ class SoSCleanerParser():
- for pattern in self.regex_patterns:
- matches = [m[0] for m in re.findall(pattern, line, re.I)]
- if matches:
-+ matches.sort(reverse=True, key=lambda x: len(x))
- count += len(matches)
- for match in matches:
-- new_match = self.mapping.get(match.strip())
-- line = line.replace(match.strip(), new_match)
-+ match = match.strip()
-+ new_match = self.mapping.get(match)
-+ line = line.replace(match, new_match)
- return line, count
-
- def parse_string_for_keys(self, string_data):
-@@ -102,7 +104,9 @@ class SoSCleanerParser():
- :returns: The obfuscated line
- :rtype: ``str``
- """
-- for key, val in self.mapping.dataset.items():
-+ for pair in sorted(self.mapping.dataset.items(), reverse=True,
-+ key=lambda x: len(x[0])):
-+ key, val = pair
- if key in string_data:
- string_data = string_data.replace(key, val)
- return string_data
-diff --git a/sos/cleaner/parsers/keyword_parser.py b/sos/cleaner/parsers/keyword_parser.py
-index 3dc2b7f0..9134f82d 100644
---- a/sos/cleaner/parsers/keyword_parser.py
-+++ b/sos/cleaner/parsers/keyword_parser.py
-@@ -42,7 +42,7 @@ class SoSKeywordParser(SoSCleanerParser):
-
- def parse_line(self, line):
- count = 0
-- for keyword in self.user_keywords:
-+ for keyword in sorted(self.user_keywords, reverse=True):
- if keyword in line:
- line = line.replace(keyword, self.mapping.get(keyword))
- count += 1
-diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py
-index 2bb6c7f3..0c3bbac4 100644
---- a/sos/cleaner/parsers/username_parser.py
-+++ b/sos/cleaner/parsers/username_parser.py
-@@ -51,7 +51,7 @@ class SoSUsernameParser(SoSCleanerParser):
-
- def parse_line(self, line):
- count = 0
-- for username in self.mapping.dataset.keys():
-+ for username in sorted(self.mapping.dataset.keys(), reverse=True):
- if username in line:
- count = line.count(username)
- line = line.replace(username, self.mapping.get(username))
---
-2.26.3
-
-
-From 7ed138fcd2ee6ece3e7fbd9e48293b212e0b4e41 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker
-Date: Wed, 16 Jun 2021 01:15:45 -0400
-Subject: [PATCH 06/10] [cleaner] Explicitly obfuscate directory names within
- archives
-
-This commit adds a step to `obfuscate_report()` that explicitly walks
-through all directories in the archive, and obfuscates the directory
-names if necessary.
-
-Since this uses `obfuscate_string()` for the directory names, a
-`skip_keys` list has been added to maps to allow parsers/maps to
-specify matched keys (such as short names for the hostname parser) that
-should not be considered when obfuscating directory names (e.g. 'www').
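-
-A rough sketch of the walk (assuming an obfuscate_string() helper;
-this is illustrative, not the exact implementation):
-
-    import os
-
-    def obfuscate_directory_names(root, obfuscate_string):
-        # visit the deepest paths first so renaming a child never
-        # invalidates a parent path that still needs to be visited
-        for dirpath in sorted((d for d, _, _ in os.walk(root)),
-                              reverse=True):
-            parent, name = os.path.split(dirpath)
-            ob_name = obfuscate_string(name)
-            if ob_name != name:
-                os.rename(dirpath, os.path.join(parent, ob_name))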
- -Closes: #2465 - -Signed-off-by: Jake Hunsaker ---- - sos/cleaner/__init__.py | 26 ++++++++++++++++++++++++++ - sos/cleaner/mappings/__init__.py | 4 +++- - sos/cleaner/mappings/hostname_map.py | 5 +++++ - sos/cleaner/obfuscation_archive.py | 20 ++++++++++++++++++-- - sos/cleaner/parsers/__init__.py | 2 ++ - 5 files changed, 54 insertions(+), 3 deletions(-) - -diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py -index b38c8dfc..88d4d0ea 100644 ---- a/sos/cleaner/__init__.py -+++ b/sos/cleaner/__init__.py -@@ -562,6 +562,11 @@ third party. - except Exception as err: - self.log_debug("Unable to parse file %s: %s" - % (short_name, err)) -+ try: -+ self.obfuscate_directory_names(archive) -+ except Exception as err: -+ self.log_info("Failed to obfuscate directories: %s" % err, -+ caller=archive.archive_name) - - # if the archive was already a tarball, repack it - method = archive.get_compression() -@@ -663,6 +668,27 @@ third party. - - return subs - -+ def obfuscate_directory_names(self, archive): -+ """For all directories that exist within the archive, obfuscate the -+ directory name if it contains sensitive strings found during execution -+ """ -+ self.log_info("Obfuscating directory names in archive %s" -+ % archive.archive_name) -+ for dirpath in sorted(archive.get_directory_list(), reverse=True): -+ for _name in os.listdir(dirpath): -+ _dirname = os.path.join(dirpath, _name) -+ _arc_dir = _dirname.split(archive.extracted_path)[-1] -+ if os.path.isdir(_dirname): -+ _ob_dirname = self.obfuscate_string(_name) -+ if _ob_dirname != _name: -+ _ob_arc_dir = _arc_dir.rstrip(_name) -+ _ob_arc_dir = os.path.join( -+ archive.extracted_path, -+ _ob_arc_dir.lstrip('/'), -+ _ob_dirname -+ ) -+ os.rename(_dirname, _ob_arc_dir) -+ - def obfuscate_string(self, string_data): - for parser in self.parsers: - try: -diff --git a/sos/cleaner/mappings/__init__.py b/sos/cleaner/mappings/__init__.py -index dd464e5a..5cf5c8b2 100644 ---- a/sos/cleaner/mappings/__init__.py -+++ b/sos/cleaner/mappings/__init__.py -@@ -20,8 +20,10 @@ class SoSMap(): - corresponding SoSMap() object, to allow for easy retrieval of obfuscated - items. 
- """ -- -+ # used for regex skips in parser.parse_line() - ignore_matches = [] -+ # used for filename obfuscations in parser.parse_string_for_keys() -+ skip_keys = [] - - def __init__(self): - self.dataset = {} -diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py -index e0b7bf1d..c9a44d8d 100644 ---- a/sos/cleaner/mappings/hostname_map.py -+++ b/sos/cleaner/mappings/hostname_map.py -@@ -35,6 +35,11 @@ class SoSHostnameMap(SoSMap): - '^com..*' - ] - -+ skip_keys = [ -+ 'www', -+ 'api' -+ ] -+ - host_count = 0 - domain_count = 0 - _domains = {} -diff --git a/sos/cleaner/obfuscation_archive.py b/sos/cleaner/obfuscation_archive.py -index 88f978d9..90188358 100644 ---- a/sos/cleaner/obfuscation_archive.py -+++ b/sos/cleaner/obfuscation_archive.py -@@ -202,10 +202,22 @@ class SoSObfuscationArchive(): - """Return a list of all files within the archive""" - self.file_list = [] - for dirname, dirs, files in os.walk(self.extracted_path): -+ for _dir in dirs: -+ _dirpath = os.path.join(dirname, _dir) -+ # catch dir-level symlinks -+ if os.path.islink(_dirpath) and os.path.isdir(_dirpath): -+ self.file_list.append(_dirpath) - for filename in files: - self.file_list.append(os.path.join(dirname, filename)) - return self.file_list - -+ def get_directory_list(self): -+ """Return a list of all directories within the archive""" -+ dir_list = [] -+ for dirname, dirs, files in os.walk(self.extracted_path): -+ dir_list.append(dirname) -+ return dir_list -+ - def update_sub_count(self, fname, count): - """Called when a file has finished being parsed and used to track - total substitutions made and number of files that had changes made -@@ -230,7 +242,8 @@ class SoSObfuscationArchive(): - archive root - """ - -- if not os.path.isfile(self.get_file_path(filename)): -+ if (not os.path.isfile(self.get_file_path(filename)) and not -+ os.path.islink(self.get_file_path(filename))): - return True - - for _skip in self.skip_list: -@@ -266,7 +279,10 @@ class SoSObfuscationArchive(): - if re.match(_arc_reg, fname): - return True - -- return self.file_is_binary(fname) -+ if os.path.isfile(self.get_file_path(fname)): -+ return self.file_is_binary(fname) -+ # don't fail on dir-level symlinks -+ return False - - def file_is_binary(self, fname): - """Determine if the file is a binary file or not. -diff --git a/sos/cleaner/parsers/__init__.py b/sos/cleaner/parsers/__init__.py -index cfa20b95..84874475 100644 ---- a/sos/cleaner/parsers/__init__.py -+++ b/sos/cleaner/parsers/__init__.py -@@ -107,6 +107,8 @@ class SoSCleanerParser(): - for pair in sorted(self.mapping.dataset.items(), reverse=True, - key=lambda x: len(x[0])): - key, val = pair -+ if key in self.mapping.skip_keys: -+ continue - if key in string_data: - string_data = string_data.replace(key, val) - return string_data --- -2.26.3 - - -From f180150277b706e72f2445287f3d0b6943efa252 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Wed, 16 Jun 2021 02:24:51 -0400 -Subject: [PATCH 07/10] [hostname parser,map] Attempt to detect strings with - FQDN substrings - -This commit updates the hostname parser and associated map to be able to -better detect and obfuscate FQDN substrings within file content and file -names, particularly when the regex patterns failed to match a hostname -that is formatted with '_' characters rather than '.' characters. 
-
-The `get()` method has been updated to allow preserving characters and
-certain extensions that are not part of the FQDN, but are brought in by
-the regex pattern because we need to use word boundary
-indicators within the pattern.
-
-Signed-off-by: Jake Hunsaker
----
- sos/cleaner/mappings/hostname_map.py | 59 +++++++++++++++++++++++---
- sos/cleaner/parsers/__init__.py | 3 +-
- sos/cleaner/parsers/hostname_parser.py | 30 ++++++++++---
- 3 files changed, 81 insertions(+), 11 deletions(-)
-
-diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py
-index c9a44d8d..d4b2c88e 100644
---- a/sos/cleaner/mappings/hostname_map.py
-+++ b/sos/cleaner/mappings/hostname_map.py
-@@ -104,7 +104,7 @@ class SoSHostnameMap(SoSMap):
- host = domain.split('.')
- if len(host) == 1:
- # don't block on host's shortname
-- return True
-+ return host[0] in self.hosts.keys()
- else:
- domain = host[0:-1]
- for known_domain in self._domains:
-@@ -113,12 +113,59 @@ class SoSHostnameMap(SoSMap):
- return False
-
- def get(self, item):
-- if item.startswith(('.', '_')):
-- item = item.lstrip('._')
-- item = item.strip()
-+ prefix = ''
-+ suffix = ''
-+ final = None
-+ # The regex pattern match may include a leading and/or trailing '_'
-+ # character due to the need to use word boundary matching, so we need
-+ # to strip these from the string during processing, but still keep them
-+ # in the returned string to not mangle the string replacement in the
-+ # context of the file or filename
-+ while item.startswith(('.', '_')):
-+ prefix += item[0]
-+ item = item[1:]
-+ while item.endswith(('.', '_')):
-+ suffix += item[-1]
-+ item = item[0:-1]
- if not self.domain_name_in_loaded_domains(item.lower()):
- return item
-+ if item.endswith(('.yaml', '.yml', '.crt', '.key', '.pem')):
-+ ext = '.'
+ item.split('.')[-1] -+ item = item.replace(ext, '') -+ suffix += ext -+ if item not in self.dataset.keys(): -+ # try to account for use of '-' in names that include hostnames -+ # and don't create new mappings for each of these -+ for _existing in sorted(self.dataset.keys(), reverse=True, -+ key=lambda x: len(x)): -+ _host_substr = False -+ _test = item.split(_existing) -+ _h = _existing.split('.') -+ # avoid considering a full FQDN match as a new match off of -+ # the hostname of an existing match -+ if _h[0] and _h[0] in self.hosts.keys(): -+ _host_substr = True -+ if len(_test) == 1 or not _test[0]: -+ # does not match existing obfuscation -+ continue -+ elif _test[0].endswith('.') and not _host_substr: -+ # new hostname in known domain -+ final = super(SoSHostnameMap, self).get(item) -+ break -+ elif item.split(_test[0]): -+ # string that includes existing FQDN obfuscation substring -+ # so, only obfuscate the FQDN part -+ try: -+ itm = item.split(_test[0])[1] -+ final = _test[0] + super(SoSHostnameMap, self).get(itm) -+ break -+ except Exception: -+ # fallback to still obfuscating the entire item -+ pass -+ -+ if not final: -+ final = super(SoSHostnameMap, self).get(item) -+ return prefix + final + suffix - - def sanitize_item(self, item): - host = item.split('.') -@@ -146,6 +193,8 @@ class SoSHostnameMap(SoSMap): - """Obfuscate the short name of the host with an incremented counter - based on the total number of obfuscated host names - """ -+ if not hostname: -+ return hostname - if hostname not in self.hosts: - ob_host = "host%s" % self.host_count - self.hosts[hostname] = ob_host -diff --git a/sos/cleaner/parsers/__init__.py b/sos/cleaner/parsers/__init__.py -index 84874475..57d2020a 100644 ---- a/sos/cleaner/parsers/__init__.py -+++ b/sos/cleaner/parsers/__init__.py -@@ -87,7 +87,8 @@ class SoSCleanerParser(): - for match in matches: - match = match.strip() - new_match = self.mapping.get(match) -- line = line.replace(match, new_match) -+ if new_match != match: -+ line = line.replace(match, new_match) - return line, count - - def parse_string_for_keys(self, string_data): -diff --git a/sos/cleaner/parsers/hostname_parser.py b/sos/cleaner/parsers/hostname_parser.py -index 9982024b..3de6bb08 100644 ---- a/sos/cleaner/parsers/hostname_parser.py -+++ b/sos/cleaner/parsers/hostname_parser.py -@@ -18,7 +18,7 @@ class SoSHostnameParser(SoSCleanerParser): - map_file_key = 'hostname_map' - prep_map_file = 'sos_commands/host/hostname' - regex_patterns = [ -- r'(((\b|_)[a-zA-Z0-9-\.]{1,200}\.[a-zA-Z]{1,63}\b))' -+ r'(((\b|_)[a-zA-Z0-9-\.]{1,200}\.[a-zA-Z]{1,63}(\b|_)))' - ] - - def __init__(self, conf_file=None, opt_domains=None): -@@ -66,10 +66,30 @@ class SoSHostnameParser(SoSCleanerParser): - """Override the default parse_line() method to also check for the - shortname of the host derived from the hostname. 
- """ -+ -+ def _check_line(ln, count, search, repl=None): -+ """Perform a second manual check for substrings that may have been -+ missed by regex matching -+ """ -+ if search in self.mapping.skip_keys: -+ return ln, count -+ if search in ln: -+ count += ln.count(search) -+ ln = ln.replace(search, self.mapping.get(repl or search)) -+ return ln, count -+ - count = 0 - line, count = super(SoSHostnameParser, self).parse_line(line) -- for short_name in self.short_names: -- if short_name in line: -- count += 1 -- line = line.replace(short_name, self.mapping.get(short_name)) -+ # make an additional pass checking for '_' formatted substrings that -+ # the regex patterns won't catch -+ hosts = [h for h in self.mapping.dataset.keys() if '.' in h] -+ for host in sorted(hosts, reverse=True, key=lambda x: len(x)): -+ fqdn = host -+ for c in '.-': -+ fqdn = fqdn.replace(c, '_') -+ line, count = _check_line(line, count, fqdn, host) -+ -+ for short_name in sorted(self.short_names, reverse=True): -+ line, count = _check_line(line, count, short_name) -+ - return line, count --- -2.26.3 - - -From ec46e6a8fac58ed757344be3751eb1f925eab981 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Mon, 14 Jun 2021 09:31:07 -0400 -Subject: [PATCH 08/10] [ocp] Refine OCP node options in cluster profile - -Adds explicit setting of primary/node sos options for the `openshift` -plugin within the cluster, rather than relying on default configurations -and best practices to avoid duplicate collections. - -Signed-off-by: Jake Hunsaker ---- - sos/collector/clusters/ocp.py | 65 +++++++++++++++++++++++++++++++++-- - sos/collector/sosnode.py | 4 +-- - 2 files changed, 65 insertions(+), 4 deletions(-) - -diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py -index 283fcfd1..ddff84a4 100644 ---- a/sos/collector/clusters/ocp.py -+++ b/sos/collector/clusters/ocp.py -@@ -8,6 +8,8 @@ - # - # See the LICENSE file in the source distribution for further information. 
-
-+import os
-+
- from pipes import quote
- from sos.collector.clusters import Cluster
-
-@@ -18,10 +20,14 @@ class ocp(Cluster):
- cluster_name = 'OpenShift Container Platform v4'
- packages = ('openshift-hyperkube', 'openshift-clients')
-
-+ api_collect_enabled = False
-+ token = None
-+
- option_list = [
- ('label', '', 'Colon delimited list of labels to select nodes with'),
- ('role', '', 'Colon delimited list of roles to select nodes with'),
-- ('kubeconfig', '', 'Path to the kubeconfig file')
-+ ('kubeconfig', '', 'Path to the kubeconfig file'),
-+ ('token', '', 'Service account token to use for oc authorization')
- ]
-
- def fmt_oc_cmd(self, cmd):
-@@ -32,9 +38,20 @@ class ocp(Cluster):
- return "oc --config %s %s" % (self.get_option('kubeconfig'), cmd)
- return "oc %s" % cmd
-
-+ def _attempt_oc_login(self):
-+ """Attempt to login to the API using the oc command using a provided
-+ token
-+ """
-+ _res = self.exec_primary_cmd("oc login --insecure-skip-tls-verify=True"
-+ " --token=%s" % self.token)
-+ return _res['status'] == 0
-+
- def check_enabled(self):
- if super(ocp, self).check_enabled():
- return True
-+ self.token = self.get_option('token') or os.getenv('SOSOCPTOKEN', None)
-+ if self.token:
-+ self._attempt_oc_login()
- _who = self.fmt_oc_cmd('whoami')
- return self.exec_master_cmd(_who)['status'] == 0
-
-@@ -106,4 +123,48 @@ class ocp(Cluster):
- return 'master' in self.node_dict[sosnode.address]['roles']
-
- def set_master_options(self, node):
-- node.opts.enable_plugins.append('openshift')
-+ node.enable_plugins.append('openshift')
-+ if self.api_collect_enabled:
-+ # a primary has already been enabled for API collection, disable
-+ # it among others
-+ node.plugin_options.append('openshift.no-oc=on')
-+ else:
-+ _oc_cmd = 'oc'
-+ if node.host.containerized:
-+ _oc_cmd = '/host/bin/oc'
-+ # when run from a container, the oc command does not inherit
-+ # the default config, so if it's present then pass it here to
-+ # detect a functional oc command.
This is sidestepped in sos -+ # report by being able to chroot the `oc` execution which we -+ # cannot do remotely -+ if node.file_exists('/root/.kube/config', need_root=True): -+ _oc_cmd += ' --kubeconfig /host/root/.kube/config' -+ can_oc = node.run_command("%s whoami" % _oc_cmd, -+ use_container=node.host.containerized, -+ # container is available only to root -+ # and if rhel, need to run sos as root -+ # anyways which will run oc as root -+ need_root=True) -+ if can_oc['status'] == 0: -+ # the primary node can already access the API -+ self.api_collect_enabled = True -+ elif self.token: -+ node.sos_env_vars['SOSOCPTOKEN'] = self.token -+ self.api_collect_enabled = True -+ elif self.get_option('kubeconfig'): -+ kc = self.get_option('kubeconfig') -+ if node.file_exists(kc): -+ if node.host.containerized: -+ kc = "/host/%s" % kc -+ node.sos_env_vars['KUBECONFIG'] = kc -+ self.api_collect_enabled = True -+ if self.api_collect_enabled: -+ msg = ("API collections will be performed on %s\nNote: API " -+ "collections may extend runtime by 10s of minutes\n" -+ % node.address) -+ self.soslog.info(msg) -+ self.ui_log.info(msg) -+ -+ def set_node_options(self, node): -+ # don't attempt OC API collections on non-primary nodes -+ node.plugin_options.append('openshift.no-oc=on') -diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py -index 1c25cc34..6597d236 100644 ---- a/sos/collector/sosnode.py -+++ b/sos/collector/sosnode.py -@@ -202,11 +202,11 @@ class SosNode(): - self.opts.registry_authfile or self.host.container_authfile - ) - -- def file_exists(self, fname): -+ def file_exists(self, fname, need_root=False): - """Checks for the presence of fname on the remote node""" - if not self.local: - try: -- res = self.run_command("stat %s" % fname) -+ res = self.run_command("stat %s" % fname, need_root=need_root) - return res['status'] == 0 - except Exception: - return False --- -2.26.3 - - -From eea8e15845a8bcba91b93a5310ba693e8c20ab9c Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Thu, 17 Jun 2021 09:52:36 -0400 -Subject: [PATCH 09/10] [cleaner] Don't obfuscate default 'core' user - -The 'core' user is a common default user on containerized hosts, and -obfuscation of it is not advantageous, much like the default 'ubuntu' -user for that distribution. - -Signed-off-by: Jake Hunsaker ---- - sos/cleaner/parsers/username_parser.py | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py -index 0c3bbac4..64843205 100644 ---- a/sos/cleaner/parsers/username_parser.py -+++ b/sos/cleaner/parsers/username_parser.py -@@ -28,6 +28,7 @@ class SoSUsernameParser(SoSCleanerParser): - prep_map_file = 'sos_commands/login/lastlog_-u_1000-60000' - regex_patterns = [] - skip_list = [ -+ 'core', - 'nobody', - 'nfsnobody', - 'root' --- -2.26.3 - - -From 581429ca65131711c96f9d56bf2f0e18779aec2e Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Fri, 18 Jun 2021 14:26:55 -0400 -Subject: [PATCH 10/10] [cleaner] Fix checksum and archive pruning from archive - list - -Fixes an issue where checksums may have gotten into the list of archives -to be cleaned, which would cause further issues later. Additionally, -prevents nested sosreports from top-level archives (such as from -`collect`) from being removed for being a binary file when that -top-level archive gets obfuscated. 
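-
-The pruning itself can be shown standalone (file names below are
-invented for the example):
-
-    import re
-
-    candidates = ['sosreport-node1.tar.xz',
-                  'sosreport-node1.tar.xz.md5',
-                  'sosreport-node1.tar.xz.sha256']
-    archives = [f for f in candidates
-                if re.match('sosreport-.*.tar', f)
-                and not f.endswith(('.md5', '.sha256'))]
-    # only the actual archive remains; the checksum files are pruned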
---- - sos/cleaner/__init__.py | 5 +++-- - sos/cleaner/obfuscation_archive.py | 1 + - 2 files changed, 4 insertions(+), 2 deletions(-) - -diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py -index 88d4d0ea..8280bc50 100644 ---- a/sos/cleaner/__init__.py -+++ b/sos/cleaner/__init__.py -@@ -226,8 +226,7 @@ third party. - nested_archives = [] - for _file in archive.getmembers(): - if (re.match('sosreport-.*.tar', _file.name.split('/')[-1]) and not -- (_file.name.endswith('.md5') or -- _file.name.endswith('.sha256'))): -+ (_file.name.endswith(('.md5', '.sha256')))): - nested_archives.append(_file.name.split('/')[-1]) - - if nested_archives: -@@ -235,6 +234,8 @@ third party. - nested_path = self.extract_archive(archive) - for arc_file in os.listdir(nested_path): - if re.match('sosreport.*.tar.*', arc_file): -+ if arc_file.endswith(('.md5', '.sha256')): -+ continue - self.report_paths.append(os.path.join(nested_path, - arc_file)) - # add the toplevel extracted archive -diff --git a/sos/cleaner/obfuscation_archive.py b/sos/cleaner/obfuscation_archive.py -index 90188358..e357450b 100644 ---- a/sos/cleaner/obfuscation_archive.py -+++ b/sos/cleaner/obfuscation_archive.py -@@ -58,6 +58,7 @@ class SoSObfuscationArchive(): - Returns: list of files and file regexes - """ - return [ -+ 'sosreport-', - 'sys/firmware', - 'sys/fs', - 'sys/kernel/debug', --- -2.26.3 - diff --git a/SOURCES/sos-bz1985037-cleaner-AD-users-obfuscation.patch b/SOURCES/sos-bz1985037-cleaner-AD-users-obfuscation.patch deleted file mode 100644 index 2e5835a..0000000 --- a/SOURCES/sos-bz1985037-cleaner-AD-users-obfuscation.patch +++ /dev/null @@ -1,142 +0,0 @@ -From 7e471676fe41dab155a939c60446cc7b7dab773b Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Tue, 20 Jul 2021 11:09:29 -0400 -Subject: [PATCH] [username parser] Load usernames from `last` for LDAP users - -AD/LDAP users are not reported into `lastlog` generally, however they -are reported in `last`. Conversely, `last` does not report local users -who have not logged in but still exist. - -In order to obfuscate both kinds of users, we need to look at both -sources. - -For this, first allow parsers to specify multiple prep files. Second, -update the username parser to search through all `lastlog` collections -as well as the `last` collection. - -Also includes a small update to the username parser's prep loading logic -to ensure we are iterating over each username discovered only once. - -Signed-off-by: Jake Hunsaker ---- - sos/cleaner/__init__.py | 38 ++++++++++++++------------ - sos/cleaner/parsers/__init__.py | 2 +- - sos/cleaner/parsers/username_parser.py | 24 +++++++++++++--- - 3 files changed, 42 insertions(+), 22 deletions(-) - -diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py -index ca5f93e5..6aadfe79 100644 ---- a/sos/cleaner/__init__.py -+++ b/sos/cleaner/__init__.py -@@ -518,23 +518,27 @@ third party. 
- for _parser in self.parsers: - if not _parser.prep_map_file: - continue -- _arc_path = os.path.join(_arc_name, _parser.prep_map_file) -- try: -- if is_dir: -- _pfile = open(_arc_path, 'r') -- content = _pfile.read() -- else: -- _pfile = archive.extractfile(_arc_path) -- content = _pfile.read().decode('utf-8') -- _pfile.close() -- if isinstance(_parser, SoSUsernameParser): -- _parser.load_usernames_into_map(content) -- for line in content.splitlines(): -- if isinstance(_parser, SoSHostnameParser): -- _parser.load_hostname_into_map(line) -- self.obfuscate_line(line) -- except Exception as err: -- self.log_debug("Could not prep %s: %s" % (_arc_path, err)) -+ if isinstance(_parser.prep_map_file, str): -+ _parser.prep_map_file = [_parser.prep_map_file] -+ for parse_file in _parser.prep_map_file: -+ _arc_path = os.path.join(_arc_name, parse_file) -+ try: -+ if is_dir: -+ _pfile = open(_arc_path, 'r') -+ content = _pfile.read() -+ else: -+ _pfile = archive.extractfile(_arc_path) -+ content = _pfile.read().decode('utf-8') -+ _pfile.close() -+ if isinstance(_parser, SoSUsernameParser): -+ _parser.load_usernames_into_map(content) -+ for line in content.splitlines(): -+ if isinstance(_parser, SoSHostnameParser): -+ _parser.load_hostname_into_map(line) -+ self.obfuscate_line(line) -+ except Exception as err: -+ self.log_debug("Could not prep %s: %s" -+ % (_arc_path, err)) - - def obfuscate_report(self, report): - """Individually handle each archive or directory we've discovered by -diff --git a/sos/cleaner/parsers/__init__.py b/sos/cleaner/parsers/__init__.py -index 3076db39..af6e375e 100644 ---- a/sos/cleaner/parsers/__init__.py -+++ b/sos/cleaner/parsers/__init__.py -@@ -50,7 +50,7 @@ class SoSCleanerParser(): - skip_line_patterns = [] - skip_files = [] - map_file_key = 'unset' -- prep_map_file = 'unset' -+ prep_map_file = [] - - def __init__(self, conf_file=None): - # attempt to load previous run data into the mapping for the parser -diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py -index 96ce5f0c..b142e371 100644 ---- a/sos/cleaner/parsers/username_parser.py -+++ b/sos/cleaner/parsers/username_parser.py -@@ -25,13 +25,24 @@ class SoSUsernameParser(SoSCleanerParser - - name = 'Username Parser' - map_file_key = 'username_map' -- prep_map_file = 'sos_commands/login/lastlog_-u_1000-60000' -+ prep_map_file = [ -+ 'sos_commands/login/lastlog_-u_1000-60000', -+ 'sos_commands/login/lastlog_-u_60001-65536', -+ 'sos_commands/login/lastlog_-u_65537-4294967295', -+ # AD users will be reported here, but favor the lastlog files since -+ # those will include local users who have not logged in -+ 'sos_commands/login/last' -+ ] - regex_patterns = [] - skip_list = [ - 'core', - 'nobody', - 'nfsnobody', -- 'root' -+ 'shutdown', -+ 'reboot', -+ 'root', -+ 'ubuntu', -+ 'wtmp' - ] - - def __init__(self, conf_file=None, opt_names=None): -@@ -44,11 +54,17 @@ class SoSUsernameParser(SoSCleanerParser): - """Since we don't get the list of usernames from a straight regex for - this parser, we need to override the initial parser prepping here. 
- """
-+ users = set()
- for line in content.splitlines()[1:]:
-- user = line.split()[0]
-+ try:
-+ user = line.split()[0]
-+ except Exception:
-+ continue
- if user in self.skip_list:
- continue
-- self.mapping.get(user)
-+ users.add(user)
-+ for each in users:
-+ self.mapping.get(each)
-
- def parse_line(self, line):
- count = 0
---
-2.31.1
-
diff --git a/SOURCES/sos-bz1985986-potential-issues-static-analyse.patch b/SOURCES/sos-bz1985986-potential-issues-static-analyse.patch
deleted file mode 100644
index 0c359e6..0000000
--- a/SOURCES/sos-bz1985986-potential-issues-static-analyse.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From 6d5cbe90e17534d53d7fe42dff4d8ca734acf594 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker
-Date: Tue, 29 Jun 2021 15:49:00 -0400
-Subject: [PATCH] [yum] Fix potential traceback when yum history is empty
-
-Like we did in #969 for `dnf`, fix a potential issue where we would
-generate a traceback in the plugin when `yum history` is empty.
-
-Signed-off-by: Jake Hunsaker
----
- sos/report/plugins/yum.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/report/plugins/yum.py b/sos/report/plugins/yum.py
-index 54e222df..aec805e6 100644
---- a/sos/report/plugins/yum.py
-+++ b/sos/report/plugins/yum.py
-@@ -91,7 +91,7 @@ class Yum(Plugin, RedHatPlugin):
- # packages installed/erased/updated per transaction
- if self.get_option("yum-history-info"):
- history = self.exec_cmd("yum history")
-- transactions = None
-+ transactions = -1
- if history['status'] == 0:
- for line in history['output'].splitlines():
- try:
---
-2.31.1
-
-From a7a4ef73faee6cddba36bf670d4a20ab0521c36f Mon Sep 17 00:00:00 2001
-From: Pavel Moravec
-Date: Wed, 30 Jun 2021 13:10:56 +0200
-Subject: [PATCH] [plugins] Set default predicate instead of None for
- robustness
-
-Just making the code more robust: it could be dangerous to
-set pred = None and then potentially call log_skipped_cmd, which
-expects "pred" to be of SoSPredicate type.
-
-Currently such a call flow cannot happen, but it is worth
-making the code more robust against potential future changes.
-
-Resolves: #2601
-
-Signed-off-by: Pavel Moravec
----
- sos/report/plugins/__init__.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index 6fd1a3b2..b9cd28ed 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -1629,7 +1629,7 @@ class Plugin(object):
-
-     def _add_cmd_output(self, **kwargs):
-         """Internal helper to add a single command to the collection list."""
--        pred = kwargs.pop('pred') if 'pred' in kwargs else None
-+        pred = kwargs.pop('pred') if 'pred' in kwargs else SoSPredicate(self)
-         soscmd = SoSCommand(**kwargs)
-         self._log_debug("packed command: " + soscmd.__str__())
-         for _skip_cmd in self.skip_commands:
---
-2.31.1
-
diff --git a/SOURCES/sos-bz1998433-opacapture-under-allow-system-changes.patch b/SOURCES/sos-bz1998433-opacapture-under-allow-system-changes.patch
new file mode 100644
index 0000000..39f9c8a
--- /dev/null
+++ b/SOURCES/sos-bz1998433-opacapture-under-allow-system-changes.patch
@@ -0,0 +1,49 @@
+From 66ebb8256b1326573cbcb2d134545635dfead3bc Mon Sep 17 00:00:00 2001
+From: Jose Castillo
+Date: Sun, 29 Aug 2021 15:35:09 +0200
+Subject: [PATCH] [omnipath_client] Ensure opacapture runs only with
+ allow-system-changes
+
+While the omnipath_client plugin is collecting "opacapture",
+the `depmod -a` command is executed to regenerate some files
+under /usr/lib/modules/$kernel.
+ +modules.dep +modules.dep.bin +modules.devname +modules.softdep +modules.symbols +modules.symbols.bin + +This patch ensures that the command is only run when +the option --allow-system-changes is used. + +Fixes: RHBZ#1998433 + +Signed-off-by: Jose Castillo +--- + sos/report/plugins/omnipath_client.py | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/sos/report/plugins/omnipath_client.py b/sos/report/plugins/omnipath_client.py +index 1ec01384..4e988c5c 100644 +--- a/sos/report/plugins/omnipath_client.py ++++ b/sos/report/plugins/omnipath_client.py +@@ -45,7 +45,12 @@ class OmnipathClient(Plugin, RedHatPlugin): + # rather than storing it somewhere under /var/tmp and copying it via + # add_copy_spec, add it directly to sos_commands/ dir by + # building a path argument using self.get_cmd_output_path(). +- self.add_cmd_output("opacapture %s" % join(self.get_cmd_output_path(), +- "opacapture.tgz")) ++ # This command calls 'depmod -a', so lets make sure we ++ # specified the 'allow-system-changes' option before running it. ++ if self.get_option('allow_system_changes'): ++ self.add_cmd_output("opacapture %s" % ++ join(self.get_cmd_output_path(), ++ "opacapture.tgz"), ++ changes=True) + + # vim: set et ts=4 sw=4 : +-- +2.31.1 + diff --git a/SOURCES/sos-bz1998521-unpackaged-recursive-symlink.patch b/SOURCES/sos-bz1998521-unpackaged-recursive-symlink.patch new file mode 100644 index 0000000..35cc89d --- /dev/null +++ b/SOURCES/sos-bz1998521-unpackaged-recursive-symlink.patch @@ -0,0 +1,42 @@ +From e2ca3d02f36c0db4efaacfb2c1b7d502f38e371c Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Mon, 30 Aug 2021 10:18:29 +0200 +Subject: [PATCH] [unpackaged] deal with recursive loop of symlinks properly + +When the plugin processes a recursive loop of symlinks, it currently +hangs in an infinite loop trying to follow the symlinks. Use +pathlib.Path.resolve() method to return the target directly. + +Resolves: #2664 + +Signed-off-by: Pavel Moravec +--- + sos/report/plugins/unpackaged.py | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/sos/report/plugins/unpackaged.py b/sos/report/plugins/unpackaged.py +index e5cc6191..9d68077c 100644 +--- a/sos/report/plugins/unpackaged.py ++++ b/sos/report/plugins/unpackaged.py +@@ -10,6 +10,7 @@ from sos.report.plugins import Plugin, RedHatPlugin + + import os + import stat ++from pathlib import Path + + + class Unpackaged(Plugin, RedHatPlugin): +@@ -41,8 +42,8 @@ class Unpackaged(Plugin, RedHatPlugin): + for name in files: + path = os.path.join(root, name) + try: +- while stat.S_ISLNK(os.lstat(path).st_mode): +- path = os.path.abspath(os.readlink(path)) ++ if stat.S_ISLNK(os.lstat(path).st_mode): ++ path = Path(path).resolve() + except Exception: + continue + file_list.append(os.path.realpath(path)) +-- +2.31.1 + diff --git a/SOURCES/sos-bz2001096-iptables-save-under-nf_tables-kmod.patch b/SOURCES/sos-bz2001096-iptables-save-under-nf_tables-kmod.patch new file mode 100644 index 0000000..e234bc6 --- /dev/null +++ b/SOURCES/sos-bz2001096-iptables-save-under-nf_tables-kmod.patch @@ -0,0 +1,73 @@ +From 7d5157aa5071e3620246e2d4aa80acb2d3ed30f0 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Tue, 28 Sep 2021 22:44:52 +0200 +Subject: [PATCH] [networking] prevent iptables-save commands to load nf_tables + kmod + +If iptables has built-in nf_tables kmod, then +'ip netns iptables-save' command requires the kmod which must +be guarded by predicate. + +Analogously for ip6tables. 
+ +Resolves: #2703 + +Signed-off-by: Pavel Moravec +--- + sos/report/plugins/networking.py | 29 ++++++++++++++++++++++++----- + 1 file changed, 24 insertions(+), 5 deletions(-) + +diff --git a/sos/report/plugins/networking.py b/sos/report/plugins/networking.py +index c80ae719..1237f629 100644 +--- a/sos/report/plugins/networking.py ++++ b/sos/report/plugins/networking.py +@@ -182,22 +182,41 @@ class Networking(Plugin): + # per-namespace. + self.add_cmd_output("ip netns") + cmd_prefix = "ip netns exec " +- for namespace in self.get_network_namespaces( +- self.get_option("namespace_pattern"), +- self.get_option("namespaces")): ++ namespaces = self.get_network_namespaces( ++ self.get_option("namespace_pattern"), ++ self.get_option("namespaces")) ++ if (namespaces): ++ # 'ip netns exec iptables-save' must be guarded by nf_tables ++ # kmod, if 'iptables -V' output contains 'nf_tables' ++ # analogously for ip6tables ++ co = {'cmd': 'iptables -V', 'output': 'nf_tables'} ++ co6 = {'cmd': 'ip6tables -V', 'output': 'nf_tables'} ++ iptables_with_nft = (SoSPredicate(self, kmods=['nf_tables']) ++ if self.test_predicate(self, ++ pred=SoSPredicate(self, cmd_outputs=co)) ++ else None) ++ ip6tables_with_nft = (SoSPredicate(self, kmods=['nf_tables']) ++ if self.test_predicate(self, ++ pred=SoSPredicate(self, cmd_outputs=co6)) ++ else None) ++ for namespace in namespaces: + ns_cmd_prefix = cmd_prefix + namespace + " " + self.add_cmd_output([ + ns_cmd_prefix + "ip address show", + ns_cmd_prefix + "ip route show table all", + ns_cmd_prefix + "ip -s -s neigh show", + ns_cmd_prefix + "ip rule list", +- ns_cmd_prefix + "iptables-save", +- ns_cmd_prefix + "ip6tables-save", + ns_cmd_prefix + "netstat %s -neopa" % self.ns_wide, + ns_cmd_prefix + "netstat -s", + ns_cmd_prefix + "netstat %s -agn" % self.ns_wide, + ns_cmd_prefix + "nstat -zas", + ], priority=50) ++ self.add_cmd_output([ns_cmd_prefix + "iptables-save"], ++ pred=iptables_with_nft, ++ priority=50) ++ self.add_cmd_output([ns_cmd_prefix + "ip6tables-save"], ++ pred=ip6tables_with_nft, ++ priority=50) + + ss_cmd = ns_cmd_prefix + "ss -peaonmi" + # --allow-system-changes is handled directly in predicate +-- +2.31.1 + diff --git a/SOURCES/sos-bz2002145-kernel-psi.patch b/SOURCES/sos-bz2002145-kernel-psi.patch new file mode 100644 index 0000000..1a9d5e0 --- /dev/null +++ b/SOURCES/sos-bz2002145-kernel-psi.patch @@ -0,0 +1,33 @@ +From 23e523b6b9784390c7ce2c5af654ab497fb10aaf Mon Sep 17 00:00:00 2001 +From: Jose Castillo +Date: Wed, 8 Sep 2021 09:25:24 +0200 +Subject: [PATCH] [kernel] Capture Pressure Stall Information + +Kernel 4.20 includes PSI metrics for CPU, memeory and IO. +The feature is enabled after adding "psi=1" as +kernel boot parameter. +The information is captured in files +in the directory /proc/pressure. 
+
+Signed-off-by: Jose Castillo
+---
+ sos/report/plugins/kernel.py | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sos/report/plugins/kernel.py b/sos/report/plugins/kernel.py
+index 8c5e5e11..803f5e30 100644
+--- a/sos/report/plugins/kernel.py
++++ b/sos/report/plugins/kernel.py
+@@ -112,7 +112,8 @@ class Kernel(Plugin, IndependentPlugin):
+ "/sys/kernel/debug/extfrag/unusable_index",
+ "/sys/kernel/debug/extfrag/extfrag_index",
+ clocksource_path + "available_clocksource",
+- clocksource_path + "current_clocksource"
++ clocksource_path + "current_clocksource",
++ "/proc/pressure/"
+ ])
+
+ if self.get_option("with-timer"):
+--
+2.31.1
+
diff --git a/SOURCES/sos-bz2004929-openvswitch-offline-analysis.patch b/SOURCES/sos-bz2004929-openvswitch-offline-analysis.patch
new file mode 100644
index 0000000..8bd4adb
--- /dev/null
+++ b/SOURCES/sos-bz2004929-openvswitch-offline-analysis.patch
@@ -0,0 +1,151 @@
+From 3f0ec3e55e7dcec89dd7fad10084ea7f16178608 Mon Sep 17 00:00:00 2001
+From: Salvatore Daniele
+Date: Tue, 7 Sep 2021 13:48:22 -0400
+Subject: [PATCH 1/2] [openvswitch] add ovs default OpenFlow protocols
+
+ovs-vsctl list bridge can return an empty 'protocol' column even when
+there are OpenFlow protocols in place by default.
+
+ovs-ofctl --version will return the range of supported ofp and should
+also be used to ensure flow information for relevant protocol versions
+is collected.
+
+OpenFlow default versions:
+https://docs.openvswitch.org/en/latest/faq/openflow/
+
+Signed-off-by: Salvatore Daniele
+---
+ sos/report/plugins/openvswitch.py | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py
+index cd897db2..92cc7259 100644
+--- a/sos/report/plugins/openvswitch.py
++++ b/sos/report/plugins/openvswitch.py
+@@ -206,6 +206,7 @@ class OpenVSwitch(Plugin):
+
+ # Gather additional output for each OVS bridge on the host.
+ br_list_result = self.collect_cmd_output("ovs-vsctl -t 5 list-br")
++ ofp_ver_result = self.collect_cmd_output("ovs-ofctl -t 5 --version")
+ if br_list_result['status'] == 0:
+ for br in br_list_result['output'].splitlines():
+ self.add_cmd_output([
+@@ -232,6 +233,16 @@ class OpenVSwitch(Plugin):
+ "OpenFlow15"
+ ]
+
++ # Flow protocol hex identifiers
++ ofp_versions = {
++ 0x01: "OpenFlow10",
++ 0x02: "OpenFlow11",
++ 0x03: "OpenFlow12",
++ 0x04: "OpenFlow13",
++ 0x05: "OpenFlow14",
++ 0x06: "OpenFlow15",
++ }
++
+ # List protocols currently in use, if any
+ ovs_list_bridge_cmd = "ovs-vsctl -t 5 list bridge %s" % br
+ br_info = self.collect_cmd_output(ovs_list_bridge_cmd)
+@@ -242,6 +253,21 @@ class OpenVSwitch(Plugin):
+ br_protos_ln = line[line.find("[")+1:line.find("]")]
+ br_protos = br_protos_ln.replace('"', '').split(", ")
+
++ # If 'list bridge' yielded no protocols, use the range of
++ # protocols enabled by default on this version of ovs.
++ if br_protos == [''] and ofp_ver_result['output']:
++ ofp_version_range = ofp_ver_result['output'].splitlines()
++ ver_range = []
++
++ for line in ofp_version_range:
++ if "OpenFlow versions" in line:
++ v = line.split("OpenFlow versions ")[1].split(":")
++ ver_range = range(int(v[0], 16), int(v[1], 16)+1)
++
++ for protocol in ver_range:
++ if protocol in ofp_versions:
++ br_protos.append(ofp_versions[protocol])
++
+ # Collect flow information for relevant protocol versions only
+ for flow in flow_versions:
+ if flow in br_protos:
+--
+2.31.1
+
+
+From 5a006024f730213a726c70e82c5ecd2daf685b2b Mon Sep 17 00:00:00 2001
+From: Salvatore Daniele
+Date: Tue, 7 Sep 2021 14:17:19 -0400
+Subject: [PATCH 2/2] [openvswitch] add commands for offline analysis
+
+Replicas of ovs-vswitchd and ovsdb-server can be recreated offline
+using flow, group, and tlv dumps, and ovs conf.db. This allows for
+offline analysis and the use of tools such as ovs-appctl
+ofproto/trace and ovs-ofctl for debugging.
+
+This patch ensures this information is available in the sos report.
+The db is copied rather than collected using ovsdb-client list dump
+for two reasons:
+
+ovsdb-client requires interacting with the ovsdb-server which could
+take it 'down' for some time, and impact large, busy clusters.
+
+The list-dump is not in a format that can be used to restore the db
+offline. All of the information in the list dump, and more, is
+available by copying the db.
+
+Signed-off-by: Salvatore Daniele
+---
+ sos/report/plugins/openvswitch.py | 12 ++++++++++--
+ sos/report/plugins/ovn_central.py | 1 +
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py
+index 92cc7259..003596c6 100644
+--- a/sos/report/plugins/openvswitch.py
++++ b/sos/report/plugins/openvswitch.py
+@@ -75,12 +75,19 @@ class OpenVSwitch(Plugin):
+ "/run/openvswitch/ovs-monitor-ipsec.pid"
+ ])
+
++ self.add_copy_spec([
++ path_join('/usr/local/etc/openvswitch', 'conf.db'),
++ path_join('/etc/openvswitch', 'conf.db'),
++ path_join('/var/lib/openvswitch', 'conf.db'),
++ ])
++ ovs_dbdir = environ.get('OVS_DBDIR')
++ if ovs_dbdir:
++ self.add_copy_spec(path_join(ovs_dbdir, 'conf.db'))
++
+ self.add_cmd_output([
+ # The '-t 5' adds an upper bound on how long to wait to connect
+ # to the Open vSwitch server, avoiding hangs when running sos.
+ "ovs-vsctl -t 5 show",
+- # Gather the database.
+- "ovsdb-client -f list dump", + # List the contents of important runtime directories + "ls -laZ /run/openvswitch", + "ls -laZ /dev/hugepages/", +@@ -276,6 +283,7 @@ class OpenVSwitch(Plugin): + "ovs-ofctl -O %s dump-groups %s" % (flow, br), + "ovs-ofctl -O %s dump-group-stats %s" % (flow, br), + "ovs-ofctl -O %s dump-flows %s" % (flow, br), ++ "ovs-ofctl -O %s dump-tlv-map %s" % (flow, br), + "ovs-ofctl -O %s dump-ports-desc %s" % (flow, br) + ]) + +diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py +index a4c483a9..d6647aad 100644 +--- a/sos/report/plugins/ovn_central.py ++++ b/sos/report/plugins/ovn_central.py +@@ -138,6 +138,7 @@ class OVNCentral(Plugin): + os.path.join('/usr/local/etc/openvswitch', dbfile), + os.path.join('/etc/openvswitch', dbfile), + os.path.join('/var/lib/openvswitch', dbfile), ++ os.path.join('/var/lib/ovn/etc', dbfile), + ]) + if ovs_dbdir: + self.add_copy_spec(os.path.join(ovs_dbdir, dbfile)) +-- +2.31.1 + diff --git a/SOURCES/sos-bz2005195-iptables-based-on-ntf.patch b/SOURCES/sos-bz2005195-iptables-based-on-ntf.patch new file mode 100644 index 0000000..5ccc61f --- /dev/null +++ b/SOURCES/sos-bz2005195-iptables-based-on-ntf.patch @@ -0,0 +1,303 @@ +From 2ab8ba3ecbd52e452cc554d515e0782801dcb4b6 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Wed, 8 Sep 2021 15:31:48 +0200 +Subject: [PATCH] [firewalld] collect nft rules in firewall_tables only + +We collect 'nft list ruleset' in both plugins, while: +- nft is not shipped by firewalld package, so we should not collect +it in firewalld plugin +- running the command requires both nf_tables and nfnetlink kmods, so +we should use both kmods in the predicate + +Resolves: #2679 + +Signed-off-by: Pavel Moravec +--- + sos/report/plugins/firewall_tables.py | 9 +++++---- + sos/report/plugins/firewalld.py | 8 +------- + 2 files changed, 6 insertions(+), 11 deletions(-) + +diff --git a/sos/report/plugins/firewall_tables.py b/sos/report/plugins/firewall_tables.py +index 56058d3bf9..63a7dddeb5 100644 +--- a/sos/report/plugins/firewall_tables.py ++++ b/sos/report/plugins/firewall_tables.py +@@ -40,10 +40,11 @@ def collect_nftables(self): + """ Collects nftables rulesets with 'nft' commands if the modules + are present """ + +- self.add_cmd_output( +- "nft list ruleset", +- pred=SoSPredicate(self, kmods=['nf_tables']) +- ) ++ # collect nftables ruleset ++ nft_pred = SoSPredicate(self, ++ kmods=['nf_tables', 'nfnetlink'], ++ required={'kmods': 'all'}) ++ self.add_cmd_output("nft list ruleset", pred=nft_pred, changes=True) + + def setup(self): + # collect iptables -t for any existing table, if we can't read the +diff --git a/sos/report/plugins/firewalld.py b/sos/report/plugins/firewalld.py +index ec83527ed7..9401bfd239 100644 +--- a/sos/report/plugins/firewalld.py ++++ b/sos/report/plugins/firewalld.py +@@ -9,7 +9,7 @@ + # + # See the LICENSE file in the source distribution for further information. + +-from sos.report.plugins import Plugin, RedHatPlugin, SoSPredicate ++from sos.report.plugins import Plugin, RedHatPlugin + + + class FirewallD(Plugin, RedHatPlugin): +@@ -35,12 +35,6 @@ def setup(self): + "/var/log/firewalld", + ]) + +- # collect nftables ruleset +- nft_pred = SoSPredicate(self, +- kmods=['nf_tables', 'nfnetlink'], +- required={'kmods': 'all'}) +- self.add_cmd_output("nft list ruleset", pred=nft_pred, changes=True) +- + # use a 10s timeout to workaround dbus problems in + # docker containers. 
+ self.add_cmd_output([
+--
+2.31.1
+
+
+From 2a7cf53b61943907dc823cf893530b620a87946c Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Fri, 15 Oct 2021 22:31:36 +0200
+Subject: [PATCH 1/3] [report] Use log_skipped_cmd method inside
+ collect_cmd_output
+
+Also, remove obsolete parameters of the log_skipped_cmd method.
+
+Related: #2724
+
+Signed-off-by: Pavel Moravec
+---
+ sos/report/plugins/__init__.py | 26 ++++++++------------------
+ 1 file changed, 8 insertions(+), 18 deletions(-)
+
+diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
+index ec138f83..b60ab5f6 100644
+--- a/sos/report/plugins/__init__.py
++++ b/sos/report/plugins/__init__.py
+@@ -876,8 +876,7 @@ class Plugin():
+ return bool(pred)
+ return False
+
+- def log_skipped_cmd(self, pred, cmd, kmods=False, services=False,
+- changes=False):
++ def log_skipped_cmd(self, cmd, pred, changes=False):
+ """Log that a command was skipped due to predicate evaluation.
+
+ Emit a warning message indicating that a command was skipped due
+@@ -887,21 +886,17 @@ class Plugin():
+ message indicating that the missing data can be collected by using
+ the "--allow-system-changes" command line option will be included.
+
+- :param pred: The predicate that caused the command to be skipped
+- :type pred: ``SoSPredicate``
+-
+ :param cmd: The command that was skipped
+ :type cmd: ``str``
+
+- :param kmods: Did kernel modules cause the command to be skipped
+- :type kmods: ``bool``
+-
+- :param services: Did services cause the command to be skipped
+- :type services: ``bool``
++ :param pred: The predicate that caused the command to be skipped
++ :type pred: ``SoSPredicate``
+
+ :param changes: Is the `--allow-system-changes` enabled
+ :type changes: ``bool``
+ """
++ if pred is None:
++ pred = SoSPredicate(self)
+ msg = "skipped command '%s': %s" % (cmd, pred.report_failure())
+
+ if changes:
+@@ -1700,9 +1693,7 @@ class Plugin():
+ self.collect_cmds.append(soscmd)
+ self._log_info("added cmd output '%s'" % soscmd.cmd)
+ else:
+- self.log_skipped_cmd(pred, soscmd.cmd, kmods=bool(pred.kmods),
+- services=bool(pred.services),
+- changes=soscmd.changes)
++ self.log_skipped_cmd(soscmd.cmd, pred, changes=soscmd.changes)
+
+ def add_cmd_output(self, cmds, suggest_filename=None,
+ root_symlink=None, timeout=None, stderr=True,
+@@ -2112,7 +2103,7 @@ class Plugin():
+ root_symlink=False, timeout=None,
+ stderr=True, chroot=True, runat=None, env=None,
+ binary=False, sizelimit=None, pred=None,
+- subdir=None, tags=[]):
++ changes=False, subdir=None, tags=[]):
+ """Execute a command and save the output to a file for inclusion in the
+ report, then return the results for further use by the plugin
+
+@@ -2163,8 +2154,7 @@ class Plugin():
+ :rtype: ``dict``
+ """
+ if not self.test_predicate(cmd=True, pred=pred):
+- self._log_info("skipped cmd output '%s' due to predicate (%s)" %
+- (cmd, self.get_predicate(cmd=True, pred=pred)))
++ self.log_skipped_cmd(cmd, pred, changes=changes)
+ return {
+ 'status': None, # don't match on if result['status'] checks
+ 'output': '',
+--
+2.31.1
+
+
+From 6b1bea0ffb1df7f8e5001b06cf25f0741b007ddd Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Fri, 15 Oct 2021 22:34:01 +0200
+Subject: [PATCH 2/3] [firewall_tables] call iptables -t <table> based on nft
+ list
+
+If iptables are not really in use, calling iptables -t <table>
+would load the corresponding nft table.
+
+Therefore, call iptables -t only for the tables from "nft list ruleset"
+output.
+
+Example: nft list ruleset contains
+
+table ip mangle {
+..
+}
+
+so we can collect iptables -t mangle -nvL.
+
+The same applies to ip6tables as well.
+
+Resolves: #2724
+
+Signed-off-by: Pavel Moravec
+---
+ sos/report/plugins/firewall_tables.py | 29 ++++++++++++++++++++-------
+ 1 file changed, 22 insertions(+), 7 deletions(-)
+
+diff --git a/sos/report/plugins/firewall_tables.py b/sos/report/plugins/firewall_tables.py
+index 63a7ddde..ef04d939 100644
+--- a/sos/report/plugins/firewall_tables.py
++++ b/sos/report/plugins/firewall_tables.py
+@@ -44,26 +44,41 @@ class firewall_tables(Plugin, IndependentPlugin):
+ nft_pred = SoSPredicate(self,
+ kmods=['nf_tables', 'nfnetlink'],
+ required={'kmods': 'all'})
+- self.add_cmd_output("nft list ruleset", pred=nft_pred, changes=True)
++ return self.collect_cmd_output("nft list ruleset", pred=nft_pred,
++ changes=True)
+
+ def setup(self):
++ # first, collect "nft list ruleset" as collecting commands like
++ # ip6tables -t mangle -nvL
++ # depends on its output
++ # store in nft_ip_tables lists of ip[|6] tables from nft list
++ nft_list = self.collect_nftables()
++ nft_ip_tables = {'ip': [], 'ip6': []}
++ nft_lines = nft_list['output'] if nft_list['status'] == 0 else ''
++ for line in nft_lines.splitlines():
++ words = line.split()[0:3]
++ if len(words) == 3 and words[0] == 'table' and \
++ words[1] in nft_ip_tables.keys():
++ nft_ip_tables[words[1]].append(words[2])
+ # collect iptables -t for any existing table, if we can't read the
+ # tables, collect 2 default ones (mangle, filter)
++ # do collect them only when relevant nft list ruleset exists
++ default_ip_tables = "mangle\nfilter\n"
+ try:
+ ip_tables_names = open("/proc/net/ip_tables_names").read()
+ except IOError:
+- ip_tables_names = "mangle\nfilter\n"
++ ip_tables_names = default_ip_tables
+ for table in ip_tables_names.splitlines():
+- self.collect_iptable(table)
++ if nft_list['status'] == 0 and table in nft_ip_tables['ip']:
++ self.collect_iptable(table)
+ # collect the same for ip6tables
+ try:
+ ip_tables_names = open("/proc/net/ip6_tables_names").read()
+ except IOError:
+- ip_tables_names = "mangle\nfilter\n"
++ ip_tables_names = default_ip_tables
+ for table in ip_tables_names.splitlines():
+- self.collect_ip6table(table)
+-
+- self.collect_nftables()
++ if nft_list['status'] == 0 and table in nft_ip_tables['ip6']:
++ self.collect_ip6table(table)
+
+ # When iptables is called it will load the modules
+ # iptables_filter (for kernel <= 3) or
+--
+2.31.1
+
+
+From 464bd2d2e83f203e369f2ba7671bbb7da53e06f6 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Sun, 24 Oct 2021 16:00:31 +0200
+Subject: [PATCH 3/3] [firewall_tables] Call iptables only when nft ip filter
+ table exists
+
+iptables -vnxL creates nft 'ip filter' table if it does not exist, hence
+we must guard iptables execution by the presence of the nft table.
+
+Equivalent logic applies to ip6tables.
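For reference, the guard logic described above can be reduced to a minimal standalone sketch, not part of the patch set, assuming an nft binary and plain subprocess calls in place of sos' collect_cmd_output():

    import subprocess

    def nft_ip_tables():
        """Map 'nft list ruleset' output to {'ip': [...], 'ip6': [...]}."""
        tables = {'ip': [], 'ip6': []}
        try:
            out = subprocess.run(['nft', 'list', 'ruleset'], text=True,
                                 capture_output=True, check=True).stdout
        except (OSError, subprocess.CalledProcessError):
            return tables
        for line in out.splitlines():
            words = line.split()[0:3]
            # table declarations look like: 'table ip mangle {'
            if len(words) == 3 and words[0] == 'table' and words[1] in tables:
                tables[words[1]].append(words[2])
        return tables

    # dump only tables nft already knows about, so iptables cannot
    # implicitly create the nft 'ip filter' table as a side effect
    for table in nft_ip_tables()['ip']:
        subprocess.run(['iptables', '-t', table, '-nvL'])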
+ +Resolves: #2724 + +Signed-off-by: Pavel Moravec +--- + sos/report/plugins/firewall_tables.py | 26 ++++++++++++++------------ + 1 file changed, 14 insertions(+), 12 deletions(-) + +diff --git a/sos/report/plugins/firewall_tables.py b/sos/report/plugins/firewall_tables.py +index ef04d939..7eafd60f 100644 +--- a/sos/report/plugins/firewall_tables.py ++++ b/sos/report/plugins/firewall_tables.py +@@ -80,19 +80,21 @@ class firewall_tables(Plugin, IndependentPlugin): + if nft_list['status'] == 0 and table in nft_ip_tables['ip6']: + self.collect_ip6table(table) + +- # When iptables is called it will load the modules +- # iptables_filter (for kernel <= 3) or +- # nf_tables (for kernel >= 4) if they are not loaded. ++ # When iptables is called it will load: ++ # 1) the modules iptables_filter (for kernel <= 3) or ++ # nf_tables (for kernel >= 4) if they are not loaded. ++ # 2) nft 'ip filter' table will be created + # The same goes for ipv6. +- self.add_cmd_output( +- "iptables -vnxL", +- pred=SoSPredicate(self, kmods=['iptable_filter', 'nf_tables']) +- ) +- +- self.add_cmd_output( +- "ip6tables -vnxL", +- pred=SoSPredicate(self, kmods=['ip6table_filter', 'nf_tables']) +- ) ++ if nft_list['status'] != 0 or 'filter' in nft_ip_tables['ip']: ++ self.add_cmd_output( ++ "iptables -vnxL", ++ pred=SoSPredicate(self, kmods=['iptable_filter', 'nf_tables']) ++ ) ++ if nft_list['status'] != 0 or 'filter' in nft_ip_tables['ip6']: ++ self.add_cmd_output( ++ "ip6tables -vnxL", ++ pred=SoSPredicate(self, kmods=['ip6table_filter', 'nf_tables']) ++ ) + + self.add_copy_spec([ + "/etc/nftables", +-- +2.31.1 + diff --git a/SOURCES/sos-bz2011349-replace-dropbox-by-sftp.patch b/SOURCES/sos-bz2011349-replace-dropbox-by-sftp.patch deleted file mode 100644 index 7c26d9b..0000000 --- a/SOURCES/sos-bz2011349-replace-dropbox-by-sftp.patch +++ /dev/null @@ -1,746 +0,0 @@ -From 5298080d7360202c72b0af2e24994e4bfad72322 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Fri, 14 May 2021 13:10:04 -0400 -Subject: [PATCH] [Policy] Add SFTP upload support - -Adds support for uploading via SFTP. This is done via pexpect calling -the system's locally available SFTP binary. If either that binary or -pexpect are unavailable on the local system, we will exit gracefully and -report the issue. This allows sos to keep python3-pexpect as a -recommends rather than a hard dependency. - -Signed-off-by: Jake Hunsaker ---- - man/en/sos-report.1 | 14 +++++ - sos/collector/__init__.py | 7 ++- - sos/policies/distros/__init__.py | 105 +++++++++++++++++++++++++++++-- - sos/report/__init__.py | 4 ++ - 4 files changed, 125 insertions(+), 5 deletions(-) - -diff --git a/man/en/sos-report.1 b/man/en/sos-report.1 -index c38753d4a..799defafc 100644 ---- a/man/en/sos-report.1 -+++ b/man/en/sos-report.1 -@@ -35,6 +35,7 @@ sosreport \- Collect and package diagnos - [--encrypt-pass PASS]\fR - [--upload] [--upload-url url] [--upload-user user]\fR - [--upload-directory dir] [--upload-pass pass]\fR -+ [--upload-protocol protocol]\fR - [--experimental]\fR - [-h|--help]\fR - -@@ -354,6 +355,19 @@ be used provided all other required valu - Specify a directory to upload to, if one is not specified by a vendor default location - or if your destination server does not allow writes to '/'. - .TP -+.B \--upload-protocol PROTO -+Manually specify the protocol to use for uploading to the target \fBupload-url\fR. -+ -+Normally this is determined via the upload address, assuming that the protocol is part -+of the address provided, e.g. 'https://example.com'. 
By using this option, sos will skip -+the protocol check and use the method defined for the specified PROTO. -+ -+For RHEL systems, setting this option to \fBsftp\fR will skip the initial attempt to -+upload to the Red Hat Customer Portal, and only attempt an upload to Red Hat's SFTP server, -+which is typically used as a fallback target. -+ -+Valid values for PROTO are: 'auto' (default), 'https', 'ftp', 'sftp'. -+.TP - .B \--experimental - Enable plugins marked as experimental. Experimental plugins may not have - been tested for this port or may still be under active development. -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index 5d1c599ac..1c742cf50 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -106,6 +106,7 @@ class SoSCollector(SoSComponent): - 'upload_directory': None, - 'upload_user': None, - 'upload_pass': None, -+ 'upload_protocol': 'auto' - } - - def __init__(self, parser, parsed_args, cmdline_args): -@@ -383,6 +384,9 @@ class SoSCollector(SoSComponent): - help="Username to authenticate with") - collect_grp.add_argument("--upload-pass", default=None, - help="Password to authenticate with") -+ collect_grp.add_argument("--upload-protocol", default='auto', -+ choices=['auto', 'https', 'ftp', 'sftp'], -+ help="Manually specify the upload protocol") - - # Group the cleaner options together - cleaner_grp = parser.add_argument_group( -diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py -index 4268688c6..9fe31513b 100644 ---- a/sos/policies/distros/__init__.py -+++ b/sos/policies/distros/__init__.py -@@ -20,7 +20,7 @@ - from sos.policies.runtimes.podman import PodmanContainerRuntime - from sos.policies.runtimes.docker import DockerContainerRuntime - --from sos.utilities import shell_out -+from sos.utilities import shell_out, is_executable - - - try: -@@ -295,7 +295,9 @@ def _determine_upload_type(self): - 'sftp': self.upload_sftp, - 'https': self.upload_https - } -- if '://' not in self.upload_url: -+ if self.commons['cmdlineopts'].upload_protocol in prots.keys(): -+ return prots[self.commons['cmdlineopts'].upload_protocol] -+ elif '://' not in self.upload_url: - raise Exception("Must provide protocol in upload URL") - prot, url = self.upload_url.split('://') - if prot not in prots.keys(): -@@ -361,7 +363,7 @@ def get_upload_password(self): - self.upload_password or - self._upload_password) - -- def upload_sftp(self): -+ def upload_sftp(self, user=None, password=None): - """Attempts to upload the archive to an SFTP location. - - Due to the lack of well maintained, secure, and generally widespread -@@ -371,7 +373,102 @@ def upload_sftp(self): - Do not override this method with one that uses python-paramiko, as the - upstream sos team will reject any PR that includes that dependency. 
- """ -- raise NotImplementedError("SFTP support is not yet implemented") -+ # if we somehow don't have sftp available locally, fail early -+ if not is_executable('sftp'): -+ raise Exception('SFTP is not locally supported') -+ -+ # soft dependency on python3-pexpect, which we need to use to control -+ # sftp login since as of this writing we don't have a viable solution -+ # via ssh python bindings commonly available among downstreams -+ try: -+ import pexpect -+ except ImportError: -+ raise Exception('SFTP upload requires python3-pexpect, which is ' -+ 'not currently installed') -+ -+ sftp_connected = False -+ -+ if not user: -+ user = self.get_upload_user() -+ if not password: -+ password = self.get_upload_password() -+ -+ # need to strip the protocol prefix here -+ sftp_url = self.get_upload_url().replace('sftp://', '') -+ sftp_cmd = "sftp -oStrictHostKeyChecking=no %s@%s" % (user, sftp_url) -+ ret = pexpect.spawn(sftp_cmd, encoding='utf-8') -+ -+ sftp_expects = [ -+ u'sftp>', -+ u'password:', -+ u'Connection refused', -+ pexpect.TIMEOUT, -+ pexpect.EOF -+ ] -+ -+ idx = ret.expect(sftp_expects, timeout=15) -+ -+ if idx == 0: -+ sftp_connected = True -+ elif idx == 1: -+ ret.sendline(password) -+ pass_expects = [ -+ u'sftp>', -+ u'Permission denied', -+ pexpect.TIMEOUT, -+ pexpect.EOF -+ ] -+ sftp_connected = ret.expect(pass_expects, timeout=10) == 0 -+ if not sftp_connected: -+ ret.close() -+ raise Exception("Incorrect username or password for %s" -+ % self.get_upload_url_string()) -+ elif idx == 2: -+ raise Exception("Connection refused by %s. Incorrect port?" -+ % self.get_upload_url_string()) -+ elif idx == 3: -+ raise Exception("Timeout hit trying to connect to %s" -+ % self.get_upload_url_string()) -+ elif idx == 4: -+ raise Exception("Unexpected error trying to connect to sftp: %s" -+ % ret.before) -+ -+ if not sftp_connected: -+ ret.close() -+ raise Exception("Unable to connect via SFTP to %s" -+ % self.get_upload_url_string()) -+ -+ put_cmd = 'put %s %s' % (self.upload_archive_name, -+ self._get_sftp_upload_name()) -+ ret.sendline(put_cmd) -+ -+ put_expects = [ -+ u'100%', -+ pexpect.TIMEOUT, -+ pexpect.EOF -+ ] -+ -+ put_success = ret.expect(put_expects, timeout=180) -+ -+ if put_success == 0: -+ ret.sendline('bye') -+ return True -+ elif put_success == 1: -+ raise Exception("Timeout expired while uploading") -+ elif put_success == 2: -+ raise Exception("Unknown error during upload: %s" % ret.before) -+ else: -+ raise Exception("Unexpected response from server: %s" % ret.before) -+ -+ def _get_sftp_upload_name(self): -+ """If a specific file name pattern is required by the SFTP server, -+ override this method in the relevant Policy. 
Otherwise the archive's
-+ name on disk will be used
-+
-+ :returns: Filename as it will exist on the SFTP server
-+ :rtype: ``str``
-+ """
-+ return self.upload_archive_name.split('/')[-1]
-
- def _upload_https_streaming(self, archive):
- """If upload_https() needs to use requests.put(), this method is used
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index df99186db..d43454092 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -119,6 +119,7 @@ class SoSReport(SoSComponent):
- 'upload_directory': None,
- 'upload_user': None,
- 'upload_pass': None,
-+ 'upload_protocol': 'auto',
- 'add_preset': '',
- 'del_preset': ''
- }
-@@ -300,6 +301,9 @@ class SoSReport(SoSComponent):
- help="Username to authenticate to server with")
- report_grp.add_argument("--upload-pass", default=None,
- help="Password to authenticate to server with")
-+ report_grp.add_argument("--upload-protocol", default='auto',
-+ choices=['auto', 'https', 'ftp', 'sftp'],
-+ help="Manually specify the upload protocol")
-
- # Group to make add/del preset exclusive
- preset_grp = report_grp.add_mutually_exclusive_group()
-From d5316de87313c3eaf9fe4ce7a5eea3ed8c7d17ce Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker
-Date: Fri, 14 May 2021 13:11:27 -0400
-Subject: [PATCH] [Red Hat] Update policy to use SFTP instead of legacy FTP
- dropbox
-
-As the FTP dropbox for Red Hat is being decommissioned and replaced with
-an SFTP alternative, update the policy to fall back to the SFTP host and
-remove legacy FTP host references.
-
-The default behavior for --upload remains the same, just targeting a
-different location. If a username, password, and case number are given,
-the first attempt will be to upload to the Red Hat Customer Portal. If
-any are missing, or are invalid, then we will fall back to SFTP. During
-the fallback, if a valid username and password are not provided, sos will
-attempt to obtain an anonymous token for the upload before failing out
-entirely.
-
-Closes: #2467
-Resolves: #2552
-
-Signed-off-by: Jake Hunsaker
----
- sos/policies/distros/redhat.py | 115 ++++++++++++++++++++++-----------
- 1 file changed, 76 insertions(+), 39 deletions(-)
-
-diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
-index f37519910..241d3f139 100644
---- a/sos/policies/distros/redhat.py
-+++ b/sos/policies/distros/redhat.py
-@@ -8,6 +8,7 @@
- #
- # See the LICENSE file in the source distribution for further information.
- -+import json - import os - import sys - import re -@@ -20,6 +21,11 @@ - from sos.policies.package_managers.rpm import RpmPackageManager - from sos import _sos as _ - -+try: -+ import requests -+ REQUESTS_LOADED = True -+except ImportError: -+ REQUESTS_LOADED = False - - OS_RELEASE = "/etc/os-release" - RHEL_RELEASE_STR = "Red Hat Enterprise Linux" -@@ -39,9 +45,8 @@ class RedHatPolicy(LinuxPolicy): - _host_sysroot = '/' - default_scl_prefix = '/opt/rh' - name_pattern = 'friendly' -- upload_url = 'dropbox.redhat.com' -- upload_user = 'anonymous' -- upload_directory = '/incoming' -+ upload_url = None -+ upload_user = None - default_container_runtime = 'podman' - sos_pkg_name = 'sos' - sos_bin_path = '/usr/sbin' -@@ -196,7 +201,7 @@ def get_tmp_dir(self, opt_tmp_dir): - """ - - RH_API_HOST = "https://access.redhat.com" --RH_FTP_HOST = "ftp://dropbox.redhat.com" -+RH_SFTP_HOST = "sftp://sftp.access.redhat.com" - - - class RHELPolicy(RedHatPolicy): -@@ -216,9 +216,7 @@ An archive containing the collected info - generated in %(tmpdir)s and may be provided to a %(vendor)s \ - support representative. - """ + disclaimer_text + "%(vendor_text)s\n") -- _upload_url = RH_FTP_HOST -- _upload_user = 'anonymous' -- _upload_directory = '/incoming' -+ _upload_url = RH_SFTP_HOST - - def __init__(self, sysroot=None, init=None, probe_runtime=True, - remote_exec=None): -@@ -260,33 +263,17 @@ def prompt_for_upload_user(self): - return - if self.case_id and not self.get_upload_user(): - self.upload_user = input(_( -- "Enter your Red Hat Customer Portal username (empty to use " -- "public dropbox): ") -+ "Enter your Red Hat Customer Portal username for uploading [" -+ "empty for anonymous SFTP]: ") - ) -- if not self.upload_user: -- self.upload_url = RH_FTP_HOST -- self.upload_user = self._upload_user -- -- def _upload_user_set(self): -- user = self.get_upload_user() -- return user and (user != 'anonymous') - - def get_upload_url(self): - if self.upload_url: - return self.upload_url -- if self.commons['cmdlineopts'].upload_url: -+ elif self.commons['cmdlineopts'].upload_url: - return self.commons['cmdlineopts'].upload_url -- # anonymous FTP server should be used as fallback when either: -- # - case id is not set, or -- # - upload user isn't set AND batch mode prevents to prompt for it -- if (not self.case_id) or \ -- ((not self._upload_user_set()) and -- self.commons['cmdlineopts'].batch): -- self.upload_user = self._upload_user -- if self.upload_directory is None: -- self.upload_directory = self._upload_directory -- self.upload_password = None -- return RH_FTP_HOST -+ elif self.commons['cmdlineopts'].upload_protocol == 'sftp': -+ return RH_SFTP_HOST - else: - rh_case_api = "/hydra/rest/cases/%s/attachments" - return RH_API_HOST + rh_case_api % self.case_id -@@ -299,27 +286,77 @@ def _get_upload_headers(self): - def get_upload_url_string(self): - if self.get_upload_url().startswith(RH_API_HOST): - return "Red Hat Customer Portal" -- return self.upload_url or RH_FTP_HOST -+ elif self.get_upload_url().startswith(RH_SFTP_HOST): -+ return "Red Hat Secure FTP" -+ return self.upload_url - -- def get_upload_user(self): -- # if this is anything other than dropbox, annonymous won't work -- if self.upload_url != RH_FTP_HOST: -- return os.getenv('SOSUPLOADUSER', None) or self.upload_user -- return self._upload_user -+ def _get_sftp_upload_name(self): -+ """The RH SFTP server will only automatically connect file uploads to -+ cases if the filename _starts_ with the case number -+ """ -+ if self.case_id: -+ return "%s_%s" % 
(self.case_id, -+ self.upload_archive_name.split('/')[-1]) -+ return self.upload_archive_name -+ -+ def upload_sftp(self): -+ """Override the base upload_sftp to allow for setting an on-demand -+ generated anonymous login for the RH SFTP server if a username and -+ password are not given -+ """ -+ if RH_SFTP_HOST.split('//')[1] not in self.get_upload_url(): -+ return super(RHELPolicy, self).upload_sftp() -+ -+ if not REQUESTS_LOADED: -+ raise Exception("python3-requests is not installed and is required" -+ " for obtaining SFTP auth token.") -+ _token = None -+ _user = None -+ # we have a username and password, but we need to reset the password -+ # to be the token returned from the auth endpoint -+ if self.get_upload_user() and self.get_upload_password(): -+ url = RH_API_HOST + '/hydra/rest/v1/sftp/token' -+ auth = self.get_upload_https_auth() -+ ret = requests.get(url, auth=auth, timeout=10) -+ if ret.status_code == 200: -+ # credentials are valid -+ _user = self.get_upload_user() -+ _token = json.loads(ret.text)['token'] -+ else: -+ print("Unable to retrieve Red Hat auth token using provided " -+ "credentials. Will try anonymous.") -+ # we either do not have a username or password/token, or both -+ if not _token: -+ aurl = RH_API_HOST + '/hydra/rest/v1/sftp/token?isAnonymous=true' -+ anon = requests.get(aurl, timeout=10) -+ if anon.status_code == 200: -+ resp = json.loads(anon.text) -+ _user = resp['username'] -+ _token = resp['token'] -+ print("Using anonymous user %s for upload. Please inform your " -+ "support engineer." % _user) -+ if _user and _token: -+ return super(RHELPolicy, self).upload_sftp(user=_user, -+ password=_token) -+ raise Exception("Could not retrieve valid or anonymous credentials") - - def upload_archive(self, archive): - """Override the base upload_archive to provide for automatic failover - from RHCP failures to the public RH dropbox - """ - try: -+ if not self.get_upload_user() or not self.get_upload_password(): -+ self.upload_url = RH_SFTP_HOST - uploaded = super(RHELPolicy, self).upload_archive(archive) - except Exception: - uploaded = False -- if not uploaded and self.upload_url.startswith(RH_API_HOST): -- print("Upload to Red Hat Customer Portal failed. Trying %s" -- % RH_FTP_HOST) -- self.upload_url = RH_FTP_HOST -- uploaded = super(RHELPolicy, self).upload_archive(archive) -+ if not self.upload_url.startswith(RH_API_HOST): -+ raise -+ else: -+ print("Upload to Red Hat Customer Portal failed. Trying %s" -+ % RH_SFTP_HOST) -+ self.upload_url = RH_SFTP_HOST -+ uploaded = super(RHELPolicy, self).upload_archive(archive) - return uploaded - - def dist_version(self): -From 8a7ae6a3ac69a020758f7b0825a872e44714dbed Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Fri, 9 Apr 2021 11:05:47 -0400 -Subject: [PATCH] [ubuntu|Policy] Fix exception when attempting upload - -Fixes an issue where an upload attempt on Ubuntu systems would fail with -a traceback due to the upload archive's name not being available when we -first check to make sure we have an upload location, since the Ubuntu -default location requires an archive/file name. - -Related to this, stop clobbering `upload_archive` and assign the archive -name to an `upload_archive_name` variable instead. 
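The pexpect-driven login and upload added above boils down to the following condensed sketch, assuming python3-pexpect and a local sftp binary; only the happy path is shown, with the error branches from the patch omitted:

    import pexpect

    def sftp_put(host, user, password, local, remote):
        """Drive the system sftp binary through a single 'put'."""
        cmd = "sftp -oStrictHostKeyChecking=no %s@%s" % (user, host)
        sess = pexpect.spawn(cmd, encoding='utf-8')
        # index 0: already at the prompt; index 1: asked for a password
        if sess.expect(['sftp>', 'password:'], timeout=15) == 1:
            sess.sendline(password)
            sess.expect('sftp>', timeout=10)
        sess.sendline('put %s %s' % (local, remote))
        # sftp's progress meter prints '100%' once the copy completes
        sess.expect('100%', timeout=180)
        sess.sendline('bye')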
-
-Closes: #2472
-Resolves: #2479
-
-Signed-off-by: Jake Hunsaker
----
- sos/policies/distros/__init__.py | 9 +++++----
- sos/policies/distros/ubuntu.py | 4 +++-
- 2 files changed, 8 insertions(+), 5 deletions(-)
-
-diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
-index 022ba7f4c6..a24a0e5beb 100644
---- a/sos/policies/distros/__init__.py
-+++ b/sos/policies/distros/__init__.py
-@@ -149,6 +149,7 @@ def pre_work(self):
- self.upload_user = cmdline_opts.upload_user
- self.upload_directory = cmdline_opts.upload_directory
- self.upload_password = cmdline_opts.upload_pass
-+ self.upload_archive_name = ''
-
- if not cmdline_opts.batch and not \
- cmdline_opts.quiet:
-@@ -237,7 +238,7 @@ def upload_archive(self, archive):
- `get_upload_url_string()`
- Print a more human-friendly string than vendor URLs
- """
-- self.upload_archive = archive
-+ self.upload_archive_name = archive
- if not self.upload_url:
- self.upload_url = self.get_upload_url()
- if not self.upload_url:
-@@ -384,7 +385,7 @@ def upload_https(self):
- raise Exception("Unable to upload due to missing python requests "
- "library")
-
-- with open(self.upload_archive, 'rb') as arc:
-+ with open(self.upload_archive_name, 'rb') as arc:
- if not self._use_https_streaming:
- r = self._upload_https_no_stream(arc)
- else:
-@@ -467,9 +468,9 @@ def upload_ftp(self, url=None, directory=None, user=None, password=None):
- % str(err))
-
- try:
-- with open(self.upload_archive, 'rb') as _arcfile:
-+ with open(self.upload_archive_name, 'rb') as _arcfile:
- session.storbinary(
-- "STOR %s" % self.upload_archive.split('/')[-1],
-+ "STOR %s" % self.upload_archive_name.split('/')[-1],
- _arcfile
- )
- session.quit()
-diff --git a/sos/policies/distros/ubuntu.py b/sos/policies/distros/ubuntu.py
-index 94a4a241b0..308c1e3544 100644
---- a/sos/policies/distros/ubuntu.py
-+++ b/sos/policies/distros/ubuntu.py
-@@ -74,7 +74,9 @@ def get_upload_url_string(self):
-
- def get_upload_url(self):
- if not self.upload_url or self.upload_url.startswith(self._upload_url):
-- fname = os.path.basename(self.upload_archive)
-+ if not self.upload_archive_name:
-+ return self._upload_url
-+ fname = os.path.basename(self.upload_archive_name)
- return self._upload_url + fname
- super(UbuntuPolicy, self).get_upload_url()
-
-From 2e8b5e2d4f30854cce93d149fc7d24b9d9cfd02c Mon Sep 17 00:00:00 2001
-From: Pavel Moravec
-Date: Fri, 19 Nov 2021 16:16:07 +0100
-Subject: [PATCH 1/3] [policies] strip path from SFTP upload filename
-
-When case_id is not supplied, we ask the SFTP server to store the
-uploaded file under the name /var/tmp/, which is confusing.
-
-Let's remove the path from it also when case_id is not supplied.
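The resulting naming rule is easy to state as a self-contained sketch; the filenames and case number below are hypothetical:

    def sftp_upload_name(archive_path, case_id=None):
        """Basename only, prefixed with the case number when one is known."""
        fname = archive_path.split('/')[-1]
        return "%s_%s" % (case_id, fname) if case_id else fname

    assert sftp_upload_name('/var/tmp/sosreport.tar.xz') == 'sosreport.tar.xz'
    assert (sftp_upload_name('/var/tmp/sosreport.tar.xz', '01234567')
            == '01234567_sosreport.tar.xz')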
- -Related to: #2764 - -Signed-off-by: Pavel Moravec ---- - sos/policies/distros/redhat.py | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py -index 3476e21fb..8817fc785 100644 ---- a/sos/policies/distros/redhat.py -+++ b/sos/policies/distros/redhat.py -@@ -269,10 +269,10 @@ def _get_sftp_upload_name(self): - """The RH SFTP server will only automatically connect file uploads to - cases if the filename _starts_ with the case number - """ -+ fname = self.upload_archive_name.split('/')[-1] - if self.case_id: -- return "%s_%s" % (self.case_id, -- self.upload_archive_name.split('/')[-1]) -- return self.upload_archive_name -+ return "%s_%s" % (self.case_id, fname) -+ return fname - - def upload_sftp(self): - """Override the base upload_sftp to allow for setting an on-demand - -From 61023b29a656dd7afaa4a0643368b0a53f1a3779 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Fri, 19 Nov 2021 17:31:31 +0100 -Subject: [PATCH 2/3] [redhat] update SFTP API version to v2 - -Change API version from v1 to v2, which includes: -- change of URL -- different URI -- POST method for token generation instead of GET - -Resolves: #2764 - -Signed-off-by: Pavel Moravec ---- - sos/policies/distros/redhat.py | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - -diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py -index 8817fc785..e4e2b8835 100644 ---- a/sos/policies/distros/redhat.py -+++ b/sos/policies/distros/redhat.py -@@ -175,7 +175,7 @@ def get_tmp_dir(self, opt_tmp_dir): - No changes will be made to system configuration. - """ - --RH_API_HOST = "https://access.redhat.com" -+RH_API_HOST = "https://api.access.redhat.com" - RH_SFTP_HOST = "sftp://sftp.access.redhat.com" - - -@@ -287,12 +287,12 @@ def upload_sftp(self): - " for obtaining SFTP auth token.") - _token = None - _user = None -+ url = RH_API_HOST + '/support/v2/sftp/token' - # we have a username and password, but we need to reset the password - # to be the token returned from the auth endpoint - if self.get_upload_user() and self.get_upload_password(): -- url = RH_API_HOST + '/hydra/rest/v1/sftp/token' - auth = self.get_upload_https_auth() -- ret = requests.get(url, auth=auth, timeout=10) -+ ret = requests.post(url, auth=auth, timeout=10) - if ret.status_code == 200: - # credentials are valid - _user = self.get_upload_user() -@@ -302,8 +302,8 @@ def upload_sftp(self): - "credentials. Will try anonymous.") - # we either do not have a username or password/token, or both - if not _token: -- aurl = RH_API_HOST + '/hydra/rest/v1/sftp/token?isAnonymous=true' -- anon = requests.get(aurl, timeout=10) -+ adata = {"isAnonymous": True} -+ anon = requests.post(url, data=json.dumps(adata), timeout=10) - if anon.status_code == 200: - resp = json.loads(anon.text) - _user = resp['username'] - -From 267da2156ec61f526dd28e760ff6528408a76c3f Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Mon, 22 Nov 2021 15:22:32 +0100 -Subject: [PATCH 3/3] [policies] Deal 200 return code as success - -Return code 200 of POST method request must be dealt as success. - -Newly required due to the SFTP API change using POST. 
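The practical effect of the v2 change can be seen in a short python3-requests sketch; this is an illustration of the anonymous-token path only, with the endpoint and field names taken from the patch above:

    import json
    import requests

    url = 'https://api.access.redhat.com/support/v2/sftp/token'
    # v2 issues tokens via POST with a JSON body instead of GET parameters
    anon = requests.post(url, data=json.dumps({'isAnonymous': True}),
                         timeout=10)
    if anon.status_code == 200:
        resp = json.loads(anon.text)
        print(resp['username'], resp['token'])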
- -Related to: #2764 - -Signed-off-by: Pavel Moravec ---- - sos/policies/distros/__init__.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py -index 0906fa779..6f257fdce 100644 ---- a/sos/policies/distros/__init__.py -+++ b/sos/policies/distros/__init__.py -@@ -488,7 +488,7 @@ class LinuxPolicy(Policy): - r = self._upload_https_no_stream(arc) - else: - r = self._upload_https_streaming(arc) -- if r.status_code != 201: -+ if r.status_code != 200 and r.status_code != 201: - if r.status_code == 401: - raise Exception( - "Authentication failed: invalid user credentials" -From 8da1b14246226792c160dd04e5c7c75dd4e8d44b Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Mon, 22 Nov 2021 10:44:09 +0100 -Subject: [PATCH] [collect] fix moved get_upload_url under Policy class - -SoSCollector does not further declare get_upload_url method -as that was moved under Policy class(es). - -Resolves: #2766 - -Signed-off-by: Pavel Moravec ---- - sos/collector/__init__.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py -index 50183e873..42a7731d6 100644 ---- a/sos/collector/__init__.py -+++ b/sos/collector/__init__.py -@@ -1219,7 +1219,7 @@ this utility or remote systems that it c - msg = 'No sosreports were collected, nothing to archive...' - self.exit(msg, 1) - -- if self.opts.upload and self.get_upload_url(): -+ if self.opts.upload and self.policy.get_upload_url(): - try: - self.policy.upload_archive(arc_name) - self.ui_log.info("Uploaded archive successfully") - -From abb2fc65bd14760021c61699ad3113cab3bd4c64 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Tue, 30 Nov 2021 11:37:02 +0100 -Subject: [PATCH 1/2] [redhat] Fix broken URI to upload to customer portal - -Revert back the unwanted change in URI of uploading tarball to the -Red Hat Customer portal. - -Related: #2772 - -Signed-off-by: Pavel Moravec ---- - sos/policies/distros/redhat.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py -index e4e2b883..eb442407 100644 ---- a/sos/policies/distros/redhat.py -+++ b/sos/policies/distros/redhat.py -@@ -250,7 +250,7 @@ support representative. - elif self.commons['cmdlineopts'].upload_protocol == 'sftp': - return RH_SFTP_HOST - else: -- rh_case_api = "/hydra/rest/cases/%s/attachments" -+ rh_case_api = "/support/v1/cases/%s/attachments" - return RH_API_HOST + rh_case_api % self.case_id - - def _get_upload_headers(self): --- -2.31.1 - - -From ea4f9e88a412c80a4791396e1bb78ac1e24ece14 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Tue, 30 Nov 2021 13:00:26 +0100 -Subject: [PATCH 2/2] [policy] Add error message when FTP upload write failure - -When (S)FTP upload fails to write the destination file, -our "expect" code should detect it sooner than after timeout happens -and write appropriate error message. 
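The detection idea above amounts to adding the server's error string to the expect list, so the matched index classifies the outcome. A small sketch around a pexpect session such as the one opened in the upload code earlier (the 'sess' argument is assumed to be mid-transfer):

    import pexpect

    def wait_for_put(sess):
        """Classify the result of an sftp 'put' by expect-list index."""
        idx = sess.expect(['100%', 'No such file or directory',
                           pexpect.TIMEOUT, pexpect.EOF], timeout=180)
        if idx == 0:
            return True
        if idx == 1:
            raise Exception("Unable to write archive to destination")
        raise Exception("Timeout expired or unknown error while uploading")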
- -Resolves: #2772 - -Signed-off-by: Pavel Moravec ---- - sos/policies/distros/__init__.py | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py -index 6f257fdc..7bdc81b8 100644 ---- a/sos/policies/distros/__init__.py -+++ b/sos/policies/distros/__init__.py -@@ -473,7 +473,8 @@ class LinuxPolicy(Policy): - put_expects = [ - u'100%', - pexpect.TIMEOUT, -- pexpect.EOF -+ pexpect.EOF, -+ u'No such file or directory' - ] - - put_success = ret.expect(put_expects, timeout=180) -@@ -485,6 +486,8 @@ class LinuxPolicy(Policy): - raise Exception("Timeout expired while uploading") - elif put_success == 2: - raise Exception("Unknown error during upload: %s" % ret.before) -+ elif put_success == 3: -+ raise Exception("Unable to write archive to destination") - else: - raise Exception("Unexpected response from server: %s" % ret.before) - --- -2.31.1 - diff --git a/SOURCES/sos-bz2011413-cpuX-individual-sizelimits.patch b/SOURCES/sos-bz2011413-cpuX-individual-sizelimits.patch new file mode 100644 index 0000000..4d579d7 --- /dev/null +++ b/SOURCES/sos-bz2011413-cpuX-individual-sizelimits.patch @@ -0,0 +1,48 @@ +From b09ed75b09075d86c184b0a63cce9260f2cee4ca Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Mon, 30 Aug 2021 11:27:48 +0200 +Subject: [PATCH] [processor] Apply sizelimit to /sys/devices/system/cpu/cpuX + +Copy /sys/devices/system/cpu/cpuX with separately applied sizelimit. + +This is required for systems with tens/hundreds of CPUs where the +cumulative directory size exceeds 25MB or even 100MB. + +Resolves: #2639 +Closes: #2665 + +Signed-off-by: Pavel Moravec +--- + sos/report/plugins/processor.py | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +diff --git a/sos/report/plugins/processor.py b/sos/report/plugins/processor.py +index 0ddfd126..2df2dc9a 100644 +--- a/sos/report/plugins/processor.py ++++ b/sos/report/plugins/processor.py +@@ -7,6 +7,7 @@ + # See the LICENSE file in the source distribution for further information. + + from sos.report.plugins import Plugin, IndependentPlugin ++import os + + + class Processor(Plugin, IndependentPlugin): +@@ -34,7 +35,13 @@ class Processor(Plugin, IndependentPlugin): + self.add_copy_spec([ + "/proc/cpuinfo", + "/sys/class/cpuid", +- "/sys/devices/system/cpu" ++ ]) ++ # copy /sys/devices/system/cpu/cpuX with separately applied sizelimit ++ # this is required for systems with tens/hundreds of CPUs where the ++ # cumulative directory size exceeds 25MB or even 100MB. ++ cdirs = self.listdir('/sys/devices/system/cpu') ++ self.add_copy_spec([ ++ os.path.join('/sys/devices/system/cpu', cdir) for cdir in cdirs + ]) + + self.add_cmd_output([ +-- +2.31.1 + diff --git a/SOURCES/sos-bz2011506-foreman-puma-status.patch b/SOURCES/sos-bz2011506-foreman-puma-status.patch new file mode 100644 index 0000000..2a80571 --- /dev/null +++ b/SOURCES/sos-bz2011506-foreman-puma-status.patch @@ -0,0 +1,69 @@ +From 5a9458d318302c1caef862a868745fc8bdf5c741 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Mon, 4 Oct 2021 15:52:36 +0200 +Subject: [PATCH] [foreman] Collect puma status and stats + +Collect foreman-puma-status and 'pumactl [gc-|]stats', optionally using +SCL (if detected). 
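The command construction described above reduces to simple string templating; a sketch with the state-file path as in the patch, and the SCL wrapping applied only when an scl binary is present (sos additionally checks for RHEL 7):

    import shutil

    pumactl = "pumactl %s -S /usr/share/foreman/tmp/puma.state"
    if shutil.which('scl'):
        pumactl = "scl enable tfm '%s'" % pumactl
    for sub in ('stats', 'gc-stats'):
        print(pumactl % sub)  # e.g. what sos would run and collect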
+
+Resolves: #2712
+
+Signed-off-by: Pavel Moravec
+---
+ sos/report/plugins/foreman.py | 21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/sos/report/plugins/foreman.py b/sos/report/plugins/foreman.py
+index 4539f12b..351794f4 100644
+--- a/sos/report/plugins/foreman.py
++++ b/sos/report/plugins/foreman.py
+@@ -13,6 +13,7 @@ from sos.report.plugins import (Plugin,
+ UbuntuPlugin)
+ from pipes import quote
+ from re import match
++from sos.utilities import is_executable
+
+
+ class Foreman(Plugin):
+@@ -26,7 +27,9 @@ class Foreman(Plugin):
+ option_list = [
+ ('months', 'number of months for dynflow output', 'fast', 1),
+ ('proxyfeatures', 'collect features of smart proxies', 'slow', False),
++ ('puma-gc', 'collect Puma GC stats', 'fast', False),
+ ]
++ pumactl = 'pumactl %s -S /usr/share/foreman/tmp/puma.state'
+
+ def setup(self):
+ # for external DB, search in /etc/foreman/database.yml for:
+@@ -134,6 +138,17 @@ class Foreman(Plugin):
+ suggest_filename='dynflow_sidekiq_status')
+ self.add_journal(units="dynflow-sidekiq@*")
+
++ # Puma stats & status, i.e. foreman-puma-stats, then
++ # pumactl stats -S /usr/share/foreman/tmp/puma.state
++ # and optionally also gc-stats
++ # if on RHEL with Software Collections, wrap the commands accordingly
++ if self.get_option('puma-gc'):
++ self.add_cmd_output(self.pumactl % 'gc-stats',
++ suggest_filename='pumactl_gc-stats')
++ self.add_cmd_output(self.pumactl % 'stats',
++ suggest_filename='pumactl_stats')
++ self.add_cmd_output('/usr/sbin/foreman-puma-status')
++
+ # collect tables sizes, ordered
+ _cmd = self.build_query_cmd(
+ "SELECT table_name, pg_size_pretty(total_bytes) AS total, "
+@@ -297,6 +312,10 @@ class RedHatForeman(Foreman, RedHatPlugin):
+ self.add_file_tags({
+ '/usr/share/foreman/.ssh/ssh_config': 'ssh_foreman_config',
+ })
++ # if we are on RHEL7 with scl, wrap some Puma commands by
++ # scl enable tfm 'command'
++ if self.policy.dist_version() == 7 and is_executable('scl'):
++ self.pumactl = "scl enable tfm '%s'" % self.pumactl
+
+ super(RedHatForeman, self).setup()
+
+--
+2.31.1
+
diff --git a/SOURCES/sos-bz2012856-dryrun-uncaught-exception.patch b/SOURCES/sos-bz2012856-dryrun-uncaught-exception.patch
new file mode 100644
index 0000000..619d538
--- /dev/null
+++ b/SOURCES/sos-bz2012856-dryrun-uncaught-exception.patch
@@ -0,0 +1,33 @@
+From e56b3ea999731b831ebba80cf367e43e65c12b62 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Mon, 4 Oct 2021 14:43:08 +0200
+Subject: [PATCH] [report] Overwrite pred=None before referring predicate
+ attributes
+
+During a dry run, the add_journal method sets pred=None whilst
+log_skipped_cmd refers to predicate attributes. In that case, replace
+the None predicate with a default / empty predicate.
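The fix below is a generic normalize-before-use pattern; a tiny standalone illustration, with class and function names that are hypothetical rather than sos' own:

    class Predicate:
        def report_failure(self):
            return 'dry run'

    def log_skipped_cmd(cmd, pred=None):
        # normalize a None predicate so attribute access cannot raise
        pred = pred or Predicate()
        print("skipped command '%s': %s" % (cmd, pred.report_failure()))

    log_skipped_cmd('nft list ruleset')  # pred=None is now handled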
+
+Resolves: #2711
+
+Signed-off-by: Pavel Moravec
+---
+ sos/report/plugins/__init__.py | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
+index 3c2b64d9..c635b8de 100644
+--- a/sos/report/plugins/__init__.py
++++ b/sos/report/plugins/__init__.py
+@@ -1693,6 +1693,8 @@ class Plugin():
+ def _add_cmd_output(self, **kwargs):
+ """Internal helper to add a single command to the collection list."""
+ pred = kwargs.pop('pred') if 'pred' in kwargs else SoSPredicate(self)
++ if pred is None:
++ pred = SoSPredicate(self)
+ if 'priority' not in kwargs:
+ kwargs['priority'] = 10
+ if 'changes' not in kwargs:
+--
+2.31.1
+
diff --git a/SOURCES/sos-bz2012857-plugin-timeout-unhandled-exception.patch b/SOURCES/sos-bz2012857-plugin-timeout-unhandled-exception.patch
new file mode 100644
index 0000000..e977fb5
--- /dev/null
+++ b/SOURCES/sos-bz2012857-plugin-timeout-unhandled-exception.patch
@@ -0,0 +1,31 @@
+From a93e118a9c88df52fd2c701d2276185f877d565c Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Wed, 3 Nov 2021 16:07:15 +0100
+Subject: [PATCH] [report] shutdown threads for timed-out plugins
+
+Wait for shutting down threads of timed-out plugins, to prevent
+them from writing to moved auxiliary files like sos_logs/sos.log
+
+Resolves: #2722
+Closes: #2746
+
+Signed-off-by: Pavel Moravec
+---
+ sos/report/__init__.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sos/report/__init__.py b/sos/report/__init__.py
+index 1b5bc97d..ef86b28d 100644
+--- a/sos/report/__init__.py
++++ b/sos/report/__init__.py
+@@ -1046,6 +1046,7 @@ class SoSReport(SoSComponent):
+ self.ui_log.error("\n Plugin %s timed out\n" % plugin[1])
+ self.running_plugs.remove(plugin[1])
+ self.loaded_plugins[plugin[0]-1][1].set_timeout_hit()
++ pool.shutdown(wait=True)
+ pool._threads.clear()
+ if self.opts.estimate_only:
+ from pathlib import Path
+--
+2.31.1
+
diff --git a/SOURCES/sos-bz2018033-plugin-timeouts-proper-handling.patch b/SOURCES/sos-bz2018033-plugin-timeouts-proper-handling.patch
new file mode 100644
index 0000000..9fc7c3d
--- /dev/null
+++ b/SOURCES/sos-bz2018033-plugin-timeouts-proper-handling.patch
@@ -0,0 +1,91 @@
+From 3fea9a564c4112d04f6324df0d8b212e78feb5b3 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker
+Date: Wed, 3 Nov 2021 11:02:54 -0400
+Subject: [PATCH] [Plugin] Ensure specific plugin timeouts are only set for
+ that plugin
+
+It was discovered that setting a specific plugin timeout via the `-k
+$plugin.timeout` option could influence the timeout setting for other
+plugins that did not also have their timeout explicitly set. Fix this
+by moving the default plugin opts into `Plugin.__init__()` so that each
+plugin is ensured a private copy of these default plugin options.
+
+Additionally, add more timeout data to plugin manifest entries to allow
+for better tracking of this setting.
+
+Adds a test case for this scenario.
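The root cause described above is the classic shared-mutable-class-attribute pitfall, reproducible outside sos with a minimal sketch:

    class BadPlugin:
        opts = [['timeout', -1]]        # one list shared by all instances

    a, b = BadPlugin(), BadPlugin()
    a.opts[0][1] = 900                  # set a timeout on plugin a ...
    assert b.opts[0][1] == 900          # ... and it leaks into plugin b

    class GoodPlugin:
        def __init__(self):
            self.opts = [['timeout', -1]]   # private per-instance copy

    c, d = GoodPlugin(), GoodPlugin()
    c.opts[0][1] = 900
    assert d.opts[0][1] == -1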
+
+Closes: #2744
+
+Signed-off-by: Jake Hunsaker
+---
+ sos/report/__init__.py | 2 +-
+ sos/report/plugins/__init__.py | 28 +++++++++++++------
+ tests/vendor_tests/redhat/rhbz2018033.py | 35 ++++++++++++++++++++++++
+ 3 files changed, 55 insertions(+), 10 deletions(-)
+ create mode 100644 tests/vendor_tests/redhat/rhbz2018033.py
+
+diff --git a/sos/report/__init__.py b/sos/report/__init__.py
+index ef86b28d..c95e6300 100644
+--- a/sos/report/__init__.py
++++ b/sos/report/__init__.py
+@@ -766,7 +766,7 @@ class SoSReport(SoSComponent):
+ if self.all_options:
+ self.ui_log.info(_("The following options are available for ALL "
+ "plugins:"))
+- for opt in self.all_options[0][0]._default_plug_opts:
++ for opt in self.all_options[0][0].get_default_plugin_opts():
+ val = opt[3]
+ if val == -1:
+ val = TIMEOUT_DEFAULT
+diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
+index 49f1af27..3e717993 100644
+--- a/sos/report/plugins/__init__.py
++++ b/sos/report/plugins/__init__.py
+@@ -474,12 +474,6 @@ class Plugin(object):
+ # Default predicates
+ predicate = None
+ cmd_predicate = None
+- _default_plug_opts = [
+- ('timeout', 'Timeout in seconds for plugin to finish', 'fast', -1),
+- ('cmd-timeout', 'Timeout in seconds for a command', 'fast', -1),
+- ('postproc', 'Enable post-processing collected plugin data', 'fast',
+- True)
+- ]
+
+ def __init__(self, commons):
+
+@@ -506,7 +500,7 @@ class Plugin(object):
+ else logging.getLogger('sos')
+
+ # add the default plugin opts
+- self.option_list.extend(self._default_plug_opts)
++ self.option_list.extend(self.get_default_plugin_opts())
+
+ # get the option list into a dictionary
+ for opt in self.option_list:
+@@ -591,6 +583,14 @@ class Plugin():
+ # Initialise the default --dry-run predicate
+ self.set_predicate(SoSPredicate(self))
+
++ def get_default_plugin_opts(self):
++ return [
++ ('timeout', 'Timeout in seconds for plugin to finish', 'fast', -1),
++ ('cmd-timeout', 'Timeout in seconds for a command', 'fast', -1),
++ ('postproc', 'Enable post-processing collected plugin data', 'fast',
++ True)
++ ]
++
+ def set_plugin_manifest(self, manifest):
+ """Pass in a manifest object to the plugin to write to
+
+@@ -547,7 +541,9 @@ class Plugin(object):
+ self.manifest.add_field('setup_start', '')
+ self.manifest.add_field('setup_end', '')
+ self.manifest.add_field('setup_time', '')
++ self.manifest.add_field('timeout', self.timeout)
+ self.manifest.add_field('timeout_hit', False)
++ self.manifest.add_field('command_timeout', self.cmdtimeout)
+ self.manifest.add_list('commands', [])
+ self.manifest.add_list('files', [])
+
diff --git a/SOURCES/sos-bz2020777-filter-namespace-per-pattern.patch b/SOURCES/sos-bz2020777-filter-namespace-per-pattern.patch
new file mode 100644
index 0000000..5b0afdb
--- /dev/null
+++ b/SOURCES/sos-bz2020777-filter-namespace-per-pattern.patch
@@ -0,0 +1,54 @@
+From 568eb2fbcf74ecad00d5c06989f55f8a6a9e3516 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Thu, 4 Nov 2021 23:14:21 +0100
+Subject: [PATCH] [report] fix filter_namespace per pattern
+
+Currently, -k networking.namespace_pattern=.. is broken as the regex test
+forgets to add the namespace in case of a positive match.
+
+Also ensure both plugopts namespace_pattern and namespaces work
+together.
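The corrected selection logic, as a standalone sketch with hypothetical namespace names:

    import re

    def filter_namespaces(ns_list, pattern=None, ns_max=None):
        """Keep namespaces matching pattern, capped at ns_max entries."""
        out_ns = []
        for ns in ns_list:
            if pattern and not re.match(pattern, ns):
                continue
            out_ns.append(ns)   # the fix: also append on a positive match
            if ns_max and len(out_ns) == ns_max:
                break
        return out_ns

    assert filter_namespaces(['qrouter-a', 'qdhcp-a', 'qrouter-b'],
                             pattern='qrouter', ns_max=1) == ['qrouter-a']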
+ +Resolves: #2748 + +Signed-off-by: Pavel Moravec +--- + sos/report/plugins/__init__.py | 15 +++++++-------- + 1 file changed, 7 insertions(+), 8 deletions(-) + +diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py +index 3e717993..a0d4e95d 100644 +--- a/sos/report/plugins/__init__.py ++++ b/sos/report/plugins/__init__.py +@@ -2953,21 +2953,20 @@ class Plugin(): + ) + for ns in ns_list: + # if ns_pattern defined, skip namespaces not matching the pattern +- if ns_pattern: +- if not bool(re.match(pattern, ns)): +- continue ++ if ns_pattern and not bool(re.match(pattern, ns)): ++ continue ++ out_ns.append(ns) + +- # if ns_max is defined at all, limit returned list to that number ++ # if ns_max is defined at all, break the loop when the limit is ++ # reached + # this allows the use of both '0' and `None` to mean unlimited +- elif ns_max: +- out_ns.append(ns) ++ if ns_max: + if len(out_ns) == ns_max: + self._log_warn("Limiting namespace iteration " + "to first %s namespaces found" + % ns_max) + break +- else: +- out_ns.append(ns) ++ + return out_ns + + +-- +2.31.1 + diff --git a/SOURCES/sos-bz2023867-cleaner-hostnames-improvements.patch b/SOURCES/sos-bz2023867-cleaner-hostnames-improvements.patch new file mode 100644 index 0000000..b129f9e --- /dev/null +++ b/SOURCES/sos-bz2023867-cleaner-hostnames-improvements.patch @@ -0,0 +1,1829 @@ +From decd39b7799a0579ea085b0da0728b6eabd49b38 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Wed, 1 Sep 2021 00:28:58 -0400 +Subject: [PATCH] [clean] Provide archive abstractions to obfuscate more than + sos archives + +This commit removes the restriction imposed on `sos clean` since its +introduction in sos-4.0 to only work against known sos report archives +or build directories. This is because there has been interest in using +the obfuscation bits of sos in other data-collector projects. + +The `SoSObfuscationArchive()` class has been revamped to now be an +abstraction for different types of archives, and the cleaner logic has +been updated to leverage this new abstraction rather than assuming we're +working on an sos archive. + +Abstractions are added for our own native use cases - that being `sos +report` and `sos collect` for at-runtime obfuscation, as well as +standalone archives previously generated. Further generic abstractions +are available for plain directories and tarballs however these will not +provide the same level of coverage as fully supported archive types, as +is noted in the manpage for sos-clean. 
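The abstraction described above keys off a per-class type check; roughly, and heavily simplified, with only the two generic fallbacks sketched:

    import os
    import tarfile

    class DataDirArchive:
        type_name = 'data_dir'

        @classmethod
        def check_is_type(cls, path):
            return os.path.isdir(path)

    class TarballArchive:
        type_name = 'tarball'

        @classmethod
        def check_is_type(cls, path):
            return tarfile.is_tarfile(path)

    def detect_archive(path, types=(DataDirArchive, TarballArchive)):
        # first matching class wins, so generic fallbacks belong last
        for arc in types:
            if arc.check_is_type(path):
                return arc
        return None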
+ +Signed-off-by: Jake Hunsaker +--- + man/en/sos-clean.1 | 25 ++ + sos/cleaner/__init__.py | 308 +++++++++--------- + .../__init__.py} | 80 ++++- + sos/cleaner/archives/generic.py | 52 +++ + sos/cleaner/archives/sos.py | 106 ++++++ + sos/cleaner/parsers/__init__.py | 6 - + sos/cleaner/parsers/hostname_parser.py | 1 - + sos/cleaner/parsers/ip_parser.py | 1 - + sos/cleaner/parsers/keyword_parser.py | 1 - + sos/cleaner/parsers/mac_parser.py | 1 - + sos/cleaner/parsers/username_parser.py | 8 - + tests/cleaner_tests/existing_archive.py | 7 + + tests/cleaner_tests/full_report_run.py | 3 + + tests/cleaner_tests/report_with_mask.py | 3 + + 14 files changed, 423 insertions(+), 179 deletions(-) + rename sos/cleaner/{obfuscation_archive.py => archives/__init__.py} (81%) + create mode 100644 sos/cleaner/archives/generic.py + create mode 100644 sos/cleaner/archives/sos.py + +diff --git a/man/en/sos-clean.1 b/man/en/sos-clean.1 +index b77bc63c..54026713 100644 +--- a/man/en/sos-clean.1 ++++ b/man/en/sos-clean.1 +@@ -10,6 +10,7 @@ sos clean - Obfuscate sensitive data from one or more sosreports + [\-\-jobs] + [\-\-no-update] + [\-\-keep-binary-files] ++ [\-\-archive-type] + + .SH DESCRIPTION + \fBsos clean\fR or \fBsos mask\fR is an sos subcommand used to obfuscate sensitive information from +@@ -88,6 +89,30 @@ Users should review any archive that keeps binary files in place before sending + a third party. + + Default: False (remove encountered binary files) ++.TP ++.B \-\-archive-type TYPE ++Specify the type of archive that TARGET was generated as. ++When sos inspects a TARGET archive, it tries to identify what type of archive it is. ++For example, it may be a report generated by \fBsos report\fR, or a collection of those ++reports generated by \fBsos collect\fR, which require separate approaches. ++ ++This option may be useful if a given TARGET archive is known to be of a specific type, ++but due to unknown reasons or some malformed/missing information in the archive directly, ++that is not properly identified by sos. ++ ++The following are accepted values for this option: ++ ++ \fBauto\fR Automatically detect the archive type ++ \fBreport\fR An archive generated by \fBsos report\fR ++ \fBcollect\fR An archive generated by \fBsos collect\fR ++ ++The following may also be used, however note that these do not attempt to pre-load ++any information from the archives into the parsers. This means that, among other limitations, ++items like host and domain names may not be obfuscated unless an obfuscated mapping already exists ++on the system from a previous execution. ++ ++ \fBdata-dir\fR A plain directory on the filesystem. 
++ \fBtarball\fR A generic tar archive not associated with any known tool + + .SH SEE ALSO + .BR sos (1) +diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py +index 6aadfe79..6d2eb483 100644 +--- a/sos/cleaner/__init__.py ++++ b/sos/cleaner/__init__.py +@@ -12,9 +12,7 @@ import hashlib + import json + import logging + import os +-import re + import shutil +-import tarfile + import tempfile + + from concurrent.futures import ThreadPoolExecutor +@@ -27,7 +25,10 @@ from sos.cleaner.parsers.mac_parser import SoSMacParser + from sos.cleaner.parsers.hostname_parser import SoSHostnameParser + from sos.cleaner.parsers.keyword_parser import SoSKeywordParser + from sos.cleaner.parsers.username_parser import SoSUsernameParser +-from sos.cleaner.obfuscation_archive import SoSObfuscationArchive ++from sos.cleaner.archives.sos import (SoSReportArchive, SoSReportDirectory, ++ SoSCollectorArchive, ++ SoSCollectorDirectory) ++from sos.cleaner.archives.generic import DataDirArchive, TarballArchive + from sos.utilities import get_human_readable + from textwrap import fill + +@@ -41,6 +42,7 @@ class SoSCleaner(SoSComponent): + desc = "Obfuscate sensitive networking information in a report" + + arg_defaults = { ++ 'archive_type': 'auto', + 'domains': [], + 'jobs': 4, + 'keywords': [], +@@ -70,6 +72,7 @@ class SoSCleaner(SoSComponent): + self.from_cmdline = False + if not hasattr(self.opts, 'jobs'): + self.opts.jobs = 4 ++ self.opts.archive_type = 'auto' + self.soslog = logging.getLogger('sos') + self.ui_log = logging.getLogger('sos_ui') + # create the tmp subdir here to avoid a potential race condition +@@ -92,6 +95,17 @@ class SoSCleaner(SoSComponent): + SoSUsernameParser(self.cleaner_mapping, self.opts.usernames) + ] + ++ self.archive_types = [ ++ SoSReportDirectory, ++ SoSReportArchive, ++ SoSCollectorDirectory, ++ SoSCollectorArchive, ++ # make sure these two are always last as they are fallbacks ++ DataDirArchive, ++ TarballArchive ++ ] ++ self.nested_archive = None ++ + self.log_info("Cleaner initialized. From cmdline: %s" + % self.from_cmdline) + +@@ -178,6 +192,11 @@ third party. + ) + clean_grp.add_argument('target', metavar='TARGET', + help='The directory or archive to obfuscate') ++ clean_grp.add_argument('--archive-type', default='auto', ++ choices=['auto', 'report', 'collect', ++ 'data-dir', 'tarball'], ++ help=('Specify what kind of archive the target ' ++ 'was generated as')) + clean_grp.add_argument('--domains', action='extend', default=[], + help='List of domain names to obfuscate') + clean_grp.add_argument('-j', '--jobs', default=4, type=int, +@@ -218,59 +237,28 @@ third party. + + In the event the target path is not an archive, abort. 
+ """ +- if not tarfile.is_tarfile(self.opts.target): +- self.ui_log.error( +- "Invalid target: must be directory or tar archive" +- ) +- self._exit(1) +- +- archive = tarfile.open(self.opts.target) +- self.arc_name = self.opts.target.split('/')[-1].split('.')[:-2][0] +- +- try: +- archive.getmember(os.path.join(self.arc_name, 'sos_logs')) +- except Exception: +- # this is not an sos archive +- self.ui_log.error("Invalid target: not an sos archive") +- self._exit(1) +- +- # see if there are archives within this archive +- nested_archives = [] +- for _file in archive.getmembers(): +- if (re.match('sosreport-.*.tar', _file.name.split('/')[-1]) and not +- (_file.name.endswith(('.md5', '.sha256')))): +- nested_archives.append(_file.name.split('/')[-1]) +- +- if nested_archives: +- self.log_info("Found nested archive(s), extracting top level") +- nested_path = self.extract_archive(archive) +- for arc_file in os.listdir(nested_path): +- if re.match('sosreport.*.tar.*', arc_file): +- if arc_file.endswith(('.md5', '.sha256')): +- continue +- self.report_paths.append(os.path.join(nested_path, +- arc_file)) +- # add the toplevel extracted archive +- self.report_paths.append(nested_path) ++ _arc = None ++ if self.opts.archive_type != 'auto': ++ check_type = self.opts.archive_type.replace('-', '_') ++ for archive in self.archive_types: ++ if archive.type_name == check_type: ++ _arc = archive(self.opts.target, self.tmpdir) + else: +- self.report_paths.append(self.opts.target) +- +- archive.close() +- +- def extract_archive(self, archive): +- """Extract an archive into our tmpdir so that we may inspect it or +- iterate through its contents for obfuscation +- +- Positional arguments: +- +- :param archive: An open TarFile object for the archive +- +- """ +- if not isinstance(archive, tarfile.TarFile): +- archive = tarfile.open(archive) +- path = os.path.join(self.tmpdir, 'cleaner') +- archive.extractall(path) +- return os.path.join(path, archive.name.split('/')[-1].split('.tar')[0]) ++ for arc in self.archive_types: ++ if arc.check_is_type(self.opts.target): ++ _arc = arc(self.opts.target, self.tmpdir) ++ break ++ if not _arc: ++ return ++ self.report_paths.append(_arc) ++ if _arc.is_nested: ++ self.report_paths.extend(_arc.get_nested_archives()) ++ # We need to preserve the top level archive until all ++ # nested archives are processed ++ self.report_paths.remove(_arc) ++ self.nested_archive = _arc ++ if self.nested_archive: ++ self.nested_archive.ui_name = self.nested_archive.description + + def execute(self): + """SoSCleaner will begin by inspecting the TARGET option to determine +@@ -283,6 +271,7 @@ third party. + be unpacked, cleaned, and repacked and the final top-level archive will + then be repacked as well. + """ ++ self.arc_name = self.opts.target.split('/')[-1].split('.tar')[0] + if self.from_cmdline: + self.print_disclaimer() + self.report_paths = [] +@@ -290,23 +279,11 @@ third party. 
+ self.ui_log.error("Invalid target: no such file or directory %s" + % self.opts.target) + self._exit(1) +- if os.path.isdir(self.opts.target): +- self.arc_name = self.opts.target.split('/')[-1] +- for _file in os.listdir(self.opts.target): +- if _file == 'sos_logs': +- self.report_paths.append(self.opts.target) +- if (_file.startswith('sosreport') and +- (_file.endswith(".tar.gz") or _file.endswith(".tar.xz"))): +- self.report_paths.append(os.path.join(self.opts.target, +- _file)) +- if not self.report_paths: +- self.ui_log.error("Invalid target: not an sos directory") +- self._exit(1) +- else: +- self.inspect_target_archive() ++ ++ self.inspect_target_archive() + + if not self.report_paths: +- self.ui_log.error("No valid sos archives or directories found\n") ++ self.ui_log.error("No valid archives or directories found\n") + self._exit(1) + + # we have at least one valid target to obfuscate +@@ -334,33 +311,7 @@ third party. + + final_path = None + if len(self.completed_reports) > 1: +- # we have an archive of archives, so repack the obfuscated tarball +- arc_name = self.arc_name + '-obfuscated' +- self.setup_archive(name=arc_name) +- for arc in self.completed_reports: +- if arc.is_tarfile: +- arc_dest = self.obfuscate_string( +- arc.final_archive_path.split('/')[-1] +- ) +- self.archive.add_file(arc.final_archive_path, +- dest=arc_dest) +- checksum = self.get_new_checksum(arc.final_archive_path) +- if checksum is not None: +- dname = self.obfuscate_string( +- "checksums/%s.%s" % (arc_dest, self.hash_name) +- ) +- self.archive.add_string(checksum, dest=dname) +- else: +- for dirname, dirs, files in os.walk(arc.archive_path): +- for filename in files: +- if filename.startswith('sosreport'): +- continue +- fname = os.path.join(dirname, filename) +- dnm = self.obfuscate_string( +- fname.split(arc.archive_name)[-1].lstrip('/') +- ) +- self.archive.add_file(fname, dest=dnm) +- arc_path = self.archive.finalize(self.opts.compression_type) ++ arc_path = self.rebuild_nested_archive() + else: + arc = self.completed_reports[0] + arc_path = arc.final_archive_path +@@ -371,8 +322,7 @@ third party. + ) + with open(os.path.join(self.sys_tmp, chksum_name), 'w') as cf: + cf.write(checksum) +- +- self.write_cleaner_log() ++ self.write_cleaner_log() + + final_path = self.obfuscate_string( + os.path.join(self.sys_tmp, arc_path.split('/')[-1]) +@@ -393,6 +343,30 @@ third party. + + self.cleanup() + ++ def rebuild_nested_archive(self): ++ """Handles repacking the nested tarball, now containing only obfuscated ++ copies of the reports, log files, manifest, etc... 
++ """ ++ # we have an archive of archives, so repack the obfuscated tarball ++ arc_name = self.arc_name + '-obfuscated' ++ self.setup_archive(name=arc_name) ++ for archive in self.completed_reports: ++ arc_dest = archive.final_archive_path.split('/')[-1] ++ checksum = self.get_new_checksum(archive.final_archive_path) ++ if checksum is not None: ++ dname = "checksums/%s.%s" % (arc_dest, self.hash_name) ++ self.archive.add_string(checksum, dest=dname) ++ for dirn, dirs, files in os.walk(self.nested_archive.extracted_path): ++ for filename in files: ++ fname = os.path.join(dirn, filename) ++ dname = fname.split(self.nested_archive.extracted_path)[-1] ++ dname = dname.lstrip('/') ++ self.archive.add_file(fname, dest=dname) ++ # remove it now so we don't balloon our fs space needs ++ os.remove(fname) ++ self.write_cleaner_log(archive=True) ++ return self.archive.finalize(self.opts.compression_type) ++ + def compile_mapping_dict(self): + """Build a dict that contains each parser's map as a key, with the + contents as that key's value. This will then be written to disk in the +@@ -441,7 +415,7 @@ third party. + self.log_error("Could not update mapping config file: %s" + % err) + +- def write_cleaner_log(self): ++ def write_cleaner_log(self, archive=False): + """When invoked via the command line, the logging from SoSCleaner will + not be added to the archive(s) it processes, so we need to write it + separately to disk +@@ -454,6 +428,10 @@ third party. + for line in self.sos_log_file.readlines(): + logfile.write(line) + ++ if archive: ++ self.obfuscate_file(log_name) ++ self.archive.add_file(log_name, dest="sos_logs/cleaner.log") ++ + def get_new_checksum(self, archive_path): + """Calculate a new checksum for the obfuscated archive, as the previous + checksum will no longer be valid +@@ -481,11 +459,11 @@ third party. + be obfuscated concurrently. + """ + try: +- if len(self.report_paths) > 1: +- msg = ("Found %s total reports to obfuscate, processing up to " +- "%s concurrently\n" +- % (len(self.report_paths), self.opts.jobs)) +- self.ui_log.info(msg) ++ msg = ( ++ "Found %s total reports to obfuscate, processing up to %s " ++ "concurrently\n" % (len(self.report_paths), self.opts.jobs) ++ ) ++ self.ui_log.info(msg) + if self.opts.keep_binary_files: + self.ui_log.warning( + "WARNING: binary files that potentially contain sensitive " +@@ -494,53 +472,67 @@ third party. 
+ pool = ThreadPoolExecutor(self.opts.jobs) + pool.map(self.obfuscate_report, self.report_paths, chunksize=1) + pool.shutdown(wait=True) ++ # finally, obfuscate the nested archive if one exists ++ if self.nested_archive: ++ self._replace_obfuscated_archives() ++ self.obfuscate_report(self.nested_archive) + except KeyboardInterrupt: + self.ui_log.info("Exiting on user cancel") + os._exit(130) + ++ def _replace_obfuscated_archives(self): ++ """When we have a nested archive, we need to rebuild the original ++ archive, which entails replacing the existing archives with their ++ obfuscated counterparts ++ """ ++ for archive in self.completed_reports: ++ os.remove(archive.archive_path) ++ dest = self.nested_archive.extracted_path ++ tarball = archive.final_archive_path.split('/')[-1] ++ dest_name = os.path.join(dest, tarball) ++ shutil.move(archive.final_archive_path, dest) ++ archive.final_archive_path = dest_name ++ + def preload_all_archives_into_maps(self): + """Before doing the actual obfuscation, if we have multiple archives + to obfuscate then we need to preload each of them into the mappings + to ensure that node1 is obfuscated in node2 as well as node2 being + obfuscated in node1's archive. + """ +- self.log_info("Pre-loading multiple archives into obfuscation maps") ++ self.log_info("Pre-loading all archives into obfuscation maps") + for _arc in self.report_paths: +- is_dir = os.path.isdir(_arc) +- if is_dir: +- _arc_name = _arc +- else: +- archive = tarfile.open(_arc) +- _arc_name = _arc.split('/')[-1].split('.tar')[0] +- # for each parser, load the map_prep_file into memory, and then +- # send that for obfuscation. We don't actually obfuscate the file +- # here, do that in the normal archive loop + for _parser in self.parsers: +- if not _parser.prep_map_file: ++ try: ++ pfile = _arc.prep_files[_parser.name.lower().split()[0]] ++ if not pfile: ++ continue ++ except (IndexError, KeyError): + continue +- if isinstance(_parser.prep_map_file, str): +- _parser.prep_map_file = [_parser.prep_map_file] +- for parse_file in _parser.prep_map_file: +- _arc_path = os.path.join(_arc_name, parse_file) ++ if isinstance(pfile, str): ++ pfile = [pfile] ++ for parse_file in pfile: ++ self.log_debug("Attempting to load %s" % parse_file) + try: +- if is_dir: +- _pfile = open(_arc_path, 'r') +- content = _pfile.read() +- else: +- _pfile = archive.extractfile(_arc_path) +- content = _pfile.read().decode('utf-8') +- _pfile.close() ++ content = _arc.get_file_content(parse_file) ++ if not content: ++ continue + if isinstance(_parser, SoSUsernameParser): + _parser.load_usernames_into_map(content) +- for line in content.splitlines(): +- if isinstance(_parser, SoSHostnameParser): +- _parser.load_hostname_into_map(line) +- self.obfuscate_line(line) ++ elif isinstance(_parser, SoSHostnameParser): ++ _parser.load_hostname_into_map( ++ content.splitlines()[0] ++ ) ++ else: ++ for line in content.splitlines(): ++ self.obfuscate_line(line) + except Exception as err: +- self.log_debug("Could not prep %s: %s" +- % (_arc_path, err)) ++ self.log_info( ++ "Could not prepare %s from %s (archive: %s): %s" ++ % (_parser.name, parse_file, _arc.archive_name, ++ err) ++ ) + +- def obfuscate_report(self, report): ++ def obfuscate_report(self, archive): + """Individually handle each archive or directory we've discovered by + running through each file therein. + +@@ -549,17 +541,12 @@ third party. 
+ :param report str: Filepath to the directory or archive + """ + try: +- if not os.access(report, os.W_OK): +- msg = "Insufficient permissions on %s" % report +- self.log_info(msg) +- self.ui_log.error(msg) +- return +- +- archive = SoSObfuscationArchive(report, self.tmpdir) + arc_md = self.cleaner_md.add_section(archive.archive_name) + start_time = datetime.now() + arc_md.add_field('start_time', start_time) +- archive.extract() ++ # don't double extract nested archives ++ if not archive.is_extracted: ++ archive.extract() + archive.report_msg("Beginning obfuscation...") + + file_list = archive.get_file_list() +@@ -586,27 +573,28 @@ third party. + caller=archive.archive_name) + + # if the archive was already a tarball, repack it +- method = archive.get_compression() +- if method: +- archive.report_msg("Re-compressing...") +- try: +- archive.rename_top_dir( +- self.obfuscate_string(archive.archive_name) +- ) +- archive.compress(method) +- except Exception as err: +- self.log_debug("Archive %s failed to compress: %s" +- % (archive.archive_name, err)) +- archive.report_msg("Failed to re-compress archive: %s" +- % err) +- return ++ if not archive.is_nested: ++ method = archive.get_compression() ++ if method: ++ archive.report_msg("Re-compressing...") ++ try: ++ archive.rename_top_dir( ++ self.obfuscate_string(archive.archive_name) ++ ) ++ archive.compress(method) ++ except Exception as err: ++ self.log_debug("Archive %s failed to compress: %s" ++ % (archive.archive_name, err)) ++ archive.report_msg("Failed to re-compress archive: %s" ++ % err) ++ return ++ self.completed_reports.append(archive) + + end_time = datetime.now() + arc_md.add_field('end_time', end_time) + arc_md.add_field('run_time', end_time - start_time) + arc_md.add_field('files_obfuscated', len(archive.file_sub_list)) + arc_md.add_field('total_substitutions', archive.total_sub_count) +- self.completed_reports.append(archive) + rmsg = '' + if archive.removed_file_count: + rmsg = " [removed %s unprocessable files]" +@@ -615,7 +603,7 @@ third party. + + except Exception as err: + self.ui_log.info("Exception while processing %s: %s" +- % (report, err)) ++ % (archive.archive_name, err)) + + def obfuscate_file(self, filename, short_name=None, arc_name=None): + """Obfuscate and individual file, line by line. +@@ -635,6 +623,8 @@ third party. + # the requested file doesn't exist in the archive + return + subs = 0 ++ if not short_name: ++ short_name = filename.split('/')[-1] + if not os.path.islink(filename): + # don't run the obfuscation on the link, but on the actual file + # at some other point. +@@ -745,3 +735,5 @@ third party. 
+ for parser in self.parsers: + _sec = parse_sec.add_section(parser.name.replace(' ', '_').lower()) + _sec.add_field('entries', len(parser.mapping.dataset.keys())) ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/cleaner/obfuscation_archive.py b/sos/cleaner/archives/__init__.py +similarity index 81% +rename from sos/cleaner/obfuscation_archive.py +rename to sos/cleaner/archives/__init__.py +index ea0b7012..795c5a78 100644 +--- a/sos/cleaner/obfuscation_archive.py ++++ b/sos/cleaner/archives/__init__.py +@@ -40,6 +40,10 @@ class SoSObfuscationArchive(): + file_sub_list = [] + total_sub_count = 0 + removed_file_count = 0 ++ type_name = 'undetermined' ++ description = 'undetermined' ++ is_nested = False ++ prep_files = {} + + def __init__(self, archive_path, tmpdir): + self.archive_path = archive_path +@@ -50,7 +54,43 @@ class SoSObfuscationArchive(): + self.soslog = logging.getLogger('sos') + self.ui_log = logging.getLogger('sos_ui') + self.skip_list = self._load_skip_list() +- self.log_info("Loaded %s as an archive" % self.archive_path) ++ self.is_extracted = False ++ self._load_self() ++ self.archive_root = '' ++ self.log_info( ++ "Loaded %s as type %s" ++ % (self.archive_path, self.description) ++ ) ++ ++ @classmethod ++ def check_is_type(cls, arc_path): ++ """Check if the archive is a well-known type we directly support""" ++ return False ++ ++ def _load_self(self): ++ if self.is_tarfile: ++ self.tarobj = tarfile.open(self.archive_path) ++ ++ def get_nested_archives(self): ++ """Return a list of ObfuscationArchives that represent additional ++ archives found within the target archive. For example, an archive from ++ `sos collect` will return a list of ``SoSReportArchive`` objects. ++ ++ This should be overridden by individual types of ObfuscationArchive's ++ """ ++ return [] ++ ++ def get_archive_root(self): ++ """Set the root path for the archive that should be prepended to any ++ filenames given to methods in this class. ++ """ ++ if self.is_tarfile: ++ toplevel = self.tarobj.firstmember ++ if toplevel.isdir(): ++ return toplevel.name ++ else: ++ return os.sep ++ return os.path.abspath(self.archive_path) + + def report_msg(self, msg): + """Helper to easily format ui messages on a per-report basis""" +@@ -96,10 +136,42 @@ class SoSObfuscationArchive(): + os.remove(full_fname) + self.removed_file_count += 1 + +- def extract(self): ++ def format_file_name(self, fname): ++ """Based on the type of archive we're dealing with, do whatever that ++ archive requires to a provided **relative** filepath to be able to ++ access it within the archive ++ """ ++ if not self.is_extracted: ++ if not self.archive_root: ++ self.archive_root = self.get_archive_root() ++ return os.path.join(self.archive_root, fname) ++ else: ++ return os.path.join(self.extracted_path, fname) ++ ++ def get_file_content(self, fname): ++ """Return the content from the specified fname. 
Particularly useful for ++ tarball-type archives so we can retrieve prep file contents prior to ++ extracting the entire archive ++ """ ++ if self.is_extracted is False and self.is_tarfile: ++ filename = self.format_file_name(fname) ++ try: ++ return self.tarobj.extractfile(filename).read().decode('utf-8') ++ except KeyError: ++ self.log_debug( ++ "Unable to retrieve %s: no such file in archive" % fname ++ ) ++ return '' ++ else: ++ with open(self.format_file_name(fname), 'r') as to_read: ++ return to_read.read() ++ ++ def extract(self, quiet=False): + if self.is_tarfile: +- self.report_msg("Extracting...") ++ if not quiet: ++ self.report_msg("Extracting...") + self.extracted_path = self.extract_self() ++ self.is_extracted = True + else: + self.extracted_path = self.archive_path + # if we're running as non-root (e.g. collector), then we can have a +@@ -317,3 +389,5 @@ class SoSObfuscationArchive(): + return False + except UnicodeDecodeError: + return True ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/cleaner/archives/generic.py b/sos/cleaner/archives/generic.py +new file mode 100644 +index 00000000..2ce6f09b +--- /dev/null ++++ b/sos/cleaner/archives/generic.py +@@ -0,0 +1,52 @@ ++# Copyright 2020 Red Hat, Inc. Jake Hunsaker ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++ ++from sos.cleaner.archives import SoSObfuscationArchive ++ ++import os ++import tarfile ++ ++ ++class DataDirArchive(SoSObfuscationArchive): ++ """A plain directory on the filesystem that is not directly associated with ++ any known or supported collection utility ++ """ ++ ++ type_name = 'data_dir' ++ description = 'unassociated directory' ++ ++ @classmethod ++ def check_is_type(cls, arc_path): ++ return os.path.isdir(arc_path) ++ ++ def set_archive_root(self): ++ return os.path.abspath(self.archive_path) ++ ++ ++class TarballArchive(SoSObfuscationArchive): ++ """A generic tar archive that is not associated with any known or supported ++ collection utility ++ """ ++ ++ type_name = 'tarball' ++ description = 'unassociated tarball' ++ ++ @classmethod ++ def check_is_type(cls, arc_path): ++ try: ++ return tarfile.is_tarfile(arc_path) ++ except Exception: ++ return False ++ ++ def set_archive_root(self): ++ if self.tarobj.firstmember.isdir(): ++ return self.tarobj.firstmember.name ++ return '' +diff --git a/sos/cleaner/archives/sos.py b/sos/cleaner/archives/sos.py +new file mode 100644 +index 00000000..4401d710 +--- /dev/null ++++ b/sos/cleaner/archives/sos.py +@@ -0,0 +1,106 @@ ++# Copyright 2021 Red Hat, Inc. Jake Hunsaker ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. 
++ ++ ++from sos.cleaner.archives import SoSObfuscationArchive ++ ++import os ++import tarfile ++ ++ ++class SoSReportArchive(SoSObfuscationArchive): ++ """This is the class representing an sos report, or in other words the ++ type the archive the SoS project natively generates ++ """ ++ ++ type_name = 'report' ++ description = 'sos report archive' ++ prep_files = { ++ 'hostname': 'sos_commands/host/hostname', ++ 'ip': 'sos_commands/networking/ip_-o_addr', ++ 'mac': 'sos_commands/networking/ip_-d_address', ++ 'username': [ ++ 'sos_commands/login/lastlog_-u_1000-60000', ++ 'sos_commands/login/lastlog_-u_60001-65536', ++ 'sos_commands/login/lastlog_-u_65537-4294967295', ++ # AD users will be reported here, but favor the lastlog files since ++ # those will include local users who have not logged in ++ 'sos_commands/login/last' ++ ] ++ } ++ ++ @classmethod ++ def check_is_type(cls, arc_path): ++ try: ++ return tarfile.is_tarfile(arc_path) and 'sosreport-' in arc_path ++ except Exception: ++ return False ++ ++ ++class SoSReportDirectory(SoSReportArchive): ++ """This is the archive class representing a build directory, or in other ++ words what `sos report --clean` will end up using for in-line obfuscation ++ """ ++ ++ type_name = 'report_dir' ++ description = 'sos report directory' ++ ++ @classmethod ++ def check_is_type(cls, arc_path): ++ if os.path.isdir(arc_path): ++ return 'sos_logs' in os.listdir(arc_path) ++ return False ++ ++ ++class SoSCollectorArchive(SoSObfuscationArchive): ++ """Archive class representing the tarball created by ``sos collect``. It ++ will not provide prep files on its own, however it will provide a list ++ of SoSReportArchive's which will then be used to prep the parsers ++ """ ++ ++ type_name = 'collect' ++ description = 'sos collect tarball' ++ is_nested = True ++ ++ @classmethod ++ def check_is_type(cls, arc_path): ++ try: ++ return (tarfile.is_tarfile(arc_path) and 'sos-collect' in arc_path) ++ except Exception: ++ return False ++ ++ def get_nested_archives(self): ++ self.extract(quiet=True) ++ _path = self.extracted_path ++ archives = [] ++ for fname in os.listdir(_path): ++ arc_name = os.path.join(_path, fname) ++ if 'sosreport-' in fname and tarfile.is_tarfile(arc_name): ++ archives.append(SoSReportArchive(arc_name, self.tmpdir)) ++ return archives ++ ++ ++class SoSCollectorDirectory(SoSCollectorArchive): ++ """The archive class representing the temp directory used by ``sos ++ collect`` when ``--clean`` is used during runtime. ++ """ ++ ++ type_name = 'collect_dir' ++ description = 'sos collect directory' ++ ++ @classmethod ++ def check_is_type(cls, arc_path): ++ if os.path.isdir(arc_path): ++ for fname in os.listdir(arc_path): ++ if 'sos-collector-' in fname: ++ return True ++ return False ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/cleaner/parsers/__init__.py b/sos/cleaner/parsers/__init__.py +index af6e375e..e62fd938 100644 +--- a/sos/cleaner/parsers/__init__.py ++++ b/sos/cleaner/parsers/__init__.py +@@ -37,11 +37,6 @@ class SoSCleanerParser(): + :cvar map_file_key: The key in the ``map_file`` to read when loading + previous obfuscation matches + :vartype map_file_key: ``str`` +- +- +- :cvar prep_map_file: File to read from an archive to pre-seed the map with +- matches. E.G. 
ip_addr for loading IP addresses +- :vartype prep_map_fie: ``str`` + """ + + name = 'Undefined Parser' +@@ -49,7 +44,6 @@ class SoSCleanerParser(): + skip_line_patterns = [] + skip_files = [] + map_file_key = 'unset' +- prep_map_file = [] + + def __init__(self, config={}): + if self.map_file_key in config: +diff --git a/sos/cleaner/parsers/hostname_parser.py b/sos/cleaner/parsers/hostname_parser.py +index 71e13d3f..daa76a62 100644 +--- a/sos/cleaner/parsers/hostname_parser.py ++++ b/sos/cleaner/parsers/hostname_parser.py +@@ -16,7 +16,6 @@ class SoSHostnameParser(SoSCleanerParser): + + name = 'Hostname Parser' + map_file_key = 'hostname_map' +- prep_map_file = 'sos_commands/host/hostname' + regex_patterns = [ + r'(((\b|_)[a-zA-Z0-9-\.]{1,200}\.[a-zA-Z]{1,63}(\b|_)))' + ] +diff --git a/sos/cleaner/parsers/ip_parser.py b/sos/cleaner/parsers/ip_parser.py +index 525139e8..71d38be8 100644 +--- a/sos/cleaner/parsers/ip_parser.py ++++ b/sos/cleaner/parsers/ip_parser.py +@@ -41,7 +41,6 @@ class SoSIPParser(SoSCleanerParser): + ] + + map_file_key = 'ip_map' +- prep_map_file = 'sos_commands/networking/ip_-o_addr' + + def __init__(self, config): + self.mapping = SoSIPMap() +diff --git a/sos/cleaner/parsers/keyword_parser.py b/sos/cleaner/parsers/keyword_parser.py +index 68de3727..694c6073 100644 +--- a/sos/cleaner/parsers/keyword_parser.py ++++ b/sos/cleaner/parsers/keyword_parser.py +@@ -20,7 +20,6 @@ class SoSKeywordParser(SoSCleanerParser): + + name = 'Keyword Parser' + map_file_key = 'keyword_map' +- prep_map_file = '' + + def __init__(self, config, keywords=None, keyword_file=None): + self.mapping = SoSKeywordMap() +diff --git a/sos/cleaner/parsers/mac_parser.py b/sos/cleaner/parsers/mac_parser.py +index 7ca80b8d..c74288cf 100644 +--- a/sos/cleaner/parsers/mac_parser.py ++++ b/sos/cleaner/parsers/mac_parser.py +@@ -30,7 +30,6 @@ class SoSMacParser(SoSCleanerParser): + '534f:53' + ) + map_file_key = 'mac_map' +- prep_map_file = 'sos_commands/networking/ip_-d_address' + + def __init__(self, config): + self.mapping = SoSMacMap() +diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py +index b142e371..35377a31 100644 +--- a/sos/cleaner/parsers/username_parser.py ++++ b/sos/cleaner/parsers/username_parser.py +@@ -25,14 +25,6 @@ class SoSUsernameParser(SoSCleanerParser): + + name = 'Username Parser' + map_file_key = 'username_map' +- prep_map_file = [ +- 'sos_commands/login/lastlog_-u_1000-60000', +- 'sos_commands/login/lastlog_-u_60001-65536', +- 'sos_commands/login/lastlog_-u_65537-4294967295', +- # AD users will be reported here, but favor the lastlog files since +- # those will include local users who have not logged in +- 'sos_commands/login/last' +- ] + regex_patterns = [] + skip_list = [ + 'core', +diff --git a/tests/cleaner_tests/existing_archive.py b/tests/cleaner_tests/existing_archive.py +index 0eaf6c8d..e13d1cae 100644 +--- a/tests/cleaner_tests/existing_archive.py ++++ b/tests/cleaner_tests/existing_archive.py +@@ -28,6 +28,13 @@ class ExistingArchiveCleanTest(StageTwoReportTest): + def test_obfuscation_log_created(self): + self.assertFileExists(os.path.join(self.tmpdir, '%s-obfuscation.log' % ARCHIVE)) + ++ def test_archive_type_correct(self): ++ with open(os.path.join(self.tmpdir, '%s-obfuscation.log' % ARCHIVE), 'r') as log: ++ for line in log: ++ if "Loaded %s" % ARCHIVE in line: ++ assert 'as type sos report archive' in line, "Incorrect archive type detected: %s" % line ++ break ++ + def test_from_cmdline_logged(self): + with 
open(os.path.join(self.tmpdir, '%s-obfuscation.log' % ARCHIVE), 'r') as log:
+            for line in log:
+diff --git a/tests/cleaner_tests/full_report_run.py b/tests/cleaner_tests/full_report_run.py
+index 3b28e7a2..2de54946 100644
+--- a/tests/cleaner_tests/full_report_run.py
++++ b/tests/cleaner_tests/full_report_run.py
+@@ -35,6 +35,9 @@ class FullCleanTest(StageTwoReportTest):
+     def test_tarball_named_obfuscated(self):
+         self.assertTrue('obfuscated' in self.archive)
+
++    def test_archive_type_correct(self):
++        self.assertSosLogContains('Loaded .* as type sos report directory')
++
+     def test_hostname_not_in_any_file(self):
+         host = self.sysinfo['pre']['networking']['hostname']
+         # much faster to just use grep here
+diff --git a/tests/cleaner_tests/report_with_mask.py b/tests/cleaner_tests/report_with_mask.py
+index 4f94ba33..08e873d4 100644
+--- a/tests/cleaner_tests/report_with_mask.py
++++ b/tests/cleaner_tests/report_with_mask.py
+@@ -31,6 +31,9 @@ class ReportWithMask(StageOneReportTest):
+     def test_tarball_named_obfuscated(self):
+         self.assertTrue('obfuscated' in self.archive)
+
++    def test_archive_type_correct(self):
++        self.assertSosLogContains('Loaded .* as type sos report directory')
++
+     def test_localhost_was_obfuscated(self):
+         self.assertFileHasContent('/etc/hostname', 'host0')
+
+--
+2.31.1
+
+From 9b119f860eaec089f7ef884ff39c42589a662994 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker
+Date: Wed, 1 Sep 2021 00:34:04 -0400
+Subject: [PATCH] [hostname_map] Add a catch for single-character hostnames
+
+If a log file was truncated at a specific boundary in a string of the
+FQDN of the host, such that we only get a couple of characters before
+the rest of the domain, we would previously boldly replace all instances
+of that character with the obfuscated short name; not very helpful.
+
+When this happens, don't sanitize the short name; instead, obfuscate the
+whole FQDN as 'unknown.example.com'.
+
+Signed-off-by: Jake Hunsaker
+---
+ sos/cleaner/mappings/hostname_map.py | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py
+index d4b2c88e..e70a5530 100644
+--- a/sos/cleaner/mappings/hostname_map.py
++++ b/sos/cleaner/mappings/hostname_map.py
+@@ -184,7 +184,14 @@ class SoSHostnameMap(SoSMap):
+         hostname = host[0]
+         domain = host[1:]
+         # obfuscate the short name
+-        ob_hostname = self.sanitize_short_name(hostname)
++        if len(hostname) > 2:
++            ob_hostname = self.sanitize_short_name(hostname)
++        else:
++            # by best practice it appears the host part of the fqdn was cut
++            # off due to some form of truncating, as such don't obfuscate
++            # short strings that are likely to throw off obfuscation of
++            # unrelated bits and paths
++            ob_hostname = 'unknown'
+         ob_domain = self.sanitize_domain(domain)
+         self.dataset[item] = ob_domain
+         return '.'.join([ob_hostname, ob_domain])
+--
+2.31.1
+
+From f3f3e763d7c31b7b7cafdf8dd4dab87056fb7696 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker
+Date: Wed, 1 Sep 2021 15:54:55 -0400
+Subject: [PATCH] [cleaner] Add support for Insights client archives
+
+Adds a new type of `SoSObfuscationArchive` to add support for
+obfuscating archives generated by the Insights project.
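+
+As a rough sketch of the detection flow this enables (the loop below is
+illustrative only; the class names and constructor signature are taken
+from the diffs in this patch):
+
+    # each archive class advertises whether it recognizes the target path
+    for _type in [SoSReportArchive, InsightsArchive, TarballArchive]:
+        if _type.check_is_type('/tmp/insights-host01.tar.gz'):
+            archive = _type('/tmp/insights-host01.tar.gz', tmpdir)
+            break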
+ +Signed-off-by: Jake Hunsaker +--- + man/en/sos-clean.1 | 1 + + sos/cleaner/__init__.py | 4 ++- + sos/cleaner/archives/insights.py | 42 ++++++++++++++++++++++++++++++++ + 3 files changed, 46 insertions(+), 1 deletion(-) + create mode 100644 sos/cleaner/archives/insights.py + +diff --git a/man/en/sos-clean.1 b/man/en/sos-clean.1 +index 54026713..358ec0cb 100644 +--- a/man/en/sos-clean.1 ++++ b/man/en/sos-clean.1 +@@ -105,6 +105,7 @@ The following are accepted values for this option: + \fBauto\fR Automatically detect the archive type + \fBreport\fR An archive generated by \fBsos report\fR + \fBcollect\fR An archive generated by \fBsos collect\fR ++ \fBinsights\fR An archive generated by the \fBinsights-client\fR package + + The following may also be used, however note that these do not attempt to pre-load + any information from the archives into the parsers. This means that, among other limitations, +diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py +index 6d2eb483..3e08aa28 100644 +--- a/sos/cleaner/__init__.py ++++ b/sos/cleaner/__init__.py +@@ -29,6 +29,7 @@ from sos.cleaner.archives.sos import (SoSReportArchive, SoSReportDirectory, + SoSCollectorArchive, + SoSCollectorDirectory) + from sos.cleaner.archives.generic import DataDirArchive, TarballArchive ++from sos.cleaner.archives.insights import InsightsArchive + from sos.utilities import get_human_readable + from textwrap import fill + +@@ -100,6 +101,7 @@ class SoSCleaner(SoSComponent): + SoSReportArchive, + SoSCollectorDirectory, + SoSCollectorArchive, ++ InsightsArchive, + # make sure these two are always last as they are fallbacks + DataDirArchive, + TarballArchive +@@ -194,7 +196,7 @@ third party. + help='The directory or archive to obfuscate') + clean_grp.add_argument('--archive-type', default='auto', + choices=['auto', 'report', 'collect', +- 'data-dir', 'tarball'], ++ 'insights', 'data-dir', 'tarball'], + help=('Specify what kind of archive the target ' + 'was generated as')) + clean_grp.add_argument('--domains', action='extend', default=[], +diff --git a/sos/cleaner/archives/insights.py b/sos/cleaner/archives/insights.py +new file mode 100644 +index 00000000..dab48b16 +--- /dev/null ++++ b/sos/cleaner/archives/insights.py +@@ -0,0 +1,42 @@ ++# Copyright 2021 Red Hat, Inc. Jake Hunsaker ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++ ++from sos.cleaner.archives import SoSObfuscationArchive ++ ++import tarfile ++ ++ ++class InsightsArchive(SoSObfuscationArchive): ++ """This class represents archives generated by the insights-client utility ++ for RHEL systems. 
++ """ ++ ++ type_name = 'insights' ++ description = 'insights-client archive' ++ ++ prep_files = { ++ 'hostname': 'data/insights_commands/hostname_-f', ++ 'ip': 'data/insights_commands/ip_addr', ++ 'mac': 'data/insights_commands/ip_addr' ++ } ++ ++ @classmethod ++ def check_is_type(cls, arc_path): ++ try: ++ return tarfile.is_tarfile(arc_path) and 'insights-' in arc_path ++ except Exception: ++ return False ++ ++ def get_archive_root(self): ++ top = self.archive_path.split('/')[-1].split('.tar')[0] ++ if self.tarobj.firstmember.name == '.': ++ top = './' + top ++ return top +-- +2.31.1 + +From 9639dc3d240076b55f2a1d04b43ea42bebd09215 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Tue, 16 Nov 2021 17:50:42 -0500 +Subject: [PATCH] [clean,hostname_parser] Source /etc/hosts for obfuscation + +Up until now, our sourcing of hostnames/domains for obfuscation has been +dependent upon the output of the `hostname` command. However, some +scenarios have come up where sourcing `/etc/hosts` is advantageous for +several reasons: + +First, if `hostname` output is unavailable, this provides a fallback +measure. + +Second, `/etc/hosts` is a common place to have short names defined which +would otherwise not be detected (or at the very least would result in a +race condition based on where/if the short name was elsewhere able to be +gleaned from an FQDN), thus leaving the potential for unobfuscated data +in an archive. + +Due to both the nature of hostname obfuscation and the malleable syntax +of `/etc/hosts`, the parsing of this file needs special handling not +covered by our more generic parsing and obfuscation methods. + +Signed-off-by: Jake Hunsaker +--- + sos/cleaner/__init__.py | 11 ++++++++--- + sos/cleaner/archives/sos.py | 5 ++++- + sos/cleaner/parsers/hostname_parser.py | 19 +++++++++++++++++++ + 3 files changed, 31 insertions(+), 4 deletions(-) + +diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py +index ed461a8f..3f530d44 100644 +--- a/sos/cleaner/__init__.py ++++ b/sos/cleaner/__init__.py +@@ -523,9 +523,14 @@ third party. 
+ if isinstance(_parser, SoSUsernameParser): + _parser.load_usernames_into_map(content) + elif isinstance(_parser, SoSHostnameParser): +- _parser.load_hostname_into_map( +- content.splitlines()[0] +- ) ++ if 'hostname' in parse_file: ++ _parser.load_hostname_into_map( ++ content.splitlines()[0] ++ ) ++ elif 'etc/hosts' in parse_file: ++ _parser.load_hostname_from_etc_hosts( ++ content ++ ) + else: + for line in content.splitlines(): + self.obfuscate_line(line) +diff --git a/sos/cleaner/archives/sos.py b/sos/cleaner/archives/sos.py +index 4401d710..f8720c88 100644 +--- a/sos/cleaner/archives/sos.py ++++ b/sos/cleaner/archives/sos.py +@@ -23,7 +23,10 @@ class SoSReportArchive(SoSObfuscationArchive): + type_name = 'report' + description = 'sos report archive' + prep_files = { +- 'hostname': 'sos_commands/host/hostname', ++ 'hostname': [ ++ 'sos_commands/host/hostname', ++ 'etc/hosts' ++ ], + 'ip': 'sos_commands/networking/ip_-o_addr', + 'mac': 'sos_commands/networking/ip_-d_address', + 'username': [ +diff --git a/sos/cleaner/parsers/hostname_parser.py b/sos/cleaner/parsers/hostname_parser.py +index daa76a62..0a733bee 100644 +--- a/sos/cleaner/parsers/hostname_parser.py ++++ b/sos/cleaner/parsers/hostname_parser.py +@@ -61,6 +61,25 @@ class SoSHostnameParser(SoSCleanerParser): + self.mapping.add(high_domain) + self.mapping.add(hostname_string) + ++ def load_hostname_from_etc_hosts(self, content): ++ """Parse an archive's copy of /etc/hosts, which requires handling that ++ is separate from the output of the `hostname` command. Just like ++ load_hostname_into_map(), this has to be done explicitly and we ++ cannot rely upon the more generic methods to do this reliably. ++ """ ++ lines = content.splitlines() ++ for line in lines: ++ if line.startswith('#') or 'localhost' in line: ++ continue ++ hostln = line.split()[1:] ++ for host in hostln: ++ if len(host.split('.')) == 1: ++ # only generate a mapping for fqdns but still record the ++ # short name here for later obfuscation with parse_line() ++ self.short_names.append(host) ++ else: ++ self.mapping.add(host) ++ + def parse_line(self, line): + """Override the default parse_line() method to also check for the + shortname of the host derived from the hostname. +-- +2.31.1 + +From c1680226b53452b18f27f2e76c3e0e03e521f935 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Wed, 17 Nov 2021 13:11:33 -0500 +Subject: [PATCH] [clean, hostname] Fix unintentionally case sensitive + shortname handling + +It was discovered that our extra handling for shortnames was +unintentionally case sensitive. Fix this to ensure that shortnames are +obfuscated regardless of case in all collected text. 
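+
+A minimal, standalone illustration of the behavioral change (hostnames
+below are hypothetical, not taken from this patch):
+
+    import re
+
+    line = 'MYHOST and myhost should both be obfuscated'
+    # old approach: plain string replace is case-sensitive, misses 'MYHOST'
+    line.replace('myhost', 'host0')
+    # new approach: regex substitution matches regardless of case
+    line, count = re.compile('myhost', re.I).subn('host0', line)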
+ +Signed-off-by: Jake Hunsaker +--- + sos/cleaner/mappings/hostname_map.py | 6 +++--- + sos/cleaner/parsers/hostname_parser.py | 8 +++++--- + tests/cleaner_tests/full_report_run.py | 21 ++++++++++++++++++++- + 3 files changed, 28 insertions(+), 7 deletions(-) + +diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py +index e70a5530..0fe78fb1 100644 +--- a/sos/cleaner/mappings/hostname_map.py ++++ b/sos/cleaner/mappings/hostname_map.py +@@ -169,13 +169,13 @@ class SoSHostnameMap(SoSMap): + + def sanitize_item(self, item): + host = item.split('.') +- if all([h.isupper() for h in host]): ++ if len(host) > 1 and all([h.isupper() for h in host]): + # by convention we have just a domain + _host = [h.lower() for h in host] + return self.sanitize_domain(_host).upper() + if len(host) == 1: + # we have a shortname for a host +- return self.sanitize_short_name(host[0]) ++ return self.sanitize_short_name(host[0].lower()) + if len(host) == 2: + # we have just a domain name, e.g. example.com + return self.sanitize_domain(host) +@@ -185,7 +185,7 @@ class SoSHostnameMap(SoSMap): + domain = host[1:] + # obfuscate the short name + if len(hostname) > 2: +- ob_hostname = self.sanitize_short_name(hostname) ++ ob_hostname = self.sanitize_short_name(hostname.lower()) + else: + # by best practice it appears the host part of the fqdn was cut + # off due to some form of truncating, as such don't obfuscate +diff --git a/sos/cleaner/parsers/hostname_parser.py b/sos/cleaner/parsers/hostname_parser.py +index 0a733bee..7fd0e698 100644 +--- a/sos/cleaner/parsers/hostname_parser.py ++++ b/sos/cleaner/parsers/hostname_parser.py +@@ -8,6 +8,8 @@ + # + # See the LICENSE file in the source distribution for further information. + ++import re ++ + from sos.cleaner.parsers import SoSCleanerParser + from sos.cleaner.mappings.hostname_map import SoSHostnameMap + +@@ -91,9 +93,9 @@ class SoSHostnameParser(SoSCleanerParser): + """ + if search in self.mapping.skip_keys: + return ln, count +- if search in ln: +- count += ln.count(search) +- ln = ln.replace(search, self.mapping.get(repl or search)) ++ _reg = re.compile(search, re.I) ++ if _reg.search(ln): ++ return _reg.subn(self.mapping.get(repl or search), ln) + return ln, count + + count = 0 +diff --git a/tests/cleaner_tests/full_report_run.py b/tests/cleaner_tests/full_report_run.py +index 2de54946..0b23acaf 100644 +--- a/tests/cleaner_tests/full_report_run.py ++++ b/tests/cleaner_tests/full_report_run.py +@@ -26,6 +26,24 @@ class FullCleanTest(StageTwoReportTest): + # replace with an empty placeholder, make sure that this test case is not + # influenced by previous clean runs + files = ['/etc/sos/cleaner/default_mapping'] ++ packages = { ++ 'rhel': ['python3-systemd'], ++ 'ubuntu': ['python3-systemd'] ++ } ++ ++ def pre_sos_setup(self): ++ # ensure that case-insensitive matching of FQDNs and shortnames work ++ from systemd import journal ++ from socket import gethostname ++ host = gethostname() ++ short = host.split('.')[0] ++ sosfd = journal.stream('sos-testing') ++ sosfd.write( ++ "This is a test line from sos clean testing. The hostname %s " ++ "should not appear, nor should %s in an obfuscated archive. The " ++ "shortnames of %s and %s should also not appear." 
++            % (host.lower(), host.upper(), short.lower(), short.upper())
++        )
+
+     def test_private_map_was_generated(self):
+         self.assertOutputContains('A mapping of obfuscated elements is available at')
+@@ -40,8 +58,9 @@ class FullCleanTest(StageTwoReportTest):
+
+     def test_hostname_not_in_any_file(self):
+         host = self.sysinfo['pre']['networking']['hostname']
++        short = host.split('.')[0]
+         # much faster to just use grep here
+-        content = self.grep_for_content(host)
++        content = self.grep_for_content(host) + self.grep_for_content(short)
+         if not content:
+             assert True
+         else:
+--
+2.31.1
+
+From aaeb8cb57ed55598ab744b96d4f127aedebcb292 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker
+Date: Tue, 21 Sep 2021 15:23:20 -0400
+Subject: [PATCH] [build] Add archives to setup.py packages
+
+Adds the newly abstracted `sos.cleaner.archives` package to `setup.py`
+so that manual builds will properly include it.
+
+Signed-off-by: Jake Hunsaker
+---
+ setup.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/setup.py b/setup.py
+index 1e8d8e2dc5..7653b59de3 100644
+--- a/setup.py
++++ b/setup.py
+@@ -102,7 +102,7 @@ def copy_file (self, filename, dirname):
+         'sos.policies.package_managers', 'sos.policies.init_systems',
+         'sos.report', 'sos.report.plugins', 'sos.collector',
+         'sos.collector.clusters', 'sos.cleaner', 'sos.cleaner.mappings',
+-        'sos.cleaner.parsers'
++        'sos.cleaner.parsers', 'sos.cleaner.archives'
+     ],
+     cmdclass=cmdclass,
+     command_options=command_options,
+--
+2.31.1
+
+From ba3528230256429a4394f155a9ca1fdb91cf3560 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker
+Date: Tue, 30 Nov 2021 12:46:34 -0500
+Subject: [PATCH 1/2] [hostname] Simplify case matching for domains
+
+Instead of special-casing all-uppercase domain conventions, use our
+normal flow for obfuscation and just match the casing at the end of the
+sanitization routine.
+
+Signed-off-by: Jake Hunsaker
+---
+ sos/cleaner/mappings/hostname_map.py | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py
+index 0fe78fb1..5cd8e985 100644
+--- a/sos/cleaner/mappings/hostname_map.py
++++ b/sos/cleaner/mappings/hostname_map.py
+@@ -169,16 +169,15 @@ class SoSHostnameMap(SoSMap):
+
+     def sanitize_item(self, item):
+         host = item.split('.')
+-        if len(host) > 1 and all([h.isupper() for h in host]):
+-            # by convention we have just a domain
+-            _host = [h.lower() for h in host]
+-            return self.sanitize_domain(_host).upper()
+         if len(host) == 1:
+             # we have a shortname for a host
+             return self.sanitize_short_name(host[0].lower())
+         if len(host) == 2:
+             # we have just a domain name, e.g. example.com
+-            return self.sanitize_domain(host)
++            dname = self.sanitize_domain(host)
++            if all([h.isupper() for h in host]):
++                dname = dname.upper()
++            return dname
+         if len(host) > 2:
+             # we have an FQDN, e.g.
foo.example.com + hostname = host[0] +@@ -194,7 +193,10 @@ class SoSHostnameMap(SoSMap): + ob_hostname = 'unknown' + ob_domain = self.sanitize_domain(domain) + self.dataset[item] = ob_domain +- return '.'.join([ob_hostname, ob_domain]) ++ _fqdn = '.'.join([ob_hostname, ob_domain]) ++ if all([h.isupper() for h in host]): ++ _fqdn = _fqdn.upper() ++ return _fqdn + + def sanitize_short_name(self, hostname): + """Obfuscate the short name of the host with an incremented counter +-- +2.31.1 + + +From 189586728de22dd55122c1f7e06b19590f9a788f Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Tue, 30 Nov 2021 12:47:58 -0500 +Subject: [PATCH 2/2] [username] Improve username sourcing and remove case + sensitivity + +First, don't skip the first line of `last` output, and instead add the +header from lastlog to the skip list. Additionally, add +`/etc/cron.allow` and `/etc/cron.deny` as sources for usernames that +might not appear in other locations in certain environments. + +Also, make matching and replacement case insensitive. + +Signed-off-by: Jake Hunsaker +--- + sos/cleaner/archives/sos.py | 4 +++- + sos/cleaner/mappings/username_map.py | 2 +- + sos/cleaner/parsers/username_parser.py | 14 +++++++++----- + 3 files changed, 13 insertions(+), 7 deletions(-) + +diff --git a/sos/cleaner/archives/sos.py b/sos/cleaner/archives/sos.py +index f8720c88..12766496 100644 +--- a/sos/cleaner/archives/sos.py ++++ b/sos/cleaner/archives/sos.py +@@ -35,7 +35,9 @@ class SoSReportArchive(SoSObfuscationArchive): + 'sos_commands/login/lastlog_-u_65537-4294967295', + # AD users will be reported here, but favor the lastlog files since + # those will include local users who have not logged in +- 'sos_commands/login/last' ++ 'sos_commands/login/last', ++ 'etc/cron.allow', ++ 'etc/cron.deny' + ] + } + +diff --git a/sos/cleaner/mappings/username_map.py b/sos/cleaner/mappings/username_map.py +index cdbf36fe..7ecccd7b 100644 +--- a/sos/cleaner/mappings/username_map.py ++++ b/sos/cleaner/mappings/username_map.py +@@ -33,5 +33,5 @@ class SoSUsernameMap(SoSMap): + ob_name = "obfuscateduser%s" % self.name_count + self.name_count += 1 + if ob_name in self.dataset.values(): +- return self.sanitize_item(username) ++ return self.sanitize_item(username.lower()) + return ob_name +diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py +index 35377a31..229c7de4 100644 +--- a/sos/cleaner/parsers/username_parser.py ++++ b/sos/cleaner/parsers/username_parser.py +@@ -8,6 +8,7 @@ + # + # See the LICENSE file in the source distribution for further information. + ++import re + + from sos.cleaner.parsers import SoSCleanerParser + from sos.cleaner.mappings.username_map import SoSUsernameMap +@@ -34,6 +35,7 @@ class SoSUsernameParser(SoSCleanerParser): + 'reboot', + 'root', + 'ubuntu', ++ 'username', + 'wtmp' + ] + +@@ -47,12 +49,12 @@ class SoSUsernameParser(SoSCleanerParser): + this parser, we need to override the initial parser prepping here. 
+        """
+        users = set()
+-        for line in content.splitlines()[1:]:
++        for line in content.splitlines():
+             try:
+                 user = line.split()[0]
+             except Exception:
+                 continue
+-            if user in self.skip_list:
++            if user.lower() in self.skip_list:
+                 continue
+             users.add(user)
+         for each in users:
+@@ -61,7 +63,9 @@
+     def parse_line(self, line):
+         count = 0
+         for username in sorted(self.mapping.dataset.keys(), reverse=True):
+-            if username in line:
+-                count = line.count(username)
+-                line = line.replace(username, self.mapping.get(username))
++            _reg = re.compile(username, re.I)
++            if _reg.search(line):
++                line, count = _reg.subn(
++                    self.mapping.get(username.lower()), line
++                )
+         return line, count
+--
+2.31.1
+
+From cafd0f3a52436a3966576e7db21e5dd17c06f0cc Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker
+Date: Sun, 12 Dec 2021 11:10:46 -0500
+Subject: [PATCH] [hostname] Fix edge case for new hosts in a known subdomain
+
+Fixes an edge case where we would initially fail to recognize that a
+given hostname string is a new host in a known subdomain, only to then
+properly recognize it as such during the obfuscation attempt, resulting
+in an incomplete obfuscation.
+
+This was mostly triggered by specific patterns for build hosts within
+`sos_commands/rpm/package-data`. With this refined check, these types of
+matches are properly obfuscated.
+
+Signed-off-by: Jake Hunsaker
+---
+ sos/cleaner/mappings/hostname_map.py | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py
+index 5cd8e9857..33b0e6c80 100644
+--- a/sos/cleaner/mappings/hostname_map.py
++++ b/sos/cleaner/mappings/hostname_map.py
+@@ -129,7 +129,7 @@ def get(self, item):
+             item = item[0:-1]
+         if not self.domain_name_in_loaded_domains(item.lower()):
+             return item
+-        if item.endswith(('.yaml', '.yml', '.crt', '.key', '.pem')):
++        if item.endswith(('.yaml', '.yml', '.crt', '.key', '.pem', '.log')):
+             ext = '.' + item.split('.')[-1]
+             item = item.replace(ext, '')
+             suffix += ext
+@@ -148,7 +148,8 @@ def get(self, item):
+             if len(_test) == 1 or not _test[0]:
+                 # does not match existing obfuscation
+                 continue
+-            elif _test[0].endswith('.') and not _host_substr:
++            elif not _host_substr and (_test[0].endswith('.') or
++                                       item.endswith(_existing)):
+                 # new hostname in known domain
+                 final = super(SoSHostnameMap, self).get(item)
+                 break
+@@ -219,8 +220,8 @@ def sanitize_domain(self, domain):
+             # don't obfuscate vendor domains
+             if re.match(_skip, '.'.join(domain)):
+                 return '.'.join(domain)
+-        top_domain = domain[-1]
+-        dname = '.'.join(domain[0:-1])
++        top_domain = domain[-1].lower()
++        dname = '.'.join(domain[0:-1]).lower()
+         ob_domain = self._new_obfuscated_domain(dname)
+         ob_domain = '.'.join([ob_domain, top_domain])
+         self.dataset['.'.join(domain)] = ob_domain
+From f5e1298162a9393ea2d9f5c4df40dfece50f5f88 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker
+Date: Thu, 6 Jan 2022 13:15:15 -0500
+Subject: [PATCH 1/3] [hostname] Fix loading and detection of long base domains
+
+Our domain matching has up to now assumed that users would be providing
+'base' domains such as 'example.com' whereby something like
+'foo.bar.example.com' is a subdomain (or host) within that base domain.
+
+However, the use case exists to provide 'foo.bar.example.com' as the
+base domain, without wanting to obfuscate 'example.com' directly.
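+
+For example, with a (hypothetical) base domain of 'foo.bar.example.com'
+the desired behavior is:
+
+    host1.foo.bar.example.com  ->  obfuscated
+    www.example.com            ->  left untouched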
+ +This commit fixes our handling of both loading these longer domains and +doing the 'domain is part of a domain we want to obfuscate' check. + +Signed-off-by: Jake Hunsaker +--- + sos/cleaner/mappings/hostname_map.py | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py +index 33b0e6c8..7a7cf6b8 100644 +--- a/sos/cleaner/mappings/hostname_map.py ++++ b/sos/cleaner/mappings/hostname_map.py +@@ -50,10 +50,14 @@ class SoSHostnameMap(SoSMap): + in this parser, we need to re-inject entries from the map_file into + these dicts and not just the underlying 'dataset' dict + """ +- for domain in self.dataset: ++ for domain, ob_pair in self.dataset.items(): + if len(domain.split('.')) == 1: + self.hosts[domain.split('.')[0]] = self.dataset[domain] + else: ++ if ob_pair.startswith('obfuscateddomain'): ++ # directly exact domain matches ++ self._domains[domain] = ob_pair.split('.')[0] ++ continue + # strip the host name and trailing top-level domain so that + # we in inject the domain properly for later string matching + +@@ -102,9 +106,12 @@ class SoSHostnameMap(SoSMap): + and should be obfuscated + """ + host = domain.split('.') ++ no_tld = '.'.join(domain.split('.')[0:-1]) + if len(host) == 1: + # don't block on host's shortname + return host[0] in self.hosts.keys() ++ elif any([no_tld.endswith(_d) for _d in self._domains]): ++ return True + else: + domain = host[0:-1] + for known_domain in self._domains: +-- +2.31.1 + + +From e241cf33a14ecd4e848a5fd857c5d3d7d07fbd71 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Thu, 6 Jan 2022 13:18:44 -0500 +Subject: [PATCH 2/3] [cleaner] Improve parser-specific file skipping + +This commit improves our handling of skipping files on a per-parser +basis, by first filtering the list of parsers that `obfuscate_line()` +will iterate over by the parser's `skip_file` class attr, rather than +relying on higher-level checks. + +Signed-off-by: Jake Hunsaker +--- + sos/cleaner/__init__.py | 17 ++++++++++++++--- + 1 file changed, 14 insertions(+), 3 deletions(-) + +diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py +index 3f530d44..5686e213 100644 +--- a/sos/cleaner/__init__.py ++++ b/sos/cleaner/__init__.py +@@ -12,6 +12,7 @@ import hashlib + import json + import logging + import os ++import re + import shutil + import tempfile + +@@ -640,10 +641,16 @@ third party. + self.log_debug("Obfuscating %s" % short_name or filename, + caller=arc_name) + tfile = tempfile.NamedTemporaryFile(mode='w', dir=self.tmpdir) ++ _parsers = [ ++ _p for _p in self.parsers if not ++ any([ ++ re.match(p, short_name) for p in _p.skip_files ++ ]) ++ ] + with open(filename, 'r') as fname: + for line in fname: + try: +- line, count = self.obfuscate_line(line) ++ line, count = self.obfuscate_line(line, _parsers) + subs += count + tfile.write(line) + except Exception as err: +@@ -713,7 +720,7 @@ third party. + pass + return string_data + +- def obfuscate_line(self, line): ++ def obfuscate_line(self, line, parsers=None): + """Run a line through each of the obfuscation parsers, keeping a + cumulative total of substitutions done on that particular line. + +@@ -721,6 +728,8 @@ third party. + + :param line str: The raw line as read from the file being + processed ++ :param parsers: A list of parser objects to obfuscate ++ with. If None, use all. + + Returns the fully obfuscated line and the number of substitutions made + """ +@@ -729,7 +738,9 @@ third party. 
+ count = 0 + if not line.strip(): + return line, count +- for parser in self.parsers: ++ if parsers is None: ++ parsers = self.parsers ++ for parser in parsers: + try: + line, _count = parser.parse_line(line) + count += _count +-- +2.31.1 + + +From 96c9a833e77639a853b7d3d6f1df68bbbbe5e9cb Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Thu, 6 Jan 2022 13:20:32 -0500 +Subject: [PATCH 3/3] [cleaner] Add skips for known files and usernames + +Adds skips for `/proc/kallsyms` which should never be obfuscated, as +well as any packaging-related log file for the IP parser. Further, do +not obfuscate the `stack` users, as that is a well-known user for many +configurations that, if obfuscated, could result in undesired string +substitutions in normal logging. + +Signed-off-by: Jake Hunsaker +--- + sos/cleaner/archives/__init__.py | 2 ++ + sos/cleaner/parsers/ip_parser.py | 3 ++- + sos/cleaner/parsers/username_parser.py | 1 + + 3 files changed, 5 insertions(+), 1 deletion(-) + +diff --git a/sos/cleaner/archives/__init__.py b/sos/cleaner/archives/__init__.py +index 795c5a78..cbf1f809 100644 +--- a/sos/cleaner/archives/__init__.py ++++ b/sos/cleaner/archives/__init__.py +@@ -43,6 +43,7 @@ class SoSObfuscationArchive(): + type_name = 'undetermined' + description = 'undetermined' + is_nested = False ++ skip_files = [] + prep_files = {} + + def __init__(self, archive_path, tmpdir): +@@ -111,6 +112,7 @@ class SoSObfuscationArchive(): + Returns: list of files and file regexes + """ + return [ ++ 'proc/kallsyms', + 'sosreport-', + 'sys/firmware', + 'sys/fs', +diff --git a/sos/cleaner/parsers/ip_parser.py b/sos/cleaner/parsers/ip_parser.py +index 71d38be8..b007368c 100644 +--- a/sos/cleaner/parsers/ip_parser.py ++++ b/sos/cleaner/parsers/ip_parser.py +@@ -37,7 +37,8 @@ class SoSIPParser(SoSCleanerParser): + 'sos_commands/snappy/snap_list_--all', + 'sos_commands/snappy/snap_--version', + 'sos_commands/vulkan/vulkaninfo', +- 'var/log/.*dnf.*' ++ 'var/log/.*dnf.*', ++ 'var/log/.*packag.*' # get 'packages' and 'packaging' logs + ] + + map_file_key = 'ip_map' +diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py +index 229c7de4..3208a655 100644 +--- a/sos/cleaner/parsers/username_parser.py ++++ b/sos/cleaner/parsers/username_parser.py +@@ -32,6 +32,7 @@ class SoSUsernameParser(SoSCleanerParser): + 'nobody', + 'nfsnobody', + 'shutdown', ++ 'stack', + 'reboot', + 'root', + 'ubuntu', +-- +2.31.1 + +From 7ebb2ce0bcd13c1b3aada648aceb20b5aff636d9 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Tue, 15 Feb 2022 14:18:02 -0500 +Subject: [PATCH] [host] Skip entire /etc/sos/cleaner directory + +While `default_mapping` is typically the only file expected under +`/etc/sos/cleaner/` it is possible for other mapping files (such as +backups) to appear there. + +Make the `add_forbidden_path()` spec here target the entire cleaner +directory to avoid ever capturing these map files. 
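+
+In effect (paths below are examples), a single broader spec now covers
+the default map and anything beside it:
+
+    self.add_forbidden_path('/etc/sos/cleaner')
+    # excludes /etc/sos/cleaner/default_mapping as before, and now also
+    # e.g. /etc/sos/cleaner/default_mapping.bak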
+
+Signed-off-by: Jake Hunsaker
+---
+ sos/report/plugins/host.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sos/report/plugins/host.py b/sos/report/plugins/host.py
+index 5e21da7b8e..95a3b9cd95 100644
+--- a/sos/report/plugins/host.py
++++ b/sos/report/plugins/host.py
+@@ -20,7 +20,7 @@ class Host(Plugin, IndependentPlugin):
+
+     def setup(self):
+
+-        self.add_forbidden_path('/etc/sos/cleaner/default_mapping')
++        self.add_forbidden_path('/etc/sos/cleaner')
+
+         self.add_cmd_output('hostname', root_symlink='hostname')
+         self.add_cmd_output('uptime', root_symlink='uptime')
diff --git a/SOURCES/sos-bz2025403-nvidia-GPU-info.patch b/SOURCES/sos-bz2025403-nvidia-GPU-info.patch
new file mode 100644
index 0000000..30fbb53
--- /dev/null
+++ b/SOURCES/sos-bz2025403-nvidia-GPU-info.patch
@@ -0,0 +1,46 @@
+From f2cc67750f55a71edff0c527a1bfc14fde8132c3 Mon Sep 17 00:00:00 2001
+From: Mamatha Inamdar
+Date: Mon, 8 Nov 2021 10:50:03 +0530
+Subject: [PATCH] [nvidia]: Patch to update nvidia plugin for GPU info
+
+This patch updates the nvidia plugin to collect
+logs for Nvidia GPUs.
+
+Signed-off-by: Mamatha Inamdar
+Reported-by: Borislav Stoymirski
+Reported-by: Yesenia Jimenez
+---
+ sos/report/plugins/nvidia.py | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/sos/report/plugins/nvidia.py b/sos/report/plugins/nvidia.py
+index 09aaf586b..9e21b478e 100644
+--- a/sos/report/plugins/nvidia.py
++++ b/sos/report/plugins/nvidia.py
+@@ -23,13 +23,24 @@ def setup(self):
+             '--list-gpus',
+             '-q -d PERFORMANCE',
+             '-q -d SUPPORTED_CLOCKS',
+-            '-q -d PAGE_RETIREMENT'
++            '-q -d PAGE_RETIREMENT',
++            '-q',
++            '-q -d ECC',
++            'nvlink -s',
++            'nvlink -e'
+         ]
+
+         self.add_cmd_output(["nvidia-smi %s" % cmd for cmd in subcmds])
+
+         query = ('gpu_name,gpu_bus_id,vbios_version,temperature.gpu,'
+-                 'utilization.gpu,memory.total,memory.free,memory.used')
++                 'utilization.gpu,memory.total,memory.free,memory.used,'
++                 'clocks.applications.graphics,clocks.applications.memory')
++        querypages = ('timestamp,gpu_bus_id,gpu_serial,gpu_uuid,'
++                      'retired_pages.address,retired_pages.cause')
+         self.add_cmd_output("nvidia-smi --query-gpu=%s --format=csv" % query)
++        self.add_cmd_output(
++            "nvidia-smi --query-retired-pages=%s --format=csv" % querypages
++        )
++        self.add_journal(boot=0, identifier='nvidia-persistenced')
+
+ # vim: set et ts=4 sw=4 :
diff --git a/SOURCES/sos-bz2025610-RHTS-api-change.patch b/SOURCES/sos-bz2025610-RHTS-api-change.patch
new file mode 100644
index 0000000..580117f
--- /dev/null
+++ b/SOURCES/sos-bz2025610-RHTS-api-change.patch
@@ -0,0 +1,224 @@
+From 2e8b5e2d4f30854cce93d149fc7d24b9d9cfd02c Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Fri, 19 Nov 2021 16:16:07 +0100
+Subject: [PATCH 1/3] [policies] strip path from SFTP upload filename
+
+When case_id is not supplied, we ask the SFTP server to store the
+uploaded file under its full local path beneath /var/tmp/, which is
+confusing.
+
+Let's remove the path from the filename also when case_id is not
+supplied.
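+
+For illustration (archive name hypothetical), the upload name becomes:
+
+    # with a case id:     12345678_sosreport-host1.tar.xz
+    # without a case id:  sosreport-host1.tar.xz
+    #                     (previously: /var/tmp/sosreport-host1.tar.xz)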
+
+Related to: #2764
+
+Signed-off-by: Pavel Moravec
+---
+ sos/policies/distros/redhat.py | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
+index 3476e21fb..8817fc785 100644
+--- a/sos/policies/distros/redhat.py
++++ b/sos/policies/distros/redhat.py
+@@ -269,10 +269,10 @@ def _get_sftp_upload_name(self):
+         """The RH SFTP server will only automatically connect file uploads to
+         cases if the filename _starts_ with the case number
+         """
++        fname = self.upload_archive_name.split('/')[-1]
+         if self.case_id:
+-            return "%s_%s" % (self.case_id,
+-                              self.upload_archive_name.split('/')[-1])
+-        return self.upload_archive_name
++            return "%s_%s" % (self.case_id, fname)
++        return fname
+
+     def upload_sftp(self):
+         """Override the base upload_sftp to allow for setting an on-demand
+
+From 61023b29a656dd7afaa4a0643368b0a53f1a3779 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Fri, 19 Nov 2021 17:31:31 +0100
+Subject: [PATCH 2/3] [redhat] update SFTP API version to v2
+
+Change API version from v1 to v2, which includes:
+- change of URL
+- different URI
+- POST method for token generation instead of GET
+
+Resolves: #2764
+
+Signed-off-by: Pavel Moravec
+---
+ sos/policies/distros/redhat.py | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
+index 8817fc785..e4e2b8835 100644
+--- a/sos/policies/distros/redhat.py
++++ b/sos/policies/distros/redhat.py
+@@ -175,7 +175,7 @@ def get_tmp_dir(self, opt_tmp_dir):
+ No changes will be made to system configuration.
+ """
+
+-RH_API_HOST = "https://access.redhat.com"
++RH_API_HOST = "https://api.access.redhat.com"
+ RH_SFTP_HOST = "sftp://sftp.access.redhat.com"
+
+
+@@ -287,12 +287,12 @@ def upload_sftp(self):
+                         " for obtaining SFTP auth token.")
+         _token = None
+         _user = None
++        url = RH_API_HOST + '/support/v2/sftp/token'
+         # we have a username and password, but we need to reset the password
+         # to be the token returned from the auth endpoint
+         if self.get_upload_user() and self.get_upload_password():
+-            url = RH_API_HOST + '/hydra/rest/v1/sftp/token'
+             auth = self.get_upload_https_auth()
+-            ret = requests.get(url, auth=auth, timeout=10)
++            ret = requests.post(url, auth=auth, timeout=10)
+             if ret.status_code == 200:
+                 # credentials are valid
+                 _user = self.get_upload_user()
+@@ -302,8 +302,8 @@ def upload_sftp(self):
+                                 "credentials. Will try anonymous.")
+         # we either do not have a username or password/token, or both
+         if not _token:
+-            aurl = RH_API_HOST + '/hydra/rest/v1/sftp/token?isAnonymous=true'
+-            anon = requests.get(aurl, timeout=10)
++            adata = {"isAnonymous": True}
++            anon = requests.post(url, data=json.dumps(adata), timeout=10)
+             if anon.status_code == 200:
+                 resp = json.loads(anon.text)
+                 _user = resp['username']
+
+From 267da2156ec61f526dd28e760ff6528408a76c3f Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Mon, 22 Nov 2021 15:22:32 +0100
+Subject: [PATCH 3/3] [policies] Treat a 200 return code as success
+
+A return code of 200 from a POST request must be treated as success.
+
+This is newly required due to the SFTP API change to using POST.
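+
+A condensed sketch of the resulting check (mirroring the diff below;
+'r' is the response object from the upload request):
+
+    # POST-based endpoints respond with 200, while PUT-based creation
+    # responds with 201, so both must be accepted as success
+    if r.status_code != 200 and r.status_code != 201:
+        raise Exception("Upload failed: %s" % r.status_code)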
+
+Related to: #2764
+
+Signed-off-by: Pavel Moravec
+---
+ sos/policies/distros/__init__.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
+index 0906fa779..6f257fdce 100644
+--- a/sos/policies/distros/__init__.py
++++ b/sos/policies/distros/__init__.py
+@@ -551,7 +551,7 @@ def upload_https(self):
+                 r = self._upload_https_put(arc, verify)
+             else:
+                 r = self._upload_https_post(arc, verify)
+-            if r.status_code != 201:
++            if r.status_code != 200 and r.status_code != 201:
+                 if r.status_code == 401:
+                     raise Exception(
+                         "Authentication failed: invalid user credentials"
+From 8da1b14246226792c160dd04e5c7c75dd4e8d44b Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Mon, 22 Nov 2021 10:44:09 +0100
+Subject: [PATCH] [collect] fix call to get_upload_url moved under Policy class
+
+SoSCollector no longer declares a get_upload_url method of its own,
+as that was moved under the Policy class(es).
+
+Resolves: #2766
+
+Signed-off-by: Pavel Moravec
+---
+ sos/collector/__init__.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
+index 50183e873..42a7731d6 100644
+--- a/sos/collector/__init__.py
++++ b/sos/collector/__init__.py
+@@ -1219,7 +1219,7 @@ this utility or remote systems that it c
+             msg = 'No sosreports were collected, nothing to archive...'
+             self.exit(msg, 1)
+
+-        if self.opts.upload and self.get_upload_url():
++        if self.opts.upload and self.policy.get_upload_url():
+             try:
+                 self.policy.upload_archive(arc_name)
+                 self.ui_log.info("Uploaded archive successfully")
+From abb2fc65bd14760021c61699ad3113cab3bd4c64 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Tue, 30 Nov 2021 11:37:02 +0100
+Subject: [PATCH 1/2] [redhat] Fix broken URI for uploads to the customer portal
+
+Revert the unwanted change to the URI used for uploading tarballs to
+the Red Hat Customer Portal.
+
+Related: #2772
+
+Signed-off-by: Pavel Moravec
+---
+ sos/policies/distros/redhat.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
+index e4e2b883..eb442407 100644
+--- a/sos/policies/distros/redhat.py
++++ b/sos/policies/distros/redhat.py
+@@ -250,7 +250,7 @@ support representative.
+         elif self.commons['cmdlineopts'].upload_protocol == 'sftp':
+             return RH_SFTP_HOST
+         else:
+-            rh_case_api = "/hydra/rest/cases/%s/attachments"
++            rh_case_api = "/support/v1/cases/%s/attachments"
+             return RH_API_HOST + rh_case_api % self.case_id
+
+     def _get_upload_headers(self):
+--
+2.31.1
+
+
+From ea4f9e88a412c80a4791396e1bb78ac1e24ece14 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Tue, 30 Nov 2021 13:00:26 +0100
+Subject: [PATCH 2/2] [policy] Add an error message on FTP upload write failure
+
+When an (S)FTP upload fails to write the destination file, our
+"expect" code should detect this before the timeout expires and write
+an appropriate error message.
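+
+A minimal sketch of the pexpect pattern involved ('ret' is the spawned
+transfer process from the surrounding code; the strings are as in the
+diff below):
+
+    import pexpect
+
+    put_expects = [
+        u'100%',                        # transfer completed
+        pexpect.TIMEOUT,
+        pexpect.EOF,
+        u'No such file or directory',   # destination path not writable
+    ]
+    if ret.expect(put_expects, timeout=180) == 3:
+        raise Exception("Unable to write archive to destination")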
+
+Resolves: #2772
+
+Signed-off-by: Pavel Moravec
+---
+ sos/policies/distros/__init__.py | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
+index 6f257fdc..7bdc81b8 100644
+--- a/sos/policies/distros/__init__.py
++++ b/sos/policies/distros/__init__.py
+@@ -473,7 +473,8 @@ class LinuxPolicy(Policy):
+         put_expects = [
+             u'100%',
+             pexpect.TIMEOUT,
+-            pexpect.EOF
++            pexpect.EOF,
++            u'No such file or directory'
+         ]
+
+         put_success = ret.expect(put_expects, timeout=180)
+@@ -485,6 +486,8 @@ class LinuxPolicy(Policy):
+             raise Exception("Timeout expired while uploading")
+         elif put_success == 2:
+             raise Exception("Unknown error during upload: %s" % ret.before)
++        elif put_success == 3:
++            raise Exception("Unable to write archive to destination")
+         else:
+             raise Exception("Unexpected response from server: %s" % ret.before)
+
+--
+2.31.1
+
diff --git a/SOURCES/sos-bz2030741-rhui-logs.patch b/SOURCES/sos-bz2030741-rhui-logs.patch
new file mode 100644
index 0000000..dcfbc89
--- /dev/null
+++ b/SOURCES/sos-bz2030741-rhui-logs.patch
@@ -0,0 +1,24 @@
+From aa2887f71c779448b22e4de67ae68dbaf218b7b9 Mon Sep 17 00:00:00 2001
+From: Taft Sanders
+Date: Fri, 10 Dec 2021 09:34:59 -0500
+Subject: [PATCH] [rhui] New log folder
+
+Include the new log folder, per Bugzilla 2030741.
+
+Signed-off-by: Taft Sanders
+---
+ sos/report/plugins/rhui.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sos/report/plugins/rhui.py b/sos/report/plugins/rhui.py
+index 52065fb44..add024613 100644
+--- a/sos/report/plugins/rhui.py
++++ b/sos/report/plugins/rhui.py
+@@ -27,6 +27,7 @@ def setup(self):
+             "/var/log/rhui-subscription-sync.log",
+             "/var/cache/rhui/*",
+             "/root/.rhui/*",
++            "/var/log/rhui/*",
+         ])
+         # skip collecting certificate keys
+         self.add_forbidden_path("/etc/pki/rhui/**/*.key", recursive=True)
diff --git a/SOURCES/sos-bz2036697-ocp-backports.patch b/SOURCES/sos-bz2036697-ocp-backports.patch
new file mode 100644
index 0000000..3e53e93
--- /dev/null
+++ b/SOURCES/sos-bz2036697-ocp-backports.patch
@@ -0,0 +1,5145 @@
+From 676dfca09d9c783311a51a1c53fa0f7ecd95bd28 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker
+Date: Fri, 10 Sep 2021 13:38:19 -0400
+Subject: [PATCH] [collect] Abstract transport protocol from SoSNode
+
+Since its addition to sos, collect has assumed the use of a system
+installation of SSH in order to connect to the nodes identified for
+collection. However, there may be use cases and a desire to use other
+transport protocols.
+
+As such, provide an abstraction for these protocols in the form of the
+new `RemoteTransport` class that `SoSNode` will now leverage. So far an
+abstraction for the currently used SSH ControlPersist function is
+provided, along with a pseudo abstraction for local execution so that
+SoSNode does not directly need to make more "if local then foo" checks
+than are absolutely necessary.
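To make the shape of the new interface concrete before the diff itself: a hypothetical third transport written against the RemoteTransport base class introduced below could be as small as the following. The method names and the {'status', 'output'} result dict are taken from the patch; the class is purely illustrative.

    from sos.collector.transports import RemoteTransport

    class EchoTransport(RemoteTransport):
        """Hypothetical transport that executes nothing, showing the
        minimal surface a new transport must implement."""

        name = 'echo'

        def _connect(self, password):
            # a real transport would establish its session here
            return True

        def _disconnect(self):
            return True

        @property
        def connected(self):
            return True

        def run_command(self, cmd, timeout=180, need_root=False, env=None):
            # a real transport would execute cmd on the remote node
            return {'status': 0, 'output': 'would run: %s' % cmd}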
+ +Related: #2668 + +Signed-off-by: Jake Hunsaker +--- + setup.py | 4 +- + sos/collector/__init__.py | 54 +-- + sos/collector/clusters/__init__.py | 4 +- + sos/collector/clusters/jbon.py | 2 + + sos/collector/clusters/kubernetes.py | 4 +- + sos/collector/clusters/ocp.py | 6 +- + sos/collector/clusters/ovirt.py | 10 +- + sos/collector/clusters/pacemaker.py | 8 +- + sos/collector/clusters/satellite.py | 4 +- + sos/collector/sosnode.py | 388 +++++--------------- + sos/collector/transports/__init__.py | 317 ++++++++++++++++ + sos/collector/transports/control_persist.py | 199 ++++++++++ + sos/collector/transports/local.py | 49 +++ + 13 files changed, 705 insertions(+), 344 deletions(-) + create mode 100644 sos/collector/transports/__init__.py + create mode 100644 sos/collector/transports/control_persist.py + create mode 100644 sos/collector/transports/local.py + +diff --git a/setup.py b/setup.py +index 7653b59d..25e87a71 100644 +--- a/setup.py ++++ b/setup.py +@@ -101,8 +101,8 @@ setup( + 'sos.policies.distros', 'sos.policies.runtimes', + 'sos.policies.package_managers', 'sos.policies.init_systems', + 'sos.report', 'sos.report.plugins', 'sos.collector', +- 'sos.collector.clusters', 'sos.cleaner', 'sos.cleaner.mappings', +- 'sos.cleaner.parsers', 'sos.cleaner.archives' ++ 'sos.collector.clusters', 'sos.collector.transports', 'sos.cleaner', ++ 'sos.cleaner.mappings', 'sos.cleaner.parsers', 'sos.cleaner.archives' + ], + cmdclass=cmdclass, + command_options=command_options, +diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py +index b2a07f37..da912655 100644 +--- a/sos/collector/__init__.py ++++ b/sos/collector/__init__.py +@@ -17,7 +17,6 @@ import re + import string + import socket + import shutil +-import subprocess + import sys + + from datetime import datetime +@@ -28,7 +27,6 @@ from pipes import quote + from textwrap import fill + from sos.cleaner import SoSCleaner + from sos.collector.sosnode import SosNode +-from sos.collector.exceptions import ControlPersistUnsupportedException + from sos.options import ClusterOption + from sos.component import SoSComponent + from sos import __version__ +@@ -154,7 +152,6 @@ class SoSCollector(SoSComponent): + try: + self.parse_node_strings() + self.parse_cluster_options() +- self._check_for_control_persist() + self.log_debug('Executing %s' % ' '.join(s for s in sys.argv)) + self.log_debug("Found cluster profiles: %s" + % self.clusters.keys()) +@@ -437,33 +434,6 @@ class SoSCollector(SoSComponent): + action='extend', + help='List of usernames to obfuscate') + +- def _check_for_control_persist(self): +- """Checks to see if the local system supported SSH ControlPersist. +- +- ControlPersist allows OpenSSH to keep a single open connection to a +- remote host rather than building a new session each time. This is the +- same feature that Ansible uses in place of paramiko, which we have a +- need to drop in sos-collector. +- +- This check relies on feedback from the ssh binary. The command being +- run should always generate stderr output, but depending on what that +- output reads we can determine if ControlPersist is supported or not. +- +- For our purposes, a host that does not support ControlPersist is not +- able to run sos-collector. +- +- Returns +- True if ControlPersist is supported, else raise Exception. 
+- """ +- ssh_cmd = ['ssh', '-o', 'ControlPersist'] +- cmd = subprocess.Popen(ssh_cmd, stdout=subprocess.PIPE, +- stderr=subprocess.PIPE) +- out, err = cmd.communicate() +- err = err.decode('utf-8') +- if 'Bad configuration option' in err or 'Usage:' in err: +- raise ControlPersistUnsupportedException +- return True +- + def exit(self, msg, error=1): + """Used to safely terminate if sos-collector encounters an error""" + self.log_error(msg) +@@ -455,7 +455,7 @@ class SoSCollector(SoSComponent): + 'cmdlineopts': self.opts, + 'need_sudo': True if self.opts.ssh_user != 'root' else False, + 'tmpdir': self.tmpdir, +- 'hostlen': len(self.opts.master) or len(self.hostname), ++ 'hostlen': max(len(self.opts.primary), len(self.hostname)), + 'policy': self.policy + } + +@@ -1020,9 +1020,10 @@ class SoSCollector(SoSComponent): + self.node_list.append(self.hostname) + self.reduce_node_list() + try: +- self.commons['hostlen'] = len(max(self.node_list, key=len)) ++ _node_max = len(max(self.node_list, key=len)) ++ self.commons['hostlen'] = max(_node_max, self.commons['hostlen']) + except (TypeError, ValueError): +- self.commons['hostlen'] = len(self.opts.master) ++ pass + + def _connect_to_node(self, node): + """Try to connect to the node, and if we can add to the client list to +@@ -1068,7 +1039,7 @@ class SoSCollector(SoSComponent): + client.set_node_manifest(getattr(self.collect_md.nodes, + node[0])) + else: +- client.close_ssh_session() ++ client.disconnect() + except Exception: + pass + +@@ -1077,12 +1048,11 @@ class SoSCollector(SoSComponent): + provided on the command line + """ + disclaimer = ("""\ +-This utility is used to collect sosreports from multiple \ +-nodes simultaneously. It uses OpenSSH's ControlPersist feature \ +-to connect to nodes and run commands remotely. If your system \ +-installation of OpenSSH is older than 5.6, please upgrade. ++This utility is used to collect sos reports from multiple \ ++nodes simultaneously. Remote connections are made and/or maintained \ ++to those nodes via well-known transport protocols such as SSH. + +-An archive of sosreport tarballs collected from the nodes will be \ ++An archive of sos report tarballs collected from the nodes will be \ + generated in %s and may be provided to an appropriate support representative. + + The generated archive may contain data considered sensitive \ +@@ -1230,10 +1200,10 @@ this utility or remote systems that it connects to. 
+ self.log_error("Error running sosreport: %s" % err) + + def close_all_connections(self): +- """Close all ssh sessions for nodes""" ++ """Close all sessions for nodes""" + for client in self.client_list: +- self.log_debug('Closing SSH connection to %s' % client.address) +- client.close_ssh_session() ++ self.log_debug('Closing connection to %s' % client.address) ++ client.disconnect() + + def create_cluster_archive(self): + """Calls for creation of tar archive then cleans up the temporary +diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py +index 2b5d7018..64ac2a44 100644 +--- a/sos/collector/clusters/__init__.py ++++ b/sos/collector/clusters/__init__.py +@@ -188,8 +188,8 @@ class Cluster(): + :rtype: ``dict`` + """ + res = self.master.run_command(cmd, get_pty=True, need_root=need_root) +- if res['stdout']: +- res['stdout'] = res['stdout'].replace('Password:', '') ++ if res['output']: ++ res['output'] = res['output'].replace('Password:', '') + return res + + def setup(self): +diff --git a/sos/collector/clusters/jbon.py b/sos/collector/clusters/jbon.py +index 488fbd16..8f083ac6 100644 +--- a/sos/collector/clusters/jbon.py ++++ b/sos/collector/clusters/jbon.py +@@ -28,3 +28,5 @@ class jbon(Cluster): + # This should never be called, but as insurance explicitly never + # allow this to be enabled via the determine_cluster() path + return False ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/collector/clusters/kubernetes.py b/sos/collector/clusters/kubernetes.py +index cdbf8861..99f788dc 100644 +--- a/sos/collector/clusters/kubernetes.py ++++ b/sos/collector/clusters/kubernetes.py +@@ -34,7 +34,7 @@ class kubernetes(Cluster): + if res['status'] == 0: + nodes = [] + roles = [x for x in self.get_option('role').split(',') if x] +- for nodeln in res['stdout'].splitlines()[1:]: ++ for nodeln in res['output'].splitlines()[1:]: + node = nodeln.split() + if not roles: + nodes.append(node[0]) +@@ -44,3 +44,5 @@ class kubernetes(Cluster): + return nodes + else: + raise Exception('Node enumeration did not return usable output') ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py +index 5479417d..ad97587f 100644 +--- a/sos/collector/clusters/ocp.py ++++ b/sos/collector/clusters/ocp.py +@@ -93,7 +93,7 @@ class ocp(Cluster): + res = self.exec_master_cmd(self.fmt_oc_cmd(cmd)) + if res['status'] == 0: + roles = [r for r in self.get_option('role').split(':')] +- self.node_dict = self._build_dict(res['stdout'].splitlines()) ++ self.node_dict = self._build_dict(res['output'].splitlines()) + for node in self.node_dict: + if roles: + for role in roles: +@@ -103,7 +103,7 @@ class ocp(Cluster): + nodes.append(node) + else: + msg = "'oc' command failed" +- if 'Missing or incomplete' in res['stdout']: ++ if 'Missing or incomplete' in res['output']: + msg = ("'oc' failed due to missing kubeconfig on master node." 
+ " Specify one via '-c ocp.kubeconfig='") + raise Exception(msg) +@@ -168,3 +168,5 @@ class ocp(Cluster): + def set_node_options(self, node): + # don't attempt OC API collections on non-primary nodes + node.plugin_options.append('openshift.no-oc=on') ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/collector/clusters/ovirt.py b/sos/collector/clusters/ovirt.py +index 079a122e..bd2d0c74 100644 +--- a/sos/collector/clusters/ovirt.py ++++ b/sos/collector/clusters/ovirt.py +@@ -98,7 +98,7 @@ class ovirt(Cluster): + return [] + res = self._run_db_query(self.dbquery) + if res['status'] == 0: +- nodes = res['stdout'].splitlines()[2:-1] ++ nodes = res['output'].splitlines()[2:-1] + return [n.split('(')[0].strip() for n in nodes] + else: + raise Exception('database query failed, return code: %s' +@@ -114,7 +114,7 @@ class ovirt(Cluster): + engconf = '/etc/ovirt-engine/engine.conf.d/10-setup-database.conf' + res = self.exec_primary_cmd('cat %s' % engconf, need_root=True) + if res['status'] == 0: +- config = res['stdout'].splitlines() ++ config = res['output'].splitlines() + for line in config: + try: + k = str(line.split('=')[0]) +@@ -141,7 +141,7 @@ class ovirt(Cluster): + '--batch -o postgresql {}' + ).format(self.conf['ENGINE_DB_PASSWORD'], sos_opt) + db_sos = self.exec_primary_cmd(cmd, need_root=True) +- for line in db_sos['stdout'].splitlines(): ++ for line in db_sos['output'].splitlines(): + if fnmatch.fnmatch(line, '*sosreport-*tar*'): + _pg_dump = line.strip() + self.master.manifest.add_field('postgresql_dump', +@@ -180,5 +180,7 @@ class rhhi_virt(rhv): + ret = self._run_db_query('SELECT count(server_id) FROM gluster_server') + if ret['status'] == 0: + # if there are any entries in this table, RHHI-V is in use +- return ret['stdout'].splitlines()[2].strip() != '0' ++ return ret['output'].splitlines()[2].strip() != '0' + return False ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py +index 034f3f3e..55024314 100644 +--- a/sos/collector/clusters/pacemaker.py ++++ b/sos/collector/clusters/pacemaker.py +@@ -27,7 +27,7 @@ class pacemaker(Cluster): + self.log_error('Cluster status could not be determined. Is the ' + 'cluster running on this node?') + return [] +- if 'node names do not match' in self.res['stdout']: ++ if 'node names do not match' in self.res['output']: + self.log_warn('Warning: node name mismatch reported. 
Attempts to ' + 'connect to some nodes may fail.\n') + return self.parse_pcs_output() +@@ -41,17 +41,19 @@ class pacemaker(Cluster): + return nodes + + def get_online_nodes(self): +- for line in self.res['stdout'].splitlines(): ++ for line in self.res['output'].splitlines(): + if line.startswith('Online:'): + nodes = line.split('[')[1].split(']')[0] + return [n for n in nodes.split(' ') if n] + + def get_offline_nodes(self): + offline = [] +- for line in self.res['stdout'].splitlines(): ++ for line in self.res['output'].splitlines(): + if line.startswith('Node') and line.endswith('(offline)'): + offline.append(line.split()[1].replace(':', '')) + if line.startswith('OFFLINE:'): + nodes = line.split('[')[1].split(']')[0] + offline.extend([n for n in nodes.split(' ') if n]) + return offline ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/collector/clusters/satellite.py b/sos/collector/clusters/satellite.py +index e123c8a3..7c21e553 100644 +--- a/sos/collector/clusters/satellite.py ++++ b/sos/collector/clusters/satellite.py +@@ -28,7 +28,7 @@ class satellite(Cluster): + res = self.exec_primary_cmd(cmd, need_root=True) + if res['status'] == 0: + nodes = [ +- n.strip() for n in res['stdout'].splitlines() ++ n.strip() for n in res['output'].splitlines() + if 'could not change directory' not in n + ] + return nodes +@@ -38,3 +38,5 @@ class satellite(Cluster): + if node.address == self.master.address: + return 'satellite' + return 'capsule' ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py +index 4b1ee109..f79bd5ff 100644 +--- a/sos/collector/sosnode.py ++++ b/sos/collector/sosnode.py +@@ -12,22 +12,16 @@ import fnmatch + import inspect + import logging + import os +-import pexpect + import re +-import shutil + + from distutils.version import LooseVersion + from pipes import quote + from sos.policies import load + from sos.policies.init_systems import InitSystem +-from sos.collector.exceptions import (InvalidPasswordException, +- TimeoutPasswordAuthException, +- PasswordRequestException, +- AuthPermissionDeniedException, ++from sos.collector.transports.control_persist import SSHControlPersist ++from sos.collector.transports.local import LocalTransport ++from sos.collector.exceptions import (CommandTimeoutException, + ConnectionException, +- CommandTimeoutException, +- ConnectionTimeoutException, +- ControlSocketMissingException, + UnsupportedHostException) + + +@@ -61,34 +61,25 @@ class SosNode(): + 'sos_cmd': commons['sos_cmd'] + } + self.sos_bin = 'sosreport' +- filt = ['localhost', '127.0.0.1'] + self.soslog = logging.getLogger('sos') + self.ui_log = logging.getLogger('sos_ui') +- self.control_path = ("%s/.sos-collector-%s" +- % (self.tmpdir, self.address)) +- self.ssh_cmd = self._create_ssh_command() +- if self.address not in filt: +- try: +- self.connected = self._create_ssh_session() +- except Exception as err: +- self.log_error('Unable to open SSH session: %s' % err) +- raise +- else: +- self.connected = True +- self.local = True +- self.need_sudo = os.getuid() != 0 ++ self._transport = self._load_remote_transport(commons) ++ try: ++ self._transport.connect(self._password) ++ except Exception as err: ++ self.log_error('Unable to open remote session: %s' % err) ++ raise + # load the host policy now, even if we don't want to load further + # host information. 
This is necessary if we're running locally on the + # cluster master but do not want a local report as we still need to do + # package checks in that instance + self.host = self.determine_host_policy() +- self.get_hostname() ++ self.hostname = self._transport.hostname + if self.local and self.opts.no_local: + load_facts = False + if self.connected and load_facts: + if not self.host: +- self.connected = False +- self.close_ssh_session() ++ self._transport.disconnect() + return None + if self.local: + if self.check_in_container(): +@@ -103,11 +88,26 @@ class SosNode(): + self.create_sos_container() + self._load_sos_info() + +- def _create_ssh_command(self): +- """Build the complete ssh command for this node""" +- cmd = "ssh -oControlPath=%s " % self.control_path +- cmd += "%s@%s " % (self.opts.ssh_user, self.address) +- return cmd ++ @property ++ def connected(self): ++ if self._transport: ++ return self._transport.connected ++ # if no transport, we're running locally ++ return True ++ ++ def disconnect(self): ++ """Wrapper to close the remote session via our transport agent ++ """ ++ self._transport.disconnect() ++ ++ def _load_remote_transport(self, commons): ++ """Determine the type of remote transport to load for this node, then ++ return an instantiated instance of that transport ++ """ ++ if self.address in ['localhost', '127.0.0.1']: ++ self.local = True ++ return LocalTransport(self.address, commons) ++ return SSHControlPersist(self.address, commons) + + def _fmt_msg(self, msg): + return '{:<{}} : {}'.format(self._hostname, self.hostlen + 1, msg) +@@ -135,6 +135,7 @@ class SosNode(): + self.manifest.add_field('policy', self.host.distro) + self.manifest.add_field('sos_version', self.sos_info['version']) + self.manifest.add_field('final_sos_command', '') ++ self.manifest.add_field('transport', self._transport.name) + + def check_in_container(self): + """ +@@ -160,13 +161,13 @@ class SosNode(): + res = self.run_command(cmd, need_root=True) + if res['status'] in [0, 125]: + if res['status'] == 125: +- if 'unable to retrieve auth token' in res['stdout']: ++ if 'unable to retrieve auth token' in res['output']: + self.log_error( + "Could not pull image. 
Provide either a username " + "and password or authfile" + ) + raise Exception +- elif 'unknown: Not found' in res['stdout']: ++ elif 'unknown: Not found' in res['output']: + self.log_error('Specified image not found on registry') + raise Exception + # 'name exists' with code 125 means the container was +@@ -181,11 +182,11 @@ class SosNode(): + return True + else: + self.log_error("Could not start container after create: %s" +- % ret['stdout']) ++ % ret['output']) + raise Exception + else: + self.log_error("Could not create container on host: %s" +- % res['stdout']) ++ % res['output']) + raise Exception + + def get_container_auth(self): +@@ -204,18 +205,11 @@ class SosNode(): + + def file_exists(self, fname, need_root=False): + """Checks for the presence of fname on the remote node""" +- if not self.local: +- try: +- res = self.run_command("stat %s" % fname, need_root=need_root) +- return res['status'] == 0 +- except Exception: +- return False +- else: +- try: +- os.stat(fname) +- return True +- except Exception: +- return False ++ try: ++ res = self.run_command("stat %s" % fname, need_root=need_root) ++ return res['status'] == 0 ++ except Exception: ++ return False + + @property + def _hostname(self): +@@ -223,18 +217,6 @@ class SosNode(): + return self.hostname + return self.address + +- @property +- def control_socket_exists(self): +- """Check if the SSH control socket exists +- +- The control socket is automatically removed by the SSH daemon in the +- event that the last connection to the node was greater than the timeout +- set by the ControlPersist option. This can happen for us if we are +- collecting from a large number of nodes, and the timeout expires before +- we start collection. +- """ +- return os.path.exists(self.control_path) +- + def _sanitize_log_msg(self, msg): + """Attempts to obfuscate sensitive information in log messages such as + passwords""" +@@ -264,12 +246,6 @@ class SosNode(): + msg = '[%s:%s] %s' % (self._hostname, caller, msg) + self.soslog.debug(msg) + +- def get_hostname(self): +- """Get the node's hostname""" +- sout = self.run_command('hostname') +- self.hostname = sout['stdout'].strip() +- self.log_info('Hostname set to %s' % self.hostname) +- + def _format_cmd(self, cmd): + """If we need to provide a sudo or root password to a command, then + here we prefix the command with the correct bits +@@ -280,19 +256,6 @@ class SosNode(): + return "sudo -S %s" % cmd + return cmd + +- def _fmt_output(self, output=None, rc=0): +- """Formats the returned output from a command into a dict""" +- if rc == 0: +- stdout = output +- stderr = '' +- else: +- stdout = '' +- stderr = output +- res = {'status': rc, +- 'stdout': stdout, +- 'stderr': stderr} +- return res +- + def _load_sos_info(self): + """Queries the node for information about the installed version of sos + """ +@@ -306,7 +269,7 @@ class SosNode(): + pkgs = self.run_command(self.host.container_version_command, + use_container=True, need_root=True) + if pkgs['status'] == 0: +- ver = pkgs['stdout'].strip().split('-')[1] ++ ver = pkgs['output'].strip().split('-')[1] + if ver: + self.sos_info['version'] = ver + else: +@@ -321,18 +284,21 @@ class SosNode(): + self.log_error('sos is not installed on this node') + self.connected = False + return False +- cmd = 'sosreport -l' ++ # sos-4.0 changes the binary ++ if self.check_sos_version('4.0'): ++ self.sos_bin = 'sos report' ++ cmd = "%s -l" % self.sos_bin + sosinfo = self.run_command(cmd, use_container=True, need_root=True) + if sosinfo['status'] == 0: +- 
self._load_sos_plugins(sosinfo['stdout']) ++ self._load_sos_plugins(sosinfo['output']) + if self.check_sos_version('3.6'): + self._load_sos_presets() + + def _load_sos_presets(self): +- cmd = 'sosreport --list-presets' ++ cmd = '%s --list-presets' % self.sos_bin + res = self.run_command(cmd, use_container=True, need_root=True) + if res['status'] == 0: +- for line in res['stdout'].splitlines(): ++ for line in res['output'].splitlines(): + if line.strip().startswith('name:'): + pname = line.split('name:')[1].strip() + self.sos_info['presets'].append(pname) +@@ -372,21 +338,7 @@ class SosNode(): + """Reads the specified file and returns the contents""" + try: + self.log_info("Reading file %s" % to_read) +- if not self.local: +- res = self.run_command("cat %s" % to_read, timeout=5) +- if res['status'] == 0: +- return res['stdout'] +- else: +- if 'No such file' in res['stdout']: +- self.log_debug("File %s does not exist on node" +- % to_read) +- else: +- self.log_error("Error reading %s: %s" % +- (to_read, res['stdout'].split(':')[1:])) +- return '' +- else: +- with open(to_read, 'r') as rfile: +- return rfile.read() ++ return self._transport.read_file(to_read) + except Exception as err: + self.log_error("Exception while reading %s: %s" % (to_read, err)) + return '' +@@ -400,7 +352,8 @@ class SosNode(): + % self.commons['policy'].distro) + return self.commons['policy'] + host = load(cache={}, sysroot=self.opts.sysroot, init=InitSystem(), +- probe_runtime=True, remote_exec=self.ssh_cmd, ++ probe_runtime=True, ++ remote_exec=self._transport.remote_exec, + remote_check=self.read_file('/etc/os-release')) + if host: + self.log_info("loaded policy %s for host" % host.distro) +@@ -422,7 +375,7 @@ class SosNode(): + return self.host.package_manager.pkg_by_name(pkg) is not None + + def run_command(self, cmd, timeout=180, get_pty=False, need_root=False, +- force_local=False, use_container=False, env=None): ++ use_container=False, env=None): + """Runs a given cmd, either via the SSH session or locally + + Arguments: +@@ -433,58 +386,37 @@ class SosNode(): + need_root - if a command requires root privileges, setting this to + True tells sos-collector to format the command with + sudo or su - as appropriate and to input the password +- force_local - force a command to run locally. Mainly used for scp. 
+ use_container - Run this command in a container *IF* the host is + containerized + """ +- if not self.control_socket_exists and not self.local: +- self.log_debug('Control socket does not exist, attempting to ' +- 're-create') ++ if not self.connected and not self.local: ++ self.log_debug('Node is disconnected, attempting to reconnect') + try: +- _sock = self._create_ssh_session() +- if not _sock: +- self.log_debug('Failed to re-create control socket') +- raise ControlSocketMissingException ++ reconnected = self._transport.reconnect(self._password) ++ if not reconnected: ++ self.log_debug('Failed to reconnect to node') ++ raise ConnectionException + except Exception as err: +- self.log_error('Cannot run command: control socket does not ' +- 'exist') +- self.log_debug("Error while trying to create new SSH control " +- "socket: %s" % err) ++ self.log_debug("Error while trying to reconnect: %s" % err) + raise + if use_container and self.host.containerized: + cmd = self.host.format_container_command(cmd) + if need_root: +- get_pty = True + cmd = self._format_cmd(cmd) +- self.log_debug('Running command %s' % cmd) ++ + if 'atomic' in cmd: + get_pty = True +- if not self.local and not force_local: +- cmd = "%s %s" % (self.ssh_cmd, quote(cmd)) +- else: +- if get_pty: +- cmd = "/bin/bash -c %s" % quote(cmd) ++ ++ if get_pty: ++ cmd = "/bin/bash -c %s" % quote(cmd) ++ + if env: + _cmd_env = self.env_vars + env = _cmd_env.update(env) +- res = pexpect.spawn(cmd, encoding='utf-8', env=env) +- if need_root: +- if self.need_sudo: +- res.sendline(self.opts.sudo_pw) +- if self.opts.become_root: +- res.sendline(self.opts.root_password) +- output = res.expect([pexpect.EOF, pexpect.TIMEOUT], +- timeout=timeout) +- if output == 0: +- out = res.before +- res.close() +- rc = res.exitstatus +- return {'status': rc, 'stdout': out} +- elif output == 1: +- raise CommandTimeoutException(cmd) ++ return self._transport.run_command(cmd, timeout, need_root, env) + + def sosreport(self): +- """Run a sosreport on the node, then collect it""" ++ """Run an sos report on the node, then collect it""" + try: + path = self.execute_sos_command() + if path: +@@ -497,109 +429,6 @@ class SosNode(): + pass + self.cleanup() + +- def _create_ssh_session(self): +- """ +- Using ControlPersist, create the initial connection to the node. +- +- This will generate an OpenSSH ControlPersist socket within the tmp +- directory created or specified for sos-collector to use. +- +- At most, we will wait 30 seconds for a connection. This involves a 15 +- second wait for the initial connection attempt, and a subsequent 15 +- second wait for a response when we supply a password. +- +- Since we connect to nodes in parallel (using the --threads value), this +- means that the time between 'Connecting to nodes...' and 'Beginning +- collection of sosreports' that users see can be up to an amount of time +- equal to 30*(num_nodes/threads) seconds. 
+- +- Returns +- True if session is successfully opened, else raise Exception +- """ +- # Don't use self.ssh_cmd here as we need to add a few additional +- # parameters to establish the initial connection +- self.log_info('Opening SSH session to create control socket') +- connected = False +- ssh_key = '' +- ssh_port = '' +- if self.opts.ssh_port != 22: +- ssh_port = "-p%s " % self.opts.ssh_port +- if self.opts.ssh_key: +- ssh_key = "-i%s" % self.opts.ssh_key +- cmd = ("ssh %s %s -oControlPersist=600 -oControlMaster=auto " +- "-oStrictHostKeyChecking=no -oControlPath=%s %s@%s " +- "\"echo Connected\"" % (ssh_key, +- ssh_port, +- self.control_path, +- self.opts.ssh_user, +- self.address)) +- res = pexpect.spawn(cmd, encoding='utf-8') +- +- connect_expects = [ +- u'Connected', +- u'password:', +- u'.*Permission denied.*', +- u'.* port .*: No route to host', +- u'.*Could not resolve hostname.*', +- pexpect.TIMEOUT +- ] +- +- index = res.expect(connect_expects, timeout=15) +- +- if index == 0: +- connected = True +- elif index == 1: +- if self._password: +- pass_expects = [ +- u'Connected', +- u'Permission denied, please try again.', +- pexpect.TIMEOUT +- ] +- res.sendline(self._password) +- pass_index = res.expect(pass_expects, timeout=15) +- if pass_index == 0: +- connected = True +- elif pass_index == 1: +- # Note that we do not get an exitstatus here, so matching +- # this line means an invalid password will be reported for +- # both invalid passwords and invalid user names +- raise InvalidPasswordException +- elif pass_index == 2: +- raise TimeoutPasswordAuthException +- else: +- raise PasswordRequestException +- elif index == 2: +- raise AuthPermissionDeniedException +- elif index == 3: +- raise ConnectionException(self.address, self.opts.ssh_port) +- elif index == 4: +- raise ConnectionException(self.address) +- elif index == 5: +- raise ConnectionTimeoutException +- else: +- raise Exception("Unknown error, client returned %s" % res.before) +- if connected: +- self.log_debug("Successfully created control socket at %s" +- % self.control_path) +- return True +- return False +- +- def close_ssh_session(self): +- """Remove the control socket to effectively terminate the session""" +- if self.local: +- return True +- try: +- res = self.run_command("rm -f %s" % self.control_path, +- force_local=True) +- if res['status'] == 0: +- return True +- self.log_error("Could not remove ControlPath %s: %s" +- % (self.control_path, res['stdout'])) +- return False +- except Exception as e: +- self.log_error('Error closing SSH session: %s' % e) +- return False +- + def _preset_exists(self, preset): + """Verifies if the given preset exists on the node""" + return preset in self.sos_info['presets'] +@@ -646,8 +475,8 @@ class SosNode(): + self.cluster = cluster + + def update_cmd_from_cluster(self): +- """This is used to modify the sosreport command run on the nodes. +- By default, sosreport is run without any options, using this will ++ """This is used to modify the sos report command run on the nodes. ++ By default, sos report is run without any options, using this will + allow the profile to specify what plugins to run or not and what + options to use. 
+ +@@ -727,10 +556,6 @@ class SosNode(): + if self.opts.since: + sos_opts.append('--since=%s' % quote(self.opts.since)) + +- # sos-4.0 changes the binary +- if self.check_sos_version('4.0'): +- self.sos_bin = 'sos report' +- + if self.check_sos_version('4.1'): + if self.opts.skip_commands: + sos_opts.append( +@@ -811,7 +636,7 @@ class SosNode(): + self.manifest.add_field('final_sos_command', self.sos_cmd) + + def determine_sos_label(self): +- """Determine what, if any, label should be added to the sosreport""" ++ """Determine what, if any, label should be added to the sos report""" + label = '' + label += self.cluster.get_node_label(self) + +@@ -822,7 +647,7 @@ class SosNode(): + if not label: + return None + +- self.log_debug('Label for sosreport set to %s' % label) ++ self.log_debug('Label for sos report set to %s' % label) + if self.check_sos_version('3.6'): + lcmd = '--label' + else: +@@ -844,20 +669,20 @@ class SosNode(): + + def determine_sos_error(self, rc, stdout): + if rc == -1: +- return 'sosreport process received SIGKILL on node' ++ return 'sos report process received SIGKILL on node' + if rc == 1: + if 'sudo' in stdout: + return 'sudo attempt failed' + if rc == 127: +- return 'sosreport terminated unexpectedly. Check disk space' ++ return 'sos report terminated unexpectedly. Check disk space' + if len(stdout) > 0: + return stdout.split('\n')[0:1] + else: + return 'sos exited with code %s' % rc + + def execute_sos_command(self): +- """Run sosreport and capture the resulting file path""" +- self.ui_msg('Generating sosreport...') ++ """Run sos report and capture the resulting file path""" ++ self.ui_msg('Generating sos report...') + try: + path = False + checksum = False +@@ -867,7 +692,7 @@ class SosNode(): + use_container=True, + env=self.sos_env_vars) + if res['status'] == 0: +- for line in res['stdout'].splitlines(): ++ for line in res['output'].splitlines(): + if fnmatch.fnmatch(line, '*sosreport-*tar*'): + path = line.strip() + if line.startswith((" sha256\t", " md5\t")): +@@ -884,44 +709,31 @@ class SosNode(): + else: + self.manifest.add_field('checksum_type', 'unknown') + else: +- err = self.determine_sos_error(res['status'], res['stdout']) +- self.log_debug("Error running sosreport. rc = %s msg = %s" +- % (res['status'], res['stdout'] or +- res['stderr'])) ++ err = self.determine_sos_error(res['status'], res['output']) ++ self.log_debug("Error running sos report. 
rc = %s msg = %s" ++ % (res['status'], res['output'])) + raise Exception(err) + return path + except CommandTimeoutException: + self.log_error('Timeout exceeded') + raise + except Exception as e: +- self.log_error('Error running sosreport: %s' % e) ++ self.log_error('Error running sos report: %s' % e) + raise + + def retrieve_file(self, path): + """Copies the specified file from the host to our temp dir""" + destdir = self.tmpdir + '/' +- dest = destdir + path.split('/')[-1] ++ dest = os.path.join(destdir, path.split('/')[-1]) + try: +- if not self.local: +- if self.file_exists(path): +- self.log_info("Copying remote %s to local %s" % +- (path, destdir)) +- cmd = "/usr/bin/scp -oControlPath=%s %s@%s:%s %s" % ( +- self.control_path, +- self.opts.ssh_user, +- self.address, +- path, +- destdir +- ) +- res = self.run_command(cmd, force_local=True) +- return res['status'] == 0 +- else: +- self.log_debug("Attempting to copy remote file %s, but it " +- "does not exist on filesystem" % path) +- return False ++ if self.file_exists(path): ++ self.log_info("Copying remote %s to local %s" % ++ (path, destdir)) ++ self._transport.retrieve_file(path, dest) + else: +- self.log_debug("Moving %s to %s" % (path, destdir)) +- shutil.copy(path, dest) ++ self.log_debug("Attempting to copy remote file %s, but it " ++ "does not exist on filesystem" % path) ++ return False + return True + except Exception as err: + self.log_debug("Failed to retrieve %s: %s" % (path, err)) +@@ -933,7 +745,7 @@ class SosNode(): + """ + path = ''.join(path.split()) + try: +- if len(path) <= 2: # ensure we have a non '/' path ++ if len(path.split('/')) <= 2: # ensure we have a non '/' path + self.log_debug("Refusing to remove path %s: appears to be " + "incorrect and possibly dangerous" % path) + return False +@@ -959,14 +771,14 @@ class SosNode(): + except Exception: + self.log_error('Failed to make archive readable') + return False +- self.soslog.info('Retrieving sosreport from %s' % self.address) +- self.ui_msg('Retrieving sosreport...') ++ self.soslog.info('Retrieving sos report from %s' % self.address) ++ self.ui_msg('Retrieving sos report...') + ret = self.retrieve_file(self.sos_path) + if ret: +- self.ui_msg('Successfully collected sosreport') ++ self.ui_msg('Successfully collected sos report') + self.file_list.append(self.sos_path.split('/')[-1]) + else: +- self.log_error('Failed to retrieve sosreport') ++ self.log_error('Failed to retrieve sos report') + raise SystemExit + return True + else: +@@ -976,8 +788,8 @@ class SosNode(): + else: + e = [x.strip() for x in self.stdout.readlines() if x.strip][-1] + self.soslog.error( +- 'Failed to run sosreport on %s: %s' % (self.address, e)) +- self.log_error('Failed to run sosreport. %s' % e) ++ 'Failed to run sos report on %s: %s' % (self.address, e)) ++ self.log_error('Failed to run sos report. %s' % e) + return False + + def remove_sos_archive(self): +@@ -986,20 +798,20 @@ class SosNode(): + if self.sos_path is None: + return + if 'sosreport' not in self.sos_path: +- self.log_debug("Node sosreport path %s looks incorrect. Not " ++ self.log_debug("Node sos report path %s looks incorrect. 
Not " + "attempting to remove path" % self.sos_path) + return + removed = self.remove_file(self.sos_path) + if not removed: +- self.log_error('Failed to remove sosreport') ++ self.log_error('Failed to remove sos report') + + def cleanup(self): + """Remove the sos archive from the node once we have it locally""" + self.remove_sos_archive() + if self.sos_path: + for ext in ['.sha256', '.md5']: +- if os.path.isfile(self.sos_path + ext): +- self.remove_file(self.sos_path + ext) ++ if self.remove_file(self.sos_path + ext): ++ break + cleanup = self.host.set_cleanup_cmd() + if cleanup: + self.run_command(cleanup, need_root=True) +@@ -1040,3 +852,5 @@ class SosNode(): + msg = "Exception while making %s readable. Return code was %s" + self.log_error(msg % (filepath, res['status'])) + raise Exception ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/collector/transports/__init__.py b/sos/collector/transports/__init__.py +new file mode 100644 +index 00000000..5be7dc6d +--- /dev/null ++++ b/sos/collector/transports/__init__.py +@@ -0,0 +1,317 @@ ++# Copyright Red Hat 2021, Jake Hunsaker ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++import inspect ++import logging ++import pexpect ++import re ++ ++from pipes import quote ++from sos.collector.exceptions import (ConnectionException, ++ CommandTimeoutException) ++ ++ ++class RemoteTransport(): ++ """The base class used for defining supported remote transports to connect ++ to remote nodes in conjunction with `sos collect`. ++ ++ This abstraction is used to manage the backend connections to nodes so that ++ SoSNode() objects can be leveraged generically to connect to nodes, inspect ++ those nodes, and run commands on them. ++ """ ++ ++ name = 'undefined' ++ ++ def __init__(self, address, commons): ++ self.address = address ++ self.opts = commons['cmdlineopts'] ++ self.tmpdir = commons['tmpdir'] ++ self.need_sudo = commons['need_sudo'] ++ self._hostname = None ++ self.soslog = logging.getLogger('sos') ++ self.ui_log = logging.getLogger('sos_ui') ++ ++ def _sanitize_log_msg(self, msg): ++ """Attempts to obfuscate sensitive information in log messages such as ++ passwords""" ++ reg = r'(?P(pass|key|secret|PASS|KEY|SECRET).*?=)(?P.*?\s)' ++ return re.sub(reg, r'\g****** ', msg) ++ ++ def log_info(self, msg): ++ """Used to print and log info messages""" ++ caller = inspect.stack()[1][3] ++ lmsg = '[%s:%s] %s' % (self.hostname, caller, msg) ++ self.soslog.info(lmsg) ++ ++ def log_error(self, msg): ++ """Used to print and log error messages""" ++ caller = inspect.stack()[1][3] ++ lmsg = '[%s:%s] %s' % (self.hostname, caller, msg) ++ self.soslog.error(lmsg) ++ ++ def log_debug(self, msg): ++ """Used to print and log debug messages""" ++ msg = self._sanitize_log_msg(msg) ++ caller = inspect.stack()[1][3] ++ msg = '[%s:%s] %s' % (self.hostname, caller, msg) ++ self.soslog.debug(msg) ++ ++ @property ++ def hostname(self): ++ if self._hostname and 'localhost' not in self._hostname: ++ return self._hostname ++ return self.address ++ ++ @property ++ def connected(self): ++ """Is the transport __currently__ connected to the node, or otherwise ++ capable of seamlessly running a command or similar on the node? 
++ """ ++ return False ++ ++ @property ++ def remote_exec(self): ++ """This is the command string needed to leverage the remote transport ++ when executing commands. For example, for an SSH transport this would ++ be the `ssh ` string prepended to any command so that the ++ command is executed by the ssh binary. ++ ++ This is also referenced by the `remote_exec` parameter for policies ++ when loading a policy for a remote node ++ """ ++ return None ++ ++ def connect(self, password): ++ """Perform the connection steps in order to ensure that we are able to ++ connect to the node for all future operations. Note that this should ++ not provide an interactive shell at this time. ++ """ ++ if self._connect(password): ++ if not self._hostname: ++ self._get_hostname() ++ return True ++ return False ++ ++ def _connect(self, password): ++ """Actually perform the connection requirements. Should be overridden ++ by specific transports that subclass RemoteTransport ++ """ ++ raise NotImplementedError("Transport %s does not define connect" ++ % self.name) ++ ++ def reconnect(self, password): ++ """Attempts to reconnect to the node using the standard connect() ++ but does not do so indefinitely. This imposes a strict number of retry ++ attempts before failing out ++ """ ++ attempts = 1 ++ last_err = 'unknown' ++ while attempts < 5: ++ self.log_debug("Attempting reconnect (#%s) to node" % attempts) ++ try: ++ if self.connect(password): ++ return True ++ except Exception as err: ++ self.log_debug("Attempt #%s exception: %s" % (attempts, err)) ++ last_err = err ++ attempts += 1 ++ self.log_error("Unable to reconnect to node after 5 attempts, " ++ "aborting.") ++ raise ConnectionException("last exception from transport: %s" ++ % last_err) ++ ++ def disconnect(self): ++ """Perform whatever steps are necessary, if any, to terminate any ++ connection to the node ++ """ ++ try: ++ if self._disconnect(): ++ self.log_debug("Successfully disconnected from node") ++ else: ++ self.log_error("Unable to successfully disconnect, see log for" ++ " more details") ++ except Exception as err: ++ self.log_error("Failed to disconnect: %s" % err) ++ ++ def _disconnect(self): ++ raise NotImplementedError("Transport %s does not define disconnect" ++ % self.name) ++ ++ def run_command(self, cmd, timeout=180, need_root=False, env=None): ++ """Run a command on the node, returning its output and exit code. ++ This should return the exit code of the command being executed, not the ++ exit code of whatever mechanism the transport uses to execute that ++ command ++ ++ :param cmd: The command to run ++ :type cmd: ``str`` ++ ++ :param timeout: The maximum time in seconds to allow the cmd to run ++ :type timeout: ``int`` ++ ++ :param get_pty: Does ``cmd`` require a pty? ++ :type get_pty: ``bool`` ++ ++ :param need_root: Does ``cmd`` require root privileges? ++ :type neeed_root: ``bool`` ++ ++ :param env: Specify env vars to be passed to the ``cmd`` ++ :type env: ``dict`` ++ ++ :returns: Output of ``cmd`` and the exit code ++ :rtype: ``dict`` with keys ``output`` and ``status`` ++ """ ++ self.log_debug('Running command %s' % cmd) ++ # currently we only use/support the use of pexpect for handling the ++ # execution of these commands, as opposed to directly invoking ++ # subprocess.Popen() in conjunction with tools like sshpass. 
++ # If that changes in the future, we'll add decision making logic here ++ # to route to the appropriate handler, but for now we just go straight ++ # to using pexpect ++ return self._run_command_with_pexpect(cmd, timeout, need_root, env) ++ ++ def _format_cmd_for_exec(self, cmd): ++ """Format the command in the way needed for the remote transport to ++ successfully execute it as one would when manually executing it ++ ++ :param cmd: The command being executed, as formatted by SoSNode ++ :type cmd: ``str`` ++ ++ ++ :returns: The command further formatted as needed by this ++ transport ++ :rtype: ``str`` ++ """ ++ cmd = "%s %s" % (self.remote_exec, quote(cmd)) ++ cmd = cmd.lstrip() ++ return cmd ++ ++ def _run_command_with_pexpect(self, cmd, timeout, need_root, env): ++ """Execute the command using pexpect, which allows us to more easily ++ handle prompts and timeouts compared to directly leveraging the ++ subprocess.Popen() method. ++ ++ :param cmd: The command to execute. This will be automatically ++ formatted to use the transport. ++ :type cmd: ``str`` ++ ++ :param timeout: The maximum time in seconds to run ``cmd`` ++ :type timeout: ``int`` ++ ++ :param need_root: Does ``cmd`` need to run as root or with sudo? ++ :type need_root: ``bool`` ++ ++ :param env: Any env vars that ``cmd`` should be run with ++ :type env: ``dict`` ++ """ ++ cmd = self._format_cmd_for_exec(cmd) ++ result = pexpect.spawn(cmd, encoding='utf-8', env=env) ++ ++ _expects = [pexpect.EOF, pexpect.TIMEOUT] ++ if need_root and self.opts.ssh_user != 'root': ++ _expects.extend([ ++ '\\[sudo\\] password for .*:', ++ 'Password:' ++ ]) ++ ++ index = result.expect(_expects, timeout=timeout) ++ ++ if index in [2, 3]: ++ self._send_pexpect_password(index, result) ++ index = result.expect(_expects, timeout=timeout) ++ ++ if index == 0: ++ out = result.before ++ result.close() ++ return {'status': result.exitstatus, 'output': out} ++ elif index == 1: ++ raise CommandTimeoutException(cmd) ++ ++ def _send_pexpect_password(self, index, result): ++ """Handle password prompts for sudo and su usage for non-root SSH users ++ ++ :param index: The index pexpect.spawn returned to match against ++ either a sudo or su prompt ++ :type index: ``int`` ++ ++ :param result: The spawn running the command ++ :type result: ``pexpect.spawn`` ++ """ ++ if index == 2: ++ if not self.opts.sudo_pw and not self.opt.nopasswd_sudo: ++ msg = ("Unable to run command: sudo password " ++ "required but not provided") ++ self.log_error(msg) ++ raise Exception(msg) ++ result.sendline(self.opts.sudo_pw) ++ elif index == 3: ++ if not self.opts.root_password: ++ msg = ("Unable to run command as root: no root password given") ++ self.log_error(msg) ++ raise Exception(msg) ++ result.sendline(self.opts.root_password) ++ ++ def _get_hostname(self): ++ """Determine the hostname of the node and set that for future reference ++ and logging ++ ++ :returns: The hostname of the system, per the `hostname` command ++ :rtype: ``str`` ++ """ ++ _out = self.run_command('hostname') ++ if _out['status'] == 0: ++ self._hostname = _out['output'].strip() ++ self.log_info("Hostname set to %s" % self._hostname) ++ return self._hostname ++ ++ def retrieve_file(self, fname, dest): ++ """Copy a remote file, fname, to dest on the local node ++ ++ :param fname: The name of the file to retrieve ++ :type fname: ``str`` ++ ++ :param dest: Where to save the file to locally ++ :type dest: ``str`` ++ ++ :returns: True if file was successfully copied from remote, or False ++ :rtype: ``bool`` ++ """ 
++ return self._retrieve_file(fname, dest) ++ ++ def _retrieve_file(self, fname, dest): ++ raise NotImplementedError("Transport %s does not support file copying" ++ % self.name) ++ ++ def read_file(self, fname): ++ """Read the given file fname and return its contents ++ ++ :param fname: The name of the file to read ++ :type fname: ``str`` ++ ++ :returns: The content of the file ++ :rtype: ``str`` ++ """ ++ self.log_debug("Reading file %s" % fname) ++ return self._read_file(fname) ++ ++ def _read_file(self, fname): ++ res = self.run_command("cat %s" % fname, timeout=5) ++ if res['status'] == 0: ++ return res['output'] ++ else: ++ if 'No such file' in res['output']: ++ self.log_debug("File %s does not exist on node" ++ % fname) ++ else: ++ self.log_error("Error reading %s: %s" % ++ (fname, res['output'].split(':')[1:])) ++ return '' ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/collector/transports/control_persist.py b/sos/collector/transports/control_persist.py +new file mode 100644 +index 00000000..3e848b41 +--- /dev/null ++++ b/sos/collector/transports/control_persist.py +@@ -0,0 +1,199 @@ ++# Copyright Red Hat 2021, Jake Hunsaker ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++ ++import os ++import pexpect ++import subprocess ++ ++from sos.collector.transports import RemoteTransport ++from sos.collector.exceptions import (InvalidPasswordException, ++ TimeoutPasswordAuthException, ++ PasswordRequestException, ++ AuthPermissionDeniedException, ++ ConnectionException, ++ ConnectionTimeoutException, ++ ControlSocketMissingException, ++ ControlPersistUnsupportedException) ++from sos.utilities import sos_get_command_output ++ ++ ++class SSHControlPersist(RemoteTransport): ++ """A transport for collect that leverages OpenSSH's Control Persist ++ functionality which uses control sockets to transparently keep a connection ++ open to the remote host without needing to rebuild the SSH connection for ++ each and every command executed on the node ++ """ ++ ++ name = 'control_persist' ++ ++ def _check_for_control_persist(self): ++ """Checks to see if the local system supported SSH ControlPersist. ++ ++ ControlPersist allows OpenSSH to keep a single open connection to a ++ remote host rather than building a new session each time. This is the ++ same feature that Ansible uses in place of paramiko, which we have a ++ need to drop in sos-collector. ++ ++ This check relies on feedback from the ssh binary. The command being ++ run should always generate stderr output, but depending on what that ++ output reads we can determine if ControlPersist is supported or not. ++ ++ For our purposes, a host that does not support ControlPersist is not ++ able to run sos-collector. ++ ++ Returns ++ True if ControlPersist is supported, else raise Exception. ++ """ ++ ssh_cmd = ['ssh', '-o', 'ControlPersist'] ++ cmd = subprocess.Popen(ssh_cmd, stdout=subprocess.PIPE, ++ stderr=subprocess.PIPE) ++ out, err = cmd.communicate() ++ err = err.decode('utf-8') ++ if 'Bad configuration option' in err or 'Usage:' in err: ++ raise ControlPersistUnsupportedException ++ return True ++ ++ def _connect(self, password=''): ++ """ ++ Using ControlPersist, create the initial connection to the node. 
++ ++ This will generate an OpenSSH ControlPersist socket within the tmp ++ directory created or specified for sos-collector to use. ++ ++ At most, we will wait 30 seconds for a connection. This involves a 15 ++ second wait for the initial connection attempt, and a subsequent 15 ++ second wait for a response when we supply a password. ++ ++ Since we connect to nodes in parallel (using the --threads value), this ++ means that the time between 'Connecting to nodes...' and 'Beginning ++ collection of sosreports' that users see can be up to an amount of time ++ equal to 30*(num_nodes/threads) seconds. ++ ++ Returns ++ True if session is successfully opened, else raise Exception ++ """ ++ try: ++ self._check_for_control_persist() ++ except ControlPersistUnsupportedException: ++ self.log_error("OpenSSH ControlPersist is not locally supported. " ++ "Please update your OpenSSH installation.") ++ raise ++ self.log_info('Opening SSH session to create control socket') ++ self.control_path = ("%s/.sos-collector-%s" % (self.tmpdir, ++ self.address)) ++ self.ssh_cmd = '' ++ connected = False ++ ssh_key = '' ++ ssh_port = '' ++ if self.opts.ssh_port != 22: ++ ssh_port = "-p%s " % self.opts.ssh_port ++ if self.opts.ssh_key: ++ ssh_key = "-i%s" % self.opts.ssh_key ++ ++ cmd = ("ssh %s %s -oControlPersist=600 -oControlMaster=auto " ++ "-oStrictHostKeyChecking=no -oControlPath=%s %s@%s " ++ "\"echo Connected\"" % (ssh_key, ++ ssh_port, ++ self.control_path, ++ self.opts.ssh_user, ++ self.address)) ++ res = pexpect.spawn(cmd, encoding='utf-8') ++ ++ connect_expects = [ ++ u'Connected', ++ u'password:', ++ u'.*Permission denied.*', ++ u'.* port .*: No route to host', ++ u'.*Could not resolve hostname.*', ++ pexpect.TIMEOUT ++ ] ++ ++ index = res.expect(connect_expects, timeout=15) ++ ++ if index == 0: ++ connected = True ++ elif index == 1: ++ if password: ++ pass_expects = [ ++ u'Connected', ++ u'Permission denied, please try again.', ++ pexpect.TIMEOUT ++ ] ++ res.sendline(password) ++ pass_index = res.expect(pass_expects, timeout=15) ++ if pass_index == 0: ++ connected = True ++ elif pass_index == 1: ++ # Note that we do not get an exitstatus here, so matching ++ # this line means an invalid password will be reported for ++ # both invalid passwords and invalid user names ++ raise InvalidPasswordException ++ elif pass_index == 2: ++ raise TimeoutPasswordAuthException ++ else: ++ raise PasswordRequestException ++ elif index == 2: ++ raise AuthPermissionDeniedException ++ elif index == 3: ++ raise ConnectionException(self.address, self.opts.ssh_port) ++ elif index == 4: ++ raise ConnectionException(self.address) ++ elif index == 5: ++ raise ConnectionTimeoutException ++ else: ++ raise Exception("Unknown error, client returned %s" % res.before) ++ if connected: ++ if not os.path.exists(self.control_path): ++ raise ControlSocketMissingException ++ self.log_debug("Successfully created control socket at %s" ++ % self.control_path) ++ return True ++ return False ++ ++ def _disconnect(self): ++ if os.path.exists(self.control_path): ++ try: ++ os.remove(self.control_path) ++ return True ++ except Exception as err: ++ self.log_debug("Could not disconnect properly: %s" % err) ++ return False ++ self.log_debug("Control socket not present when attempting to " ++ "terminate session") ++ ++ @property ++ def connected(self): ++ """Check if the SSH control socket exists ++ ++ The control socket is automatically removed by the SSH daemon in the ++ event that the last connection to the node was greater than the timeout ++ set 
by the ControlPersist option. This can happen for us if we are ++ collecting from a large number of nodes, and the timeout expires before ++ we start collection. ++ """ ++ return os.path.exists(self.control_path) ++ ++ @property ++ def remote_exec(self): ++ if not self.ssh_cmd: ++ self.ssh_cmd = "ssh -oControlPath=%s %s@%s" % ( ++ self.control_path, self.opts.ssh_user, self.address ++ ) ++ return self.ssh_cmd ++ ++ def _retrieve_file(self, fname, dest): ++ cmd = "/usr/bin/scp -oControlPath=%s %s@%s:%s %s" % ( ++ self.control_path, self.opts.ssh_user, self.address, fname, dest ++ ) ++ res = sos_get_command_output(cmd) ++ return res['status'] == 0 ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/collector/transports/local.py b/sos/collector/transports/local.py +new file mode 100644 +index 00000000..a4897f19 +--- /dev/null ++++ b/sos/collector/transports/local.py +@@ -0,0 +1,49 @@ ++# Copyright Red Hat 2021, Jake Hunsaker ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++import os ++import shutil ++ ++from sos.collector.transports import RemoteTransport ++ ++ ++class LocalTransport(RemoteTransport): ++ """A 'transport' to represent a local node. This allows us to more easily ++ extend SoSNode() without having a ton of 'if local' or similar checks in ++ more places than we actually need them ++ """ ++ ++ name = 'local_node' ++ ++ def _connect(self, password): ++ return True ++ ++ def _disconnect(self): ++ return True ++ ++ @property ++ def connected(self): ++ return True ++ ++ def _retrieve_file(self, fname, dest): ++ self.log_debug("Moving %s to %s" % (fname, dest)) ++ shutil.copy(fname, dest) ++ ++ def _format_cmd_for_exec(self, cmd): ++ return cmd ++ ++ def _read_file(self, fname): ++ if os.path.exists(fname): ++ with open(fname, 'r') as rfile: ++ return rfile.read() ++ self.log_debug("No such file: %s" % fname) ++ return '' ++ ++# vim: set et ts=4 sw=4 : +-- +2.31.1 + +From 07d96d52ef69b9f8fe1ef32a1b88089d31c33fe8 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Mon, 27 Sep 2021 12:28:27 -0400 +Subject: [PATCH 2/2] [plugins] Update plugins to use new os.path.join wrapper + +Updates plugins to use the new `self.path_join()` wrapper for +`os.path.join()` so that these plugins now account for non-/ sysroots +for their collections. 
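The conversion is mechanical across all of the plugins listed below. A hypothetical plugin fragment showing the before-and-after pattern (the plugin name and paths are illustrative, not taken from the patch):

    from sos.report.plugins import Plugin, IndependentPlugin

    class Example(Plugin, IndependentPlugin):
        """Hypothetical plugin illustrating the sysroot-aware conversion."""

        short_desc = 'path_join illustration'
        plugin_name = 'example'

        def setup(self):
            # previously: self.add_copy_spec(os.path.join('/var/log/example',
            # '*.log')), which resolves against the host '/' and ignores a
            # non-/ sysroot; self.path_join() prefixes the policy's sysroot
            self.add_copy_spec(self.path_join('/var/log/example', '*.log'))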
+ +Signed-off-by: Jake Hunsaker +--- + sos/report/plugins/__init__.py | 2 +- + sos/report/plugins/azure.py | 4 +-- + sos/report/plugins/collectd.py | 2 +- + sos/report/plugins/container_log.py | 2 +- + sos/report/plugins/corosync.py | 2 +- + sos/report/plugins/docker_distribution.py | 5 ++-- + sos/report/plugins/ds.py | 3 +-- + sos/report/plugins/elastic.py | 4 ++- + sos/report/plugins/etcd.py | 2 +- + sos/report/plugins/gluster.py | 3 ++- + sos/report/plugins/jars.py | 2 +- + sos/report/plugins/kdump.py | 4 +-- + sos/report/plugins/libvirt.py | 2 +- + sos/report/plugins/logs.py | 8 +++--- + sos/report/plugins/manageiq.py | 12 ++++----- + sos/report/plugins/numa.py | 9 +++---- + sos/report/plugins/openstack_instack.py | 2 +- + sos/report/plugins/openstack_nova.py | 2 +- + sos/report/plugins/openvswitch.py | 13 ++++----- + sos/report/plugins/origin.py | 28 +++++++++++--------- + sos/report/plugins/ovirt.py | 2 +- + sos/report/plugins/ovirt_engine_backup.py | 5 ++-- + sos/report/plugins/ovn_central.py | 26 +++++++++--------- + sos/report/plugins/ovn_host.py | 4 +-- + sos/report/plugins/pacemaker.py | 4 +-- + sos/report/plugins/pcp.py | 32 +++++++++++------------ + sos/report/plugins/postfix.py | 2 +- + sos/report/plugins/postgresql.py | 2 +- + sos/report/plugins/powerpc.py | 2 +- + sos/report/plugins/processor.py | 3 +-- + sos/report/plugins/python.py | 4 +-- + sos/report/plugins/sar.py | 5 ++-- + sos/report/plugins/sos_extras.py | 2 +- + sos/report/plugins/ssh.py | 7 +++-- + sos/report/plugins/unpackaged.py | 4 +-- + sos/report/plugins/watchdog.py | 13 +++++---- + sos/report/plugins/yum.py | 2 +- + 37 files changed, 115 insertions(+), 115 deletions(-) + +diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py +index 1f84bca4..ec138f83 100644 +--- a/sos/report/plugins/__init__.py ++++ b/sos/report/plugins/__init__.py +@@ -2897,7 +2897,7 @@ class Plugin(): + try: + cmd_line_paths = glob.glob(cmd_line_glob) + for path in cmd_line_paths: +- f = open(path, 'r') ++ f = open(self.path_join(path), 'r') + cmd_line = f.read().strip() + if process in cmd_line: + status = True +diff --git a/sos/report/plugins/azure.py b/sos/report/plugins/azure.py +index 45971a61..90999b3f 100644 +--- a/sos/report/plugins/azure.py ++++ b/sos/report/plugins/azure.py +@@ -8,8 +8,8 @@ + # + # See the LICENSE file in the source distribution for further information. 
+ +-import os + from sos.report.plugins import Plugin, UbuntuPlugin, RedHatPlugin ++import os + + + class Azure(Plugin, UbuntuPlugin): +@@ -38,7 +38,7 @@ class Azure(Plugin, UbuntuPlugin): + + for path, subdirs, files in os.walk("/var/log/azure"): + for name in files: +- self.add_copy_spec(os.path.join(path, name), sizelimit=limit) ++ self.add_copy_spec(self.path_join(path, name), sizelimit=limit) + + self.add_cmd_output(( + 'curl -s -H Metadata:true ' +diff --git a/sos/report/plugins/collectd.py b/sos/report/plugins/collectd.py +index 80d4b00a..8584adf9 100644 +--- a/sos/report/plugins/collectd.py ++++ b/sos/report/plugins/collectd.py +@@ -33,7 +33,7 @@ class Collectd(Plugin, IndependentPlugin): + + p = re.compile('^LoadPlugin.*') + try: +- with open("/etc/collectd.conf") as f: ++ with open(self.path_join("/etc/collectd.conf"), 'r') as f: + for line in f: + if p.match(line): + self.add_alert("Active Plugin found: %s" % +diff --git a/sos/report/plugins/container_log.py b/sos/report/plugins/container_log.py +index 14e0b7d8..e8dedad2 100644 +--- a/sos/report/plugins/container_log.py ++++ b/sos/report/plugins/container_log.py +@@ -29,6 +29,6 @@ class ContainerLog(Plugin, IndependentPlugin): + """Collect *.log files from subdirs of passed root path + """ + for dirName, _, _ in os.walk(root): +- self.add_copy_spec(os.path.join(dirName, '*.log')) ++ self.add_copy_spec(self.path_join(dirName, '*.log')) + + # vim: set et ts=4 sw=4 : +diff --git a/sos/report/plugins/corosync.py b/sos/report/plugins/corosync.py +index d74086e3..10e096c6 100644 +--- a/sos/report/plugins/corosync.py ++++ b/sos/report/plugins/corosync.py +@@ -47,7 +47,7 @@ class Corosync(Plugin): + # (it isnt precise but sufficient) + pattern = r'^\s*(logging.)?logfile:\s*(\S+)$' + try: +- with open("/etc/corosync/corosync.conf") as f: ++ with open(self.path_join("/etc/corosync/corosync.conf"), 'r') as f: + for line in f: + if re.match(pattern, line): + self.add_copy_spec(re.search(pattern, line).group(2)) +diff --git a/sos/report/plugins/docker_distribution.py b/sos/report/plugins/docker_distribution.py +index 84222ff7..e760f252 100644 +--- a/sos/report/plugins/docker_distribution.py ++++ b/sos/report/plugins/docker_distribution.py +@@ -19,8 +19,9 @@ class DockerDistribution(Plugin): + def setup(self): + self.add_copy_spec('/etc/docker-distribution/') + self.add_journal('docker-distribution') +- if self.path_exists('/etc/docker-distribution/registry/config.yml'): +- with open('/etc/docker-distribution/registry/config.yml') as f: ++ conf = self.path_join('/etc/docker-distribution/registry/config.yml') ++ if self.path_exists(conf): ++ with open(conf) as f: + for line in f: + if 'rootdirectory' in line: + loc = line.split()[1] +diff --git a/sos/report/plugins/ds.py b/sos/report/plugins/ds.py +index addf49e1..43feb21e 100644 +--- a/sos/report/plugins/ds.py ++++ b/sos/report/plugins/ds.py +@@ -11,7 +11,6 @@ + # See the LICENSE file in the source distribution for further information. 
+ + from sos.report.plugins import Plugin, RedHatPlugin +-import os + + + class DirectoryServer(Plugin, RedHatPlugin): +@@ -47,7 +46,7 @@ class DirectoryServer(Plugin, RedHatPlugin): + try: + for d in self.listdir("/etc/dirsrv"): + if d[0:5] == 'slapd': +- certpath = os.path.join("/etc/dirsrv", d) ++ certpath = self.path_join("/etc/dirsrv", d) + self.add_cmd_output("certutil -L -d %s" % certpath) + self.add_cmd_output("dsctl %s healthcheck" % d) + except OSError: +diff --git a/sos/report/plugins/elastic.py b/sos/report/plugins/elastic.py +index ad9a06ff..da2662bc 100644 +--- a/sos/report/plugins/elastic.py ++++ b/sos/report/plugins/elastic.py +@@ -39,7 +39,9 @@ class Elastic(Plugin, IndependentPlugin): + return hostname, port + + def setup(self): +- els_config_file = "/etc/elasticsearch/elasticsearch.yml" ++ els_config_file = self.path_join( ++ "/etc/elasticsearch/elasticsearch.yml" ++ ) + self.add_copy_spec(els_config_file) + + if self.get_option("all_logs"): +diff --git a/sos/report/plugins/etcd.py b/sos/report/plugins/etcd.py +index fd4f67eb..fe017e9f 100644 +--- a/sos/report/plugins/etcd.py ++++ b/sos/report/plugins/etcd.py +@@ -62,7 +62,7 @@ class etcd(Plugin, RedHatPlugin): + + def get_etcd_url(self): + try: +- with open('/etc/etcd/etcd.conf', 'r') as ef: ++ with open(self.path_join('/etc/etcd/etcd.conf'), 'r') as ef: + for line in ef: + if line.startswith('ETCD_LISTEN_CLIENT_URLS'): + return line.split('=')[1].replace('"', '').strip() +diff --git a/sos/report/plugins/gluster.py b/sos/report/plugins/gluster.py +index a44ffeb7..e518e3d3 100644 +--- a/sos/report/plugins/gluster.py ++++ b/sos/report/plugins/gluster.py +@@ -35,9 +35,10 @@ class Gluster(Plugin, RedHatPlugin): + ] + for statedump_file in statedump_entries: + statedumps_present = statedumps_present+1 ++ _spath = self.path_join(name_dir, statedump_file) + ret = -1 + while ret == -1: +- with open(name_dir + '/' + statedump_file, 'r') as sfile: ++ with open(_spath, 'r') as sfile: + last_line = sfile.readlines()[-1] + ret = string.count(last_line, 'DUMP_END_TIME') + +diff --git a/sos/report/plugins/jars.py b/sos/report/plugins/jars.py +index 0d3cf37e..4b98684e 100644 +--- a/sos/report/plugins/jars.py ++++ b/sos/report/plugins/jars.py +@@ -63,7 +63,7 @@ class Jars(Plugin, RedHatPlugin): + for location in locations: + for dirpath, _, filenames in os.walk(location): + for filename in filenames: +- path = os.path.join(dirpath, filename) ++ path = self.path_join(dirpath, filename) + if Jars.is_jar(path): + jar_paths.append(path) + +diff --git a/sos/report/plugins/kdump.py b/sos/report/plugins/kdump.py +index 757c2736..66565664 100644 +--- a/sos/report/plugins/kdump.py ++++ b/sos/report/plugins/kdump.py +@@ -40,7 +40,7 @@ class RedHatKDump(KDump, RedHatPlugin): + packages = ('kexec-tools',) + + def fstab_parse_fs(self, device): +- with open('/etc/fstab', 'r') as fp: ++ with open(self.path_join('/etc/fstab'), 'r') as fp: + for line in fp: + if line.startswith((device)): + return line.split()[1].rstrip('/') +@@ -50,7 +50,7 @@ class RedHatKDump(KDump, RedHatPlugin): + fs = "" + path = "/var/crash" + +- with open('/etc/kdump.conf', 'r') as fp: ++ with open(self.path_join('/etc/kdump.conf'), 'r') as fp: + for line in fp: + if line.startswith("path"): + path = line.split()[1] +diff --git a/sos/report/plugins/libvirt.py b/sos/report/plugins/libvirt.py +index be8120ff..5caa5802 100644 +--- a/sos/report/plugins/libvirt.py ++++ b/sos/report/plugins/libvirt.py +@@ -55,7 +55,7 @@ class Libvirt(Plugin, IndependentPlugin): + else: + 
self.add_copy_spec("/var/log/libvirt") + +- if self.path_exists(self.join_sysroot(libvirt_keytab)): ++ if self.path_exists(self.path_join(libvirt_keytab)): + self.add_cmd_output("klist -ket %s" % libvirt_keytab) + + self.add_cmd_output("ls -lR /var/lib/libvirt/qemu") +diff --git a/sos/report/plugins/logs.py b/sos/report/plugins/logs.py +index ee6bb98d..606e574a 100644 +--- a/sos/report/plugins/logs.py ++++ b/sos/report/plugins/logs.py +@@ -24,15 +24,15 @@ class Logs(Plugin, IndependentPlugin): + since = self.get_option("since") + + if self.path_exists('/etc/rsyslog.conf'): +- with open('/etc/rsyslog.conf', 'r') as conf: ++ with open(self.path_join('/etc/rsyslog.conf'), 'r') as conf: + for line in conf.readlines(): + if line.startswith('$IncludeConfig'): + confs += glob.glob(line.split()[1]) + + for conf in confs: +- if not self.path_exists(conf): ++ if not self.path_exists(self.path_join(conf)): + continue +- config = self.join_sysroot(conf) ++ config = self.path_join(conf) + logs += self.do_regex_find_all(r"^\S+\s+(-?\/.*$)\s+", config) + + for i in logs: +@@ -60,7 +60,7 @@ class Logs(Plugin, IndependentPlugin): + # - there is some data present, either persistent or runtime only + # - systemd-journald service exists + # otherwise fallback to collecting few well known logfiles directly +- journal = any([self.path_exists(p + "/log/journal/") ++ journal = any([self.path_exists(self.path_join(p, "log/journal/")) + for p in ["/var", "/run"]]) + if journal and self.is_service("systemd-journald"): + self.add_journal(since=since, tags='journal_full', priority=100) +diff --git a/sos/report/plugins/manageiq.py b/sos/report/plugins/manageiq.py +index 27ad6ef4..e20c4a2a 100644 +--- a/sos/report/plugins/manageiq.py ++++ b/sos/report/plugins/manageiq.py +@@ -58,7 +58,7 @@ class ManageIQ(Plugin, RedHatPlugin): + # Log files to collect from miq_dir/log/ + miq_log_dir = os.path.join(miq_dir, "log") + +- miq_main_log_files = [ ++ miq_main_logs = [ + 'ansible_tower.log', + 'top_output.log', + 'evm.log', +@@ -81,16 +81,16 @@ class ManageIQ(Plugin, RedHatPlugin): + self.add_copy_spec(list(self.files)) + + self.add_copy_spec([ +- os.path.join(self.miq_conf_dir, x) for x in self.miq_conf_files ++ self.path_join(self.miq_conf_dir, x) for x in self.miq_conf_files + ]) + + # Collect main log files without size limit. + self.add_copy_spec([ +- os.path.join(self.miq_log_dir, x) for x in self.miq_main_log_files ++ self.path_join(self.miq_log_dir, x) for x in self.miq_main_logs + ], sizelimit=0) + + self.add_copy_spec([ +- os.path.join(self.miq_log_dir, x) for x in self.miq_log_files ++ self.path_join(self.miq_log_dir, x) for x in self.miq_log_files + ]) + + self.add_copy_spec([ +@@ -101,8 +101,8 @@ class ManageIQ(Plugin, RedHatPlugin): + if environ.get("APPLIANCE_PG_DATA"): + pg_dir = environ.get("APPLIANCE_PG_DATA") + self.add_copy_spec([ +- os.path.join(pg_dir, 'pg_log'), +- os.path.join(pg_dir, 'postgresql.conf') ++ self.path_join(pg_dir, 'pg_log'), ++ self.path_join(pg_dir, 'postgresql.conf') + ]) + + # vim: set et ts=4 sw=4 : +diff --git a/sos/report/plugins/numa.py b/sos/report/plugins/numa.py +index 0faef8d2..9094baef 100644 +--- a/sos/report/plugins/numa.py ++++ b/sos/report/plugins/numa.py +@@ -9,7 +9,6 @@ + # See the LICENSE file in the source distribution for further information. 
+ + from sos.report.plugins import Plugin, IndependentPlugin +-import os.path + + + class Numa(Plugin, IndependentPlugin): +@@ -42,10 +41,10 @@ class Numa(Plugin, IndependentPlugin): + ]) + + self.add_copy_spec([ +- os.path.join(numa_path, "node*/meminfo"), +- os.path.join(numa_path, "node*/cpulist"), +- os.path.join(numa_path, "node*/distance"), +- os.path.join(numa_path, "node*/hugepages/hugepages-*/*") ++ self.path_join(numa_path, "node*/meminfo"), ++ self.path_join(numa_path, "node*/cpulist"), ++ self.path_join(numa_path, "node*/distance"), ++ self.path_join(numa_path, "node*/hugepages/hugepages-*/*") + ]) + + # vim: set et ts=4 sw=4 : +diff --git a/sos/report/plugins/openstack_instack.py b/sos/report/plugins/openstack_instack.py +index 7c56c162..5b4f7d41 100644 +--- a/sos/report/plugins/openstack_instack.py ++++ b/sos/report/plugins/openstack_instack.py +@@ -68,7 +68,7 @@ class OpenStackInstack(Plugin): + p = uc_config.get(opt) + if p: + if not os.path.isabs(p): +- p = os.path.join('/home/stack', p) ++ p = self.path_join('/home/stack', p) + self.add_copy_spec(p) + except Exception: + pass +diff --git a/sos/report/plugins/openstack_nova.py b/sos/report/plugins/openstack_nova.py +index 53210c48..f840081e 100644 +--- a/sos/report/plugins/openstack_nova.py ++++ b/sos/report/plugins/openstack_nova.py +@@ -103,7 +103,7 @@ class OpenStackNova(Plugin): + "nova-scheduler.log*" + ] + for novalog in novalogs: +- self.add_copy_spec(os.path.join(novadir, novalog)) ++ self.add_copy_spec(self.path_join(novadir, novalog)) + + self.add_copy_spec([ + "/etc/nova/", +diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py +index 003596c6..179d1532 100644 +--- a/sos/report/plugins/openvswitch.py ++++ b/sos/report/plugins/openvswitch.py +@@ -10,7 +10,6 @@ + + from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin + +-from os.path import join as path_join + from os import environ + + import re +@@ -65,7 +64,9 @@ class OpenVSwitch(Plugin): + log_dirs.append(environ.get('OVS_LOGDIR')) + + if not all_logs: +- self.add_copy_spec([path_join(ld, '*.log') for ld in log_dirs]) ++ self.add_copy_spec([ ++ self.path_join(ld, '*.log') for ld in log_dirs ++ ]) + else: + self.add_copy_spec(log_dirs) + +@@ -76,13 +77,13 @@ class OpenVSwitch(Plugin): + ]) + + self.add_copy_spec([ +- path_join('/usr/local/etc/openvswitch', 'conf.db'), +- path_join('/etc/openvswitch', 'conf.db'), +- path_join('/var/lib/openvswitch', 'conf.db'), ++ self.path_join('/usr/local/etc/openvswitch', 'conf.db'), ++ self.path_join('/etc/openvswitch', 'conf.db'), ++ self.path_join('/var/lib/openvswitch', 'conf.db'), + ]) + ovs_dbdir = environ.get('OVS_DBDIR') + if ovs_dbdir: +- self.add_copy_spec(path_join(ovs_dbdir, 'conf.db')) ++ self.add_copy_spec(self.path_join(ovs_dbdir, 'conf.db')) + + self.add_cmd_output([ + # The '-t 5' adds an upper bound on how long to wait to connect +diff --git a/sos/report/plugins/origin.py b/sos/report/plugins/origin.py +index f9cc32c1..7df9c019 100644 +--- a/sos/report/plugins/origin.py ++++ b/sos/report/plugins/origin.py +@@ -69,20 +69,21 @@ class OpenShiftOrigin(Plugin): + + def is_static_etcd(self): + """Determine if we are on a node running etcd""" +- return self.path_exists(os.path.join(self.static_pod_dir, "etcd.yaml")) ++ return self.path_exists(self.path_join(self.static_pod_dir, ++ "etcd.yaml")) + + def is_static_pod_compatible(self): + """Determine if a node is running static pods""" + return self.path_exists(self.static_pod_dir) + + def setup(self): +- 
bstrap_node_cfg = os.path.join(self.node_base_dir, +- "bootstrap-" + self.node_cfg_file) +- bstrap_kubeconfig = os.path.join(self.node_base_dir, +- "bootstrap.kubeconfig") +- node_certs = os.path.join(self.node_base_dir, "certs", "*") +- node_client_ca = os.path.join(self.node_base_dir, "client-ca.crt") +- admin_cfg = os.path.join(self.master_base_dir, "admin.kubeconfig") ++ bstrap_node_cfg = self.path_join(self.node_base_dir, ++ "bootstrap-" + self.node_cfg_file) ++ bstrap_kubeconfig = self.path_join(self.node_base_dir, ++ "bootstrap.kubeconfig") ++ node_certs = self.path_join(self.node_base_dir, "certs", "*") ++ node_client_ca = self.path_join(self.node_base_dir, "client-ca.crt") ++ admin_cfg = self.path_join(self.master_base_dir, "admin.kubeconfig") + oc_cmd_admin = "%s --config=%s" % ("oc", admin_cfg) + static_pod_logs_cmd = "master-logs" + +@@ -92,11 +93,12 @@ class OpenShiftOrigin(Plugin): + self.add_copy_spec([ + self.master_cfg, + self.master_env, +- os.path.join(self.master_base_dir, "*.crt"), ++ self.path_join(self.master_base_dir, "*.crt"), + ]) + + if self.is_static_pod_compatible(): +- self.add_copy_spec(os.path.join(self.static_pod_dir, "*.yaml")) ++ self.add_copy_spec(self.path_join(self.static_pod_dir, ++ "*.yaml")) + self.add_cmd_output([ + "%s api api" % static_pod_logs_cmd, + "%s controllers controllers" % static_pod_logs_cmd, +@@ -177,9 +179,9 @@ class OpenShiftOrigin(Plugin): + node_client_ca, + bstrap_node_cfg, + bstrap_kubeconfig, +- os.path.join(self.node_base_dir, "*.crt"), +- os.path.join(self.node_base_dir, "resolv.conf"), +- os.path.join(self.node_base_dir, "node-dnsmasq.conf"), ++ self.path_join(self.node_base_dir, "*.crt"), ++ self.path_join(self.node_base_dir, "resolv.conf"), ++ self.path_join(self.node_base_dir, "node-dnsmasq.conf"), + ]) + + self.add_journal(units="atomic-openshift-node") +diff --git a/sos/report/plugins/ovirt.py b/sos/report/plugins/ovirt.py +index 1de606be..09647bf1 100644 +--- a/sos/report/plugins/ovirt.py ++++ b/sos/report/plugins/ovirt.py +@@ -216,7 +216,7 @@ class Ovirt(Plugin, RedHatPlugin): + "isouploader.conf" + ] + for conf_file in passwd_files: +- conf_path = os.path.join("/etc/ovirt-engine", conf_file) ++ conf_path = self.path_join("/etc/ovirt-engine", conf_file) + self.do_file_sub( + conf_path, + r"passwd=(.*)", +diff --git a/sos/report/plugins/ovirt_engine_backup.py b/sos/report/plugins/ovirt_engine_backup.py +index 676e419e..7fb6a5c7 100644 +--- a/sos/report/plugins/ovirt_engine_backup.py ++++ b/sos/report/plugins/ovirt_engine_backup.py +@@ -8,7 +8,6 @@ + # + # See the LICENSE file in the source distribution for further information. 
+ +-import os + from sos.report.plugins import (Plugin, RedHatPlugin) + from datetime import datetime + +@@ -29,11 +28,11 @@ class oVirtEngineBackup(Plugin, RedHatPlugin): + + def setup(self): + now = datetime.now().strftime("%Y%m%d%H%M%S") +- backup_filename = os.path.join( ++ backup_filename = self.path_join( + self.get_option("backupdir"), + "engine-db-backup-%s.tar.gz" % (now) + ) +- log_filename = os.path.join( ++ log_filename = self.path_join( + self.get_option("backupdir"), + "engine-db-backup-%s.log" % (now) + ) +diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py +index d6647aad..914eda60 100644 +--- a/sos/report/plugins/ovn_central.py ++++ b/sos/report/plugins/ovn_central.py +@@ -42,7 +42,7 @@ class OVNCentral(Plugin): + return + else: + try: +- with open(filename, 'r') as f: ++ with open(self.path_join(filename), 'r') as f: + try: + db = json.load(f) + except Exception: +@@ -71,13 +71,13 @@ class OVNCentral(Plugin): + ovs_rundir = os.environ.get('OVS_RUNDIR') + for pidfile in ['ovnnb_db.pid', 'ovnsb_db.pid', 'ovn-northd.pid']: + self.add_copy_spec([ +- os.path.join('/var/lib/openvswitch/ovn', pidfile), +- os.path.join('/usr/local/var/run/openvswitch', pidfile), +- os.path.join('/run/openvswitch/', pidfile), ++ self.path_join('/var/lib/openvswitch/ovn', pidfile), ++ self.path_join('/usr/local/var/run/openvswitch', pidfile), ++ self.path_join('/run/openvswitch/', pidfile), + ]) + + if ovs_rundir: +- self.add_copy_spec(os.path.join(ovs_rundir, pidfile)) ++ self.add_copy_spec(self.path_join(ovs_rundir, pidfile)) + + if self.get_option("all_logs"): + self.add_copy_spec("/var/log/ovn/") +@@ -104,7 +104,7 @@ class OVNCentral(Plugin): + + schema_dir = '/usr/share/openvswitch' + +- nb_tables = self.get_tables_from_schema(os.path.join( ++ nb_tables = self.get_tables_from_schema(self.path_join( + schema_dir, 'ovn-nb.ovsschema')) + + self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl') +@@ -116,7 +116,7 @@ class OVNCentral(Plugin): + format(self.ovn_sbdb_sock_path), + "output": "Leader: self"} + if self.test_predicate(self, pred=SoSPredicate(self, cmd_outputs=co)): +- sb_tables = self.get_tables_from_schema(os.path.join( ++ sb_tables = self.get_tables_from_schema(self.path_join( + schema_dir, 'ovn-sb.ovsschema'), ['Logical_Flow']) + self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl') + cmds += sbctl_cmds +@@ -134,14 +134,14 @@ class OVNCentral(Plugin): + ovs_dbdir = os.environ.get('OVS_DBDIR') + for dbfile in ['ovnnb_db.db', 'ovnsb_db.db']: + self.add_copy_spec([ +- os.path.join('/var/lib/openvswitch/ovn', dbfile), +- os.path.join('/usr/local/etc/openvswitch', dbfile), +- os.path.join('/etc/openvswitch', dbfile), +- os.path.join('/var/lib/openvswitch', dbfile), +- os.path.join('/var/lib/ovn/etc', dbfile), ++ self.path_join('/var/lib/openvswitch/ovn', dbfile), ++ self.path_join('/usr/local/etc/openvswitch', dbfile), ++ self.path_join('/etc/openvswitch', dbfile), ++ self.path_join('/var/lib/openvswitch', dbfile), ++ self.path_join('/var/lib/ovn/etc', dbfile) + ]) + if ovs_dbdir: +- self.add_copy_spec(os.path.join(ovs_dbdir, dbfile)) ++ self.add_copy_spec(self.path_join(ovs_dbdir, dbfile)) + + self.add_journal(units="ovn-northd") + +diff --git a/sos/report/plugins/ovn_host.py b/sos/report/plugins/ovn_host.py +index 3742c49f..78604a15 100644 +--- a/sos/report/plugins/ovn_host.py ++++ b/sos/report/plugins/ovn_host.py +@@ -35,7 +35,7 @@ class OVNHost(Plugin): + else: + self.add_copy_spec("/var/log/ovn/*.log") + +- 
self.add_copy_spec([os.path.join(pp, pidfile) for pp in pid_paths]) ++ self.add_copy_spec([self.path_join(pp, pidfile) for pp in pid_paths]) + + self.add_copy_spec('/etc/sysconfig/ovn-controller') + +@@ -49,7 +49,7 @@ class OVNHost(Plugin): + + def check_enabled(self): + return (any([self.path_isfile( +- os.path.join(pp, pidfile)) for pp in pid_paths]) or ++ self.path_join(pp, pidfile)) for pp in pid_paths]) or + super(OVNHost, self).check_enabled()) + + +diff --git a/sos/report/plugins/pacemaker.py b/sos/report/plugins/pacemaker.py +index 497807ff..6ce80881 100644 +--- a/sos/report/plugins/pacemaker.py ++++ b/sos/report/plugins/pacemaker.py +@@ -129,7 +129,7 @@ class Pacemaker(Plugin): + + class DebianPacemaker(Pacemaker, DebianPlugin, UbuntuPlugin): + def setup(self): +- self.envfile = "/etc/default/pacemaker" ++ self.envfile = self.path_join("/etc/default/pacemaker") + self.setup_crm_shell() + self.setup_pcs() + super(DebianPacemaker, self).setup() +@@ -141,7 +141,7 @@ class DebianPacemaker(Pacemaker, DebianPlugin, UbuntuPlugin): + + class RedHatPacemaker(Pacemaker, RedHatPlugin): + def setup(self): +- self.envfile = "/etc/sysconfig/pacemaker" ++ self.envfile = self.path_join("/etc/sysconfig/pacemaker") + self.setup_pcs() + self.add_copy_spec("/etc/sysconfig/sbd") + super(RedHatPacemaker, self).setup() +diff --git a/sos/report/plugins/pcp.py b/sos/report/plugins/pcp.py +index 9707d7a9..ad902332 100644 +--- a/sos/report/plugins/pcp.py ++++ b/sos/report/plugins/pcp.py +@@ -41,7 +41,7 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin): + total_size = 0 + for dirpath, dirnames, filenames in os.walk(path): + for f in filenames: +- fp = os.path.join(dirpath, f) ++ fp = self.path_join(dirpath, f) + total_size += os.path.getsize(fp) + return total_size + +@@ -86,7 +86,7 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin): + # unconditionally. Obviously if someone messes up their /etc/pcp.conf + # in a ridiculous way (i.e. setting PCP_SYSCONF_DIR to '/') this will + # break badly. +- var_conf_dir = os.path.join(self.pcp_var_dir, 'config') ++ var_conf_dir = self.path_join(self.pcp_var_dir, 'config') + self.add_copy_spec([ + self.pcp_sysconf_dir, + self.pcp_conffile, +@@ -98,10 +98,10 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin): + # rpms. 
It does not make up for a lot of size but it contains many + # files + self.add_forbidden_path([ +- os.path.join(var_conf_dir, 'pmchart'), +- os.path.join(var_conf_dir, 'pmlogconf'), +- os.path.join(var_conf_dir, 'pmieconf'), +- os.path.join(var_conf_dir, 'pmlogrewrite') ++ self.path_join(var_conf_dir, 'pmchart'), ++ self.path_join(var_conf_dir, 'pmlogconf'), ++ self.path_join(var_conf_dir, 'pmieconf'), ++ self.path_join(var_conf_dir, 'pmlogrewrite') + ]) + + # Take PCP_LOG_DIR/pmlogger/`hostname` + PCP_LOG_DIR/pmmgr/`hostname` +@@ -121,13 +121,13 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin): + # we would collect everything + if self.pcp_hostname != '': + # collect pmmgr logs up to 'pmmgrlogs' size limit +- path = os.path.join(self.pcp_log_dir, 'pmmgr', +- self.pcp_hostname, '*') ++ path = self.path_join(self.pcp_log_dir, 'pmmgr', ++ self.pcp_hostname, '*') + self.add_copy_spec(path, sizelimit=self.sizelimit, tailit=False) + # collect newest pmlogger logs up to 'pmloggerfiles' count + files_collected = 0 +- path = os.path.join(self.pcp_log_dir, 'pmlogger', +- self.pcp_hostname, '*') ++ path = self.path_join(self.pcp_log_dir, 'pmlogger', ++ self.pcp_hostname, '*') + pmlogger_ls = self.exec_cmd("ls -t1 %s" % path) + if pmlogger_ls['status'] == 0: + for line in pmlogger_ls['output'].splitlines(): +@@ -138,15 +138,15 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin): + + self.add_copy_spec([ + # Collect PCP_LOG_DIR/pmcd and PCP_LOG_DIR/NOTICES +- os.path.join(self.pcp_log_dir, 'pmcd'), +- os.path.join(self.pcp_log_dir, 'NOTICES*'), ++ self.path_join(self.pcp_log_dir, 'pmcd'), ++ self.path_join(self.pcp_log_dir, 'NOTICES*'), + # Collect PCP_VAR_DIR/pmns +- os.path.join(self.pcp_var_dir, 'pmns'), ++ self.path_join(self.pcp_var_dir, 'pmns'), + # Also collect any other log and config files + # (as suggested by fche) +- os.path.join(self.pcp_log_dir, '*/*.log*'), +- os.path.join(self.pcp_log_dir, '*/*/*.log*'), +- os.path.join(self.pcp_log_dir, '*/*/config*') ++ self.path_join(self.pcp_log_dir, '*/*.log*'), ++ self.path_join(self.pcp_log_dir, '*/*/*.log*'), ++ self.path_join(self.pcp_log_dir, '*/*/config*') + ]) + + # Collect a summary for the current day +diff --git a/sos/report/plugins/postfix.py b/sos/report/plugins/postfix.py +index 8f584430..3ca0c4ad 100644 +--- a/sos/report/plugins/postfix.py ++++ b/sos/report/plugins/postfix.py +@@ -41,7 +41,7 @@ class Postfix(Plugin): + ] + fp = [] + try: +- with open('/etc/postfix/main.cf', 'r') as cffile: ++ with open(self.path_join('/etc/postfix/main.cf'), 'r') as cffile: + for line in cffile.readlines(): + # ignore comments and take the first word after '=' + if line.startswith('#'): +diff --git a/sos/report/plugins/postgresql.py b/sos/report/plugins/postgresql.py +index bec0b019..00824db7 100644 +--- a/sos/report/plugins/postgresql.py ++++ b/sos/report/plugins/postgresql.py +@@ -124,7 +124,7 @@ class RedHatPostgreSQL(PostgreSQL, SCLPlugin): + + # copy PG_VERSION and postmaster.opts + for f in ["PG_VERSION", "postmaster.opts"]: +- self.add_copy_spec(os.path.join(_dir, "data", f)) ++ self.add_copy_spec(self.path_join(_dir, "data", f)) + + + class DebianPostgreSQL(PostgreSQL, DebianPlugin, UbuntuPlugin): +diff --git a/sos/report/plugins/powerpc.py b/sos/report/plugins/powerpc.py +index 4fb4f87c..50f88650 100644 +--- a/sos/report/plugins/powerpc.py ++++ b/sos/report/plugins/powerpc.py +@@ -22,7 +22,7 @@ class PowerPC(Plugin, IndependentPlugin): + + def setup(self): + try: +- with open('/proc/cpuinfo', 'r') as fp: ++ with 
open(self.path_join('/proc/cpuinfo'), 'r') as fp: + contents = fp.read() + ispSeries = "pSeries" in contents + isPowerNV = "PowerNV" in contents +diff --git a/sos/report/plugins/processor.py b/sos/report/plugins/processor.py +index 2df2dc9a..c3d8930c 100644 +--- a/sos/report/plugins/processor.py ++++ b/sos/report/plugins/processor.py +@@ -7,7 +7,6 @@ + # See the LICENSE file in the source distribution for further information. + + from sos.report.plugins import Plugin, IndependentPlugin +-import os + + + class Processor(Plugin, IndependentPlugin): +@@ -41,7 +40,7 @@ class Processor(Plugin, IndependentPlugin): + # cumulative directory size exceeds 25MB or even 100MB. + cdirs = self.listdir('/sys/devices/system/cpu') + self.add_copy_spec([ +- os.path.join('/sys/devices/system/cpu', cdir) for cdir in cdirs ++ self.path_join('/sys/devices/system/cpu', cdir) for cdir in cdirs + ]) + + self.add_cmd_output([ +diff --git a/sos/report/plugins/python.py b/sos/report/plugins/python.py +index e2ab39ab..a8ec0cd8 100644 +--- a/sos/report/plugins/python.py ++++ b/sos/report/plugins/python.py +@@ -68,9 +68,9 @@ class RedHatPython(Python, RedHatPlugin): + ] + + for py_path in py_paths: +- for root, _, files in os.walk(py_path): ++ for root, _, files in os.walk(self.path_join(py_path)): + for file_ in files: +- filepath = os.path.join(root, file_) ++ filepath = self.path_join(root, file_) + if filepath.endswith('.py'): + try: + with open(filepath, 'rb') as f: +diff --git a/sos/report/plugins/sar.py b/sos/report/plugins/sar.py +index 669f5d7b..b60005b1 100644 +--- a/sos/report/plugins/sar.py ++++ b/sos/report/plugins/sar.py +@@ -7,7 +7,6 @@ + # See the LICENSE file in the source distribution for further information. + + from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin +-import os + import re + + +@@ -24,7 +23,7 @@ class Sar(Plugin,): + "", False)] + + def setup(self): +- self.add_copy_spec(os.path.join(self.sa_path, '*'), ++ self.add_copy_spec(self.path_join(self.sa_path, '*'), + sizelimit=0 if self.get_option("all_sar") else None, + tailit=False) + +@@ -44,7 +43,7 @@ class Sar(Plugin,): + # as option for sadc + for fname in dir_list: + if sa_regex.match(fname): +- sa_data_path = os.path.join(self.sa_path, fname) ++ sa_data_path = self.path_join(self.sa_path, fname) + sar_filename = 'sar' + fname[2:] + if sar_filename not in dir_list: + sar_cmd = 'sh -c "sar -A -f %s"' % sa_data_path +diff --git a/sos/report/plugins/sos_extras.py b/sos/report/plugins/sos_extras.py +index ffde4138..55bc4dc0 100644 +--- a/sos/report/plugins/sos_extras.py ++++ b/sos/report/plugins/sos_extras.py +@@ -58,7 +58,7 @@ class SosExtras(Plugin, IndependentPlugin): + + for path, dirlist, filelist in os.walk(self.extras_dir): + for f in filelist: +- _file = os.path.join(path, f) ++ _file = self.path_join(path, f) + self._log_warn("Collecting data from extras file %s" % _file) + try: + for line in open(_file).read().splitlines(): +diff --git a/sos/report/plugins/ssh.py b/sos/report/plugins/ssh.py +index 971cda8b..9ac9dec0 100644 +--- a/sos/report/plugins/ssh.py ++++ b/sos/report/plugins/ssh.py +@@ -42,7 +41,7 @@ class Ssh(Plugin, IndependentPlugin): + try: + for sshcfg in sshcfgs: + tag = sshcfg.split('/')[-1] +- with open(sshcfg, 'r') as cfgfile: ++ with open(self.path_join(sshcfg), 'r') as cfgfile: + for line in cfgfile: + # skip empty lines and comments + if len(line.split()) == 0 or line.startswith('#'): +diff --git a/sos/report/plugins/unpackaged.py b/sos/report/plugins/unpackaged.py +index 
9205e53f..772b1d1f 100644 +--- a/sos/report/plugins/unpackaged.py ++++ b/sos/report/plugins/unpackaged.py +@@ -40,7 +40,7 @@ class Unpackaged(Plugin, RedHatPlugin): + for e in exclude: + dirs[:] = [d for d in dirs if d not in e] + for name in files: +- path = os.path.join(root, name) ++ path = self.path_join(root, name) + try: + if stat.S_ISLNK(os.lstat(path).st_mode): + path = Path(path).resolve() +@@ -49,7 +49,7 @@ class Unpackaged(Plugin, RedHatPlugin): + file_list.append(os.path.realpath(path)) + for name in dirs: + file_list.append(os.path.realpath( +- os.path.join(root, name))) ++ self.path_join(root, name))) + + return file_list + +diff --git a/sos/report/plugins/watchdog.py b/sos/report/plugins/watchdog.py +index 1bf3f4cb..bf2dc9cb 100644 +--- a/sos/report/plugins/watchdog.py ++++ b/sos/report/plugins/watchdog.py +@@ -11,7 +11,6 @@ + from sos.report.plugins import Plugin, RedHatPlugin + + from glob import glob +-import os + + + class Watchdog(Plugin, RedHatPlugin): +@@ -56,8 +55,8 @@ class Watchdog(Plugin, RedHatPlugin): + Collect configuration files, custom executables for test-binary + and repair-binary, and stdout/stderr logs. + """ +- conf_file = self.get_option('conf_file') +- log_dir = '/var/log/watchdog' ++ conf_file = self.path_join(self.get_option('conf_file')) ++ log_dir = self.path_join('/var/log/watchdog') + + # Get service configuration and sysconfig files + self.add_copy_spec([ +@@ -80,15 +79,15 @@ class Watchdog(Plugin, RedHatPlugin): + self._log_warn("Could not read %s: %s" % (conf_file, ex)) + + if self.get_option('all_logs'): +- log_files = glob(os.path.join(log_dir, '*')) ++ log_files = glob(self.path_join(log_dir, '*')) + else: +- log_files = (glob(os.path.join(log_dir, '*.stdout')) + +- glob(os.path.join(log_dir, '*.stderr'))) ++ log_files = (glob(self.path_join(log_dir, '*.stdout')) + ++ glob(self.path_join(log_dir, '*.stderr'))) + + self.add_copy_spec(log_files) + + # Get output of "wdctl " for each /dev/watchdog* +- for dev in glob('/dev/watchdog*'): ++ for dev in glob(self.path_join('/dev/watchdog*')): + self.add_cmd_output("wdctl %s" % dev) + + # vim: set et ts=4 sw=4 : +diff --git a/sos/report/plugins/yum.py b/sos/report/plugins/yum.py +index 148464cb..e5256642 100644 +--- a/sos/report/plugins/yum.py ++++ b/sos/report/plugins/yum.py +@@ -61,7 +61,7 @@ class Yum(Plugin, RedHatPlugin): + if not p.endswith(".py"): + continue + plugins = plugins + " " if len(plugins) else "" +- plugins = plugins + os.path.join(YUM_PLUGIN_PATH, p) ++ plugins = plugins + self.path_join(YUM_PLUGIN_PATH, p) + if len(plugins): + self.add_cmd_output("rpm -qf %s" % plugins, + suggest_filename="plugin-packages") +-- +2.31.1 + +From f4af5efdc79aefe1aa685c36d095925bae14dc4a Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Tue, 28 Sep 2021 13:00:17 -0400 +Subject: [PATCH 1/4] [collect] Add --transport option and allow clusters to + set transport type + +Adds a new `--transport` option for users to be able to specify the type +of transport to use when connecting to nodes. The default value of +`auto` will defer to the cluster profile to set the transport type, +which will continue to default to use OpenSSH's ControlPersist feature. + +Clusters may override the new `set_transport_type()` method to change +the default transport used. + +If `--transport` is anything besides `auto`, then the cluster profile +will not be deferred to when choosing a transport for each remote node. 
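As a hypothetical illustration of the override mechanism (the profile name below is invented; `set_transport_type()` and the 'control_persist' value come from this patch), a cluster profile would select its preferred transport like so:

    from sos.collector.clusters import Cluster

    class ExampleCluster(Cluster):
        """Hypothetical profile that prefers an explicit transport."""

        def set_transport_type(self):
            # Only consulted when the user passes --transport auto (the
            # default); any other --transport value bypasses the cluster.
            return 'control_persist'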
+ +Signed-off-by: Jake Hunsaker +--- + man/en/sos-collect.1 | 15 +++++++++++++++ + sos/collector/__init__.py | 6 ++++++ + sos/collector/clusters/__init__.py | 10 ++++++++++ + sos/collector/exceptions.py | 13 ++++++++++++- + sos/collector/sosnode.py | 16 +++++++++++++++- + 5 files changed, 58 insertions(+), 2 deletions(-) + +diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1 +index e930023e..8ad4fe5e 100644 +--- a/man/en/sos-collect.1 ++++ b/man/en/sos-collect.1 +@@ -43,6 +43,7 @@ sos collect \- Collect sosreports from multiple (cluster) nodes + [\-\-sos-cmd SOS_CMD] + [\-t|\-\-threads THREADS] + [\-\-timeout TIMEOUT] ++ [\-\-transport TRANSPORT] + [\-\-tmp\-dir TMP_DIR] + [\-v|\-\-verbose] + [\-\-verify] +@@ -350,6 +351,20 @@ Note that sosreports are collected in parallel, so you can approximate the total + runtime of sos collect via timeout*(number of nodes/jobs). + + Default is 180 seconds. ++.TP ++\fB\-\-transport\fR TRANSPORT ++Specify the type of remote transport to use to manage connections to remote nodes. ++ ++\fBsos collect\fR uses locally installed binaries to connect to and interact with remote ++nodes, instead of directly establishing those connections. By default, OpenSSH's ControlPersist ++feature is preferred, however certain cluster types may have preferences of their own for how ++remote sessions should be established. ++ ++The types of transports supported are currently as follows: ++ ++ \fBauto\fR Allow the cluster type to determine the transport used ++ \fBcontrol_persist\fR Use OpenSSH's ControlPersist feature. This is the default behavior ++ + .TP + \fB\-\-tmp\-dir\fR TMP_DIR + Specify a temporary directory to save sos archives to. By default one will be created in +diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py +index da912655..fecfe6aa 100644 +--- a/sos/collector/__init__.py ++++ b/sos/collector/__init__.py +@@ -98,6 +98,7 @@ class SoSCollector(SoSComponent): + 'ssh_port': 22, + 'ssh_user': 'root', + 'timeout': 600, ++ 'transport': 'auto', + 'verify': False, + 'usernames': [], + 'upload': False, +@@ -378,6 +379,8 @@ class SoSCollector(SoSComponent): + help='Specify an SSH user. 
Default root') + collect_grp.add_argument('--timeout', type=int, required=False, + help='Timeout for sosreport on each node.') ++ collect_grp.add_argument('--transport', default='auto', type=str, ++ help='Remote connection transport to use') + collect_grp.add_argument("--upload", action="store_true", + default=False, + help="Upload archive to a policy-default " +@@ -813,6 +813,8 @@ class SoSCollector(SoSComponent): + self.collect_md.add_field('cluster_type', self.cluster_type) + if self.cluster: + self.master.cluster = self.cluster ++ if self.opts.transport == 'auto': ++ self.opts.transport = self.cluster.set_transport_type() + self.cluster.setup() + if self.cluster.cluster_ssh_key: + if not self.opts.ssh_key: +@@ -1041,6 +1046,7 @@ class SoSCollector(SoSComponent): + else: + client.disconnect() + except Exception: ++ # all exception logging is handled within SoSNode + pass + + def intro(self): +diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py +index 64ac2a44..cf1e7a0b 100644 +--- a/sos/collector/clusters/__init__.py ++++ b/sos/collector/clusters/__init__.py +@@ -149,6 +149,16 @@ class Cluster(): + """ + pass + ++ def set_transport_type(self): ++ """The default connection type used by sos collect is to leverage the ++ local system's SSH installation using ControlPersist, however certain ++ cluster types may want to use something else. ++ ++ Override this in a specific cluster profile to set the ``transport`` ++ option according to what type of transport should be used. ++ """ ++ return 'control_persist' ++ + def set_master_options(self, node): + """If there is a need to set specific options in the sos command being + run on the cluster's master nodes, override this method in the cluster +diff --git a/sos/collector/exceptions.py b/sos/collector/exceptions.py +index 1e44768b..2bb07e7b 100644 +--- a/sos/collector/exceptions.py ++++ b/sos/collector/exceptions.py +@@ -94,6 +94,16 @@ class UnsupportedHostException(Exception): + super(UnsupportedHostException, self).__init__(message) + + ++class InvalidTransportException(Exception): ++ """Raised when a transport is requested but it does not exist or is ++ not supported locally""" ++ ++ def __init__(self, transport=None): ++ message = ("Connection failed: unknown or unsupported transport %s" ++ % transport if transport else '') ++ super(InvalidTransportException, self).__init__(message) ++ ++ + __all__ = [ + 'AuthPermissionDeniedException', + 'CommandTimeoutException', +@@ -104,5 +114,6 @@ __all__ = [ + 'InvalidPasswordException', + 'PasswordRequestException', + 'TimeoutPasswordAuthException', +- 'UnsupportedHostException' ++ 'UnsupportedHostException', ++ 'InvalidTransportException' + ] +diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py +index f79bd5ff..5c5c7201 100644 +--- a/sos/collector/sosnode.py ++++ b/sos/collector/sosnode.py +@@ -22,7 +22,13 @@ from sos.collector.transports.control_persist import SSHControlPersist + from sos.collector.transports.local import LocalTransport + from sos.collector.exceptions import (CommandTimeoutException, + ConnectionException, +- UnsupportedHostException) ++ UnsupportedHostException, ++ InvalidTransportException) ++ ++TRANSPORTS = { ++ 'local': LocalTransport, ++ 'control_persist': SSHControlPersist, ++} + + + class SosNode(): +@@ -107,6 +113,14 @@ class SosNode(): + if self.address in ['localhost', '127.0.0.1']: + self.local = True + return LocalTransport(self.address, commons) ++ elif self.opts.transport in TRANSPORTS.keys(): ++ return 
TRANSPORTS[self.opts.transport](self.address, commons) ++ elif self.opts.transport != 'auto': ++ self.log_error( ++ "Connection failed: unknown or unsupported transport %s" ++ % self.opts.transport ++ ) ++ raise InvalidTransportException(self.opts.transport) + return SSHControlPersist(self.address, commons) + + def _fmt_msg(self, msg): +-- +2.31.1 + + +From dbc49345384404600f45d68b8d3c6541b2a26480 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Thu, 30 Sep 2021 10:38:18 -0400 +Subject: [PATCH 2/4] [transports] Add 'oc' as a transport option for remote + nodes + +This commit adds a new transport for `sos collect` by leveraging a +locally available `oc` binary that has been properly configured for +access to an OCP cluster. + +This transport will allow users to use `sos collect` to collect reports +from an OCP cluster without directly connecting to any of the nodes +involved. We do this by using the `oc` binary to first launch a pod on +target node(s) and then exec our discovery commands and eventual `sos +report` command in that pod. This in turn is dependent on a functional API +for the `oc` binary to communicate with. In the event that `oc` is not +__locally__ available or is not properly configured, we will fall back to +the current default of using SSH ControlPersist to directly connect to +the nodes. Otherwise, the OCP cluster will attempt to automatically use +this new transport. +--- + man/en/sos-collect.1 | 1 + + sos/collector/clusters/ocp.py | 14 ++ + sos/collector/sosnode.py | 8 +- + sos/collector/transports/__init__.py | 20 ++- + sos/collector/transports/oc.py | 220 +++++++++++++++++++++++++++ + 5 files changed, 257 insertions(+), 6 deletions(-) + create mode 100644 sos/collector/transports/oc.py + +diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1 +index 8ad4fe5e..a1f6c10e 100644 +--- a/man/en/sos-collect.1 ++++ b/man/en/sos-collect.1 +@@ -364,6 +364,7 @@ The types of transports supported are currently as follows: + + \fBauto\fR Allow the cluster type to determine the transport used + \fBcontrol_persist\fR Use OpenSSH's ControlPersist feature. This is the default behavior ++ \fBoc\fR Use a \fBlocally\fR configured \fBoc\fR binary to deploy collection pods on OCP nodes + + .TP + \fB\-\-tmp\-dir\fR TMP_DIR +diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py +index ad97587f..a9357dbf 100644 +--- a/sos/collector/clusters/ocp.py ++++ b/sos/collector/clusters/ocp.py +@@ -12,6 +12,7 @@ import os + + from pipes import quote + from sos.collector.clusters import Cluster ++from sos.utilities import is_executable + + + class ocp(Cluster): +@@ -83,6 +84,19 @@ class ocp(Cluster): + nodes[_node[0]][column] = _node[idx[column]] + return nodes + ++ def set_transport_type(self): ++ if is_executable('oc'): ++ return 'oc' ++ self.log_info("Local installation of 'oc' not found or is not " ++ "correctly configured. Will use ControlPersist") ++ self.ui_log.warn( ++ "Preferred transport 'oc' not available, will fallback to SSH."
++ ) ++ if not self.opts.batch: ++ input("Press ENTER to continue connecting with SSH, or Ctrl+C to" ++ "abort.") ++ return 'control_persist' ++ + def get_nodes(self): + nodes = [] + self.node_dict = {} +diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py +index 5c5c7201..8a9dbd7a 100644 +--- a/sos/collector/sosnode.py ++++ b/sos/collector/sosnode.py +@@ -20,6 +20,7 @@ from sos.policies import load + from sos.policies.init_systems import InitSystem + from sos.collector.transports.control_persist import SSHControlPersist + from sos.collector.transports.local import LocalTransport ++from sos.collector.transports.oc import OCTransport + from sos.collector.exceptions import (CommandTimeoutException, + ConnectionException, + UnsupportedHostException, +@@ -28,6 +29,7 @@ from sos.collector.exceptions import (CommandTimeoutException, + TRANSPORTS = { + 'local': LocalTransport, + 'control_persist': SSHControlPersist, ++ 'oc': OCTransport + } + + +@@ -421,13 +423,11 @@ class SosNode(): + if 'atomic' in cmd: + get_pty = True + +- if get_pty: +- cmd = "/bin/bash -c %s" % quote(cmd) +- + if env: + _cmd_env = self.env_vars + env = _cmd_env.update(env) +- return self._transport.run_command(cmd, timeout, need_root, env) ++ return self._transport.run_command(cmd, timeout, need_root, env, ++ get_pty) + + def sosreport(self): + """Run an sos report on the node, then collect it""" +diff --git a/sos/collector/transports/__init__.py b/sos/collector/transports/__init__.py +index 5be7dc6d..7bffee62 100644 +--- a/sos/collector/transports/__init__.py ++++ b/sos/collector/transports/__init__.py +@@ -144,7 +144,8 @@ class RemoteTransport(): + raise NotImplementedError("Transport %s does not define disconnect" + % self.name) + +- def run_command(self, cmd, timeout=180, need_root=False, env=None): ++ def run_command(self, cmd, timeout=180, need_root=False, env=None, ++ get_pty=False): + """Run a command on the node, returning its output and exit code. + This should return the exit code of the command being executed, not the + exit code of whatever mechanism the transport uses to execute that +@@ -165,10 +166,15 @@ class RemoteTransport(): + :param env: Specify env vars to be passed to the ``cmd`` + :type env: ``dict`` + ++ :param get_pty: Does ``cmd`` require execution with a pty? ++ :type get_pty: ``bool`` ++ + :returns: Output of ``cmd`` and the exit code + :rtype: ``dict`` with keys ``output`` and ``status`` + """ + self.log_debug('Running command %s' % cmd) ++ if get_pty: ++ cmd = "/bin/bash -c %s" % quote(cmd) + # currently we only use/support the use of pexpect for handling the + # execution of these commands, as opposed to directly invoking + # subprocess.Popen() in conjunction with tools like sshpass. 
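    # A rough standalone sketch (illustrative only, not sos's own code) of
    # the pexpect pattern described above: spawn the command, wait for EOF
    # or a timeout, and map the result onto the documented output/status
    # dict that run_command() returns.
    import pexpect
    from pipes import quote  # shlex.quote on modern Python

    def run(cmd, timeout=180, get_pty=False):
        if get_pty:
            # run through a shell, quoting so the command is one argument
            cmd = "/bin/bash -c %s" % quote(cmd)
        child = pexpect.spawn(cmd, encoding='utf-8')
        if child.expect([pexpect.EOF, pexpect.TIMEOUT], timeout=timeout) == 1:
            raise Exception("command timed out: %s" % cmd)
        output = child.before
        child.close()
        return {'output': output, 'status': child.exitstatus}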
+@@ -212,6 +218,13 @@ class RemoteTransport(): + :type env: ``dict`` + """ + cmd = self._format_cmd_for_exec(cmd) ++ ++ # if for any reason env is empty, set it to None as otherwise ++ # pexpect interprets this to mean "run this command with no env vars of ++ # any kind" ++ if not env: ++ env = None ++ + result = pexpect.spawn(cmd, encoding='utf-8', env=env) + + _expects = [pexpect.EOF, pexpect.TIMEOUT] +@@ -268,6 +281,9 @@ class RemoteTransport(): + _out = self.run_command('hostname') + if _out['status'] == 0: + self._hostname = _out['output'].strip() ++ ++ if not self._hostname: ++ self._hostname = self.address + self.log_info("Hostname set to %s" % self._hostname) + return self._hostname + +@@ -302,7 +318,7 @@ class RemoteTransport(): + return self._read_file(fname) + + def _read_file(self, fname): +- res = self.run_command("cat %s" % fname, timeout=5) ++ res = self.run_command("cat %s" % fname, timeout=10) + if res['status'] == 0: + return res['output'] + else: +diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py +new file mode 100644 +index 00000000..649037b9 +--- /dev/null ++++ b/sos/collector/transports/oc.py +@@ -0,0 +1,220 @@ ++# Copyright Red Hat 2021, Jake Hunsaker ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++import json ++import tempfile ++import os ++ ++from sos.collector.transports import RemoteTransport ++from sos.utilities import (is_executable, sos_get_command_output, ++ SoSTimeoutError) ++ ++ ++class OCTransport(RemoteTransport): ++ """This transport leverages the execution of commands via a locally ++ available and configured ``oc`` binary for OCPv4 environments. ++ ++ OCPv4 clusters generally discourage the use of SSH, so this transport may ++ be used to remove our use of SSH in favor of the environment provided ++ method of connecting to nodes and executing commands via debug pods. 
++ ++ Note that this approach will generate multiple debug pods over the course ++ of our execution ++ """ ++ ++ name = 'oc' ++ project = 'sos-collect-tmp' ++ ++ def run_oc(self, cmd, **kwargs): ++ """Format and run a command with `oc` in the project defined for our ++ execution ++ """ ++ return sos_get_command_output( ++ "oc -n sos-collect-tmp %s" % cmd, ++ **kwargs ++ ) ++ ++ @property ++ def connected(self): ++ up = self.run_oc( ++ "wait --timeout=0s --for=condition=ready pod/%s" % self.pod_name ++ ) ++ return up['status'] == 0 ++ ++ def get_node_pod_config(self): ++ """Based on our template for the debug container, add the node-specific ++ items so that we can deploy one of these on each node we're collecting ++ from ++ """ ++ return { ++ "kind": "Pod", ++ "apiVersion": "v1", ++ "metadata": { ++ "name": "%s-sos-collector" % self.address.split('.')[0], ++ "namespace": "sos-collect-tmp" ++ }, ++ "priorityClassName": "system-cluster-critical", ++ "spec": { ++ "volumes": [ ++ { ++ "name": "host", ++ "hostPath": { ++ "path": "/", ++ "type": "Directory" ++ } ++ }, ++ { ++ "name": "run", ++ "hostPath": { ++ "path": "/run", ++ "type": "Directory" ++ } ++ }, ++ { ++ "name": "varlog", ++ "hostPath": { ++ "path": "/var/log", ++ "type": "Directory" ++ } ++ }, ++ { ++ "name": "machine-id", ++ "hostPath": { ++ "path": "/etc/machine-id", ++ "type": "File" ++ } ++ } ++ ], ++ "containers": [ ++ { ++ "name": "sos-collector-tmp", ++ "image": "registry.redhat.io/rhel8/support-tools", ++ "command": [ ++ "/bin/bash" ++ ], ++ "env": [ ++ { ++ "name": "HOST", ++ "value": "/host" ++ } ++ ], ++ "resources": {}, ++ "volumeMounts": [ ++ { ++ "name": "host", ++ "mountPath": "/host" ++ }, ++ { ++ "name": "run", ++ "mountPath": "/run" ++ }, ++ { ++ "name": "varlog", ++ "mountPath": "/var/log" ++ }, ++ { ++ "name": "machine-id", ++ "mountPath": "/etc/machine-id" ++ } ++ ], ++ "securityContext": { ++ "privileged": True, ++ "runAsUser": 0 ++ }, ++ "stdin": True, ++ "stdinOnce": True, ++ "tty": True ++ } ++ ], ++ "restartPolicy": "Never", ++ "nodeName": self.address, ++ "hostNetwork": True, ++ "hostPID": True, ++ "hostIPC": True ++ } ++ } ++ ++ def _connect(self, password): ++ # the oc binary must be _locally_ available for this to work ++ if not is_executable('oc'): ++ return False ++ ++ # deploy the debug container we'll exec into ++ podconf = self.get_node_pod_config() ++ self.pod_name = podconf['metadata']['name'] ++ fd, self.pod_tmp_conf = tempfile.mkstemp(dir=self.tmpdir) ++ with open(fd, 'w') as cfile: ++ json.dump(podconf, cfile) ++ self.log_debug("Starting sos collector container '%s'" % self.pod_name) ++ # this specifically does not need to run with a project definition ++ out = sos_get_command_output( ++ "oc create -f %s" % self.pod_tmp_conf ++ ) ++ if (out['status'] != 0 or "pod/%s created" % self.pod_name not in ++ out['output']): ++ self.log_error("Unable to deploy sos collect pod") ++ self.log_debug("Debug pod deployment failed: %s" % out['output']) ++ return False ++ self.log_debug("Pod '%s' successfully deployed, waiting for pod to " ++ "enter ready state" % self.pod_name) ++ ++ # wait for the pod to report as running ++ try: ++ up = self.run_oc("wait --for=condition=Ready pod/%s --timeout=30s" ++ % self.pod_name, ++ # timeout is for local safety, not oc ++ timeout=40) ++ if not up['status'] == 0: ++ self.log_error("Pod not available after 30 seconds") ++ return False ++ except SoSTimeoutError: ++ self.log_error("Timeout while polling for pod readiness") ++ return False ++ except Exception as err: ++ 
self.log_error("Error while waiting for pod to be ready: %s" ++ % err) ++ return False ++ ++ return True ++ ++ def _format_cmd_for_exec(self, cmd): ++ if cmd.startswith('oc'): ++ return ("oc -n %s exec --request-timeout=0 %s -- chroot /host %s" ++ % (self.project, self.pod_name, cmd)) ++ return super(OCTransport, self)._format_cmd_for_exec(cmd) ++ ++ def run_command(self, cmd, timeout=180, need_root=False, env=None, ++ get_pty=False): ++ # debug pod setup is slow, extend all timeouts to account for this ++ if timeout: ++ timeout += 10 ++ ++ # since we always execute within a bash shell, force disable get_pty ++ # to avoid double-quoting ++ return super(OCTransport, self).run_command(cmd, timeout, need_root, ++ env, False) ++ ++ def _disconnect(self): ++ os.unlink(self.pod_tmp_conf) ++ removed = self.run_oc("delete pod %s" % self.pod_name) ++ if "deleted" not in removed['output']: ++ self.log_debug("Calling delete on pod '%s' failed: %s" ++ % (self.pod_name, removed)) ++ return False ++ return True ++ ++ @property ++ def remote_exec(self): ++ return ("oc -n %s exec --request-timeout=0 %s -- /bin/bash -c" ++ % (self.project, self.pod_name)) ++ ++ def _retrieve_file(self, fname, dest): ++ cmd = self.run_oc("cp %s:%s %s" % (self.pod_name, fname, dest)) ++ return cmd['status'] == 0 +-- +2.31.1 + + +From 460494c4296db1a7529b44fe8f6597544c917c02 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Mon, 11 Oct 2021 11:50:44 -0400 +Subject: [PATCH 3/4] [ocp] Create temporary project and restrict default node + list to masters + +Adds explicit setup of a new project to use in the `ocp` cluster and +adds better handling of cluster setup generally, which the `ocp` cluster +is the first to make use of. + +Included in this change is a correction to +`Cluster.exec_primary_cmd()`'s use of `get_pty`, which is now determined +by whether the primary node is the local node. + +Additionally, based on feedback from the OCP engineering team, by +default restrict node lists to masters. + +Signed-off-by: Jake Hunsaker +--- + sos/collector/__init__.py | 5 ++++ + sos/collector/clusters/__init__.py | 13 +++++++- + sos/collector/clusters/ocp.py | 48 ++++++++++++++++++++++++++++-- + sos/collector/transports/oc.py | 4 +-- + 4 files changed, 64 insertions(+), 6 deletions(-) + +diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py +index fecfe6aa..a76f8a79 100644 +--- a/sos/collector/__init__.py ++++ b/sos/collector/__init__.py +@@ -850,6 +850,7 @@ class SoSCollector(SoSComponent): + "CTRL-C to quit\n") + self.ui_log.info("") + except KeyboardInterrupt: ++ self.cluster.cleanup() + self.exit("Exiting on user cancel", 130) + + def configure_sos_cmd(self): +@@ -1098,6 +1099,7 @@ this utility or remote systems that it connects to. + self.archive.makedirs('sos_logs', 0o755) + + self.collect() ++ self.cluster.cleanup() + self.cleanup() + + def collect(self): +@@ -1156,9 +1158,11 @@ this utility or remote systems that it connects to. + pool.shutdown(wait=True) + except KeyboardInterrupt: + self.log_error('Exiting on user cancel\n') ++ self.cluster.cleanup() + os._exit(130) + except Exception as err: + self.log_error('Could not connect to nodes: %s' % err) ++ self.cluster.cleanup() + os._exit(1) + + if hasattr(self.cluster, 'run_extra_cmd'): +@@ -1199,6 +1199,7 @@ this utility or remote systems that it c + arc_name = self.create_cluster_archive() + else: + msg = 'No sosreports were collected, nothing to archive...'
++ self.cluster.cleanup() + self.exit(msg, 1) + + if self.opts.upload and self.policy.get_upload_url(): +diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py +index cf1e7a0b..2a4665a1 100644 +--- a/sos/collector/clusters/__init__.py ++++ b/sos/collector/clusters/__init__.py +@@ -192,7 +192,8 @@ class Cluster(): + :returns: The output and status of `cmd` + :rtype: ``dict`` + """ +- res = self.master.run_command(cmd, get_pty=True, need_root=need_root) ++ pty = self.master.local is False ++ res = self.master.run_command(cmd, get_pty=pty, need_root=need_root) + if res['output']: + res['output'] = res['output'].replace('Password:', '') + return res +@@ -223,6 +224,16 @@ class Cluster(): + return True + return False + ++ def cleanup(self): ++ """ ++ This may be overridden by clusters ++ ++ Perform any necessary cleanup steps required by the cluster profile. ++ This helps ensure that sos does not make lasting changes to the ++ environment in which we are running ++ """ ++ pass ++ + def get_nodes(self): + """ + This MUST be overridden by a cluster profile subclassing this class +diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py +index a9357dbf..92da4e6e 100644 +--- a/sos/collector/clusters/ocp.py ++++ b/sos/collector/clusters/ocp.py +@@ -23,10 +23,12 @@ class ocp(Cluster): + + api_collect_enabled = False + token = None ++ project = 'sos-collect-tmp' ++ oc_cluster_admin = None + + option_list = [ + ('label', '', 'Colon delimited list of labels to select nodes with'), +- ('role', '', 'Colon delimited list of roles to select nodes with'), ++ ('role', 'master', 'Colon delimited list of roles to filter on'), + ('kubeconfig', '', 'Path to the kubeconfig file'), + ('token', '', 'Service account token to use for oc authorization') + ] +@@ -58,6 +58,42 @@ class ocp(Cluster): + _who = self.fmt_oc_cmd('whoami') + return self.exec_master_cmd(_who)['status'] == 0 + ++ def setup(self): ++ """Create the project that we will be executing in for any nodes' ++ collection via a container image ++ """ ++ if not self.set_transport_type() == 'oc': ++ return ++ ++ out = self.exec_master_cmd(self.fmt_oc_cmd("auth can-i '*' '*'")) ++ self.oc_cluster_admin = out['status'] == 0 ++ if not self.oc_cluster_admin: ++ self.log_debug("Check for cluster-admin privileges returned false," ++ " cannot create project in OCP cluster") ++ raise Exception("Insufficient permissions to create temporary " ++ "collection project.\nAborting...") ++ ++ self.log_info("Creating new temporary project '%s'" % self.project) ++ ret = self.exec_master_cmd("oc new-project %s" % self.project) ++ if ret['status'] == 0: ++ return True ++ ++ self.log_debug("Failed to create project: %s" % ret['output']) ++ raise Exception("Failed to create temporary project for collection. 
" ++ "\nAborting...") ++ ++ def cleanup(self): ++ """Remove the project we created to execute within ++ """ ++ if self.project: ++ ret = self.exec_master_cmd("oc delete project %s" % self.project) ++ if not ret['status'] == 0: ++ self.log_error("Error deleting temporary project: %s" ++ % ret['output']) ++ # don't leave the config on a non-existing project ++ self.exec_master_cmd("oc project default") ++ return True ++ + def _build_dict(self, nodelist): + """From the output of get_nodes(), construct an easier-to-reference + dict of nodes that will be used in determining labels, master status, +@@ -85,10 +123,10 @@ class ocp(Cluster): + return nodes + + def set_transport_type(self): +- if is_executable('oc'): ++ if is_executable('oc') or self.opts.transport == 'oc': + return 'oc' + self.log_info("Local installation of 'oc' not found or is not " +- "correctly configured. Will use ControlPersist") ++ "correctly configured. Will use ControlPersist.") + self.ui_log.warn( + "Preferred transport 'oc' not available, will fallback to SSH." + ) +@@ -106,6 +144,10 @@ class ocp(Cluster): + cmd += " -l %s" % quote(labels) + res = self.exec_master_cmd(self.fmt_oc_cmd(cmd)) + if res['status'] == 0: ++ if self.get_option('role') == 'master': ++ self.log_warn("NOTE: By default, only master nodes are listed." ++ "\nTo collect from all/more nodes, override the " ++ "role option with '-c ocp.role=role1:role2'") + roles = [r for r in self.get_option('role').split(':')] + self.node_dict = self._build_dict(res['output'].splitlines()) + for node in self.node_dict: +diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py +index 649037b9..de044ccb 100644 +--- a/sos/collector/transports/oc.py ++++ b/sos/collector/transports/oc.py +@@ -37,7 +37,7 @@ class OCTransport(RemoteTransport): + execution + """ + return sos_get_command_output( +- "oc -n sos-collect-tmp %s" % cmd, ++ "oc -n %s %s" % (self.project, cmd), + **kwargs + ) + +@@ -58,7 +58,7 @@ class OCTransport(RemoteTransport): + "apiVersion": "v1", + "metadata": { + "name": "%s-sos-collector" % self.address.split('.')[0], +- "namespace": "sos-collect-tmp" ++ "namespace": self.project + }, + "priorityClassName": "system-cluster-critical", + "spec": { +-- +2.31.1 + + +From 1bc0e9fe32491e764e622368bfe216f97bf32620 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Mon, 4 Oct 2021 15:12:04 -0400 +Subject: [PATCH 4/4] [sosnode] Fix typo and small logic break + +Fixes a typo in setting the non-primary node options from the ocp +profile against the sosnode object. Second, fixes a small break in +checksum handling for the manifest discovered during `oc` transport +testing for edge cases. 
+ +Signed-off-by: Jake Hunsaker +--- + sos/collector/clusters/ocp.py | 4 ++-- + sos/collector/sosnode.py | 4 +++- + 2 files changed, 5 insertions(+), 3 deletions(-) + +diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py +index 92da4e6e..22a7289a 100644 +--- a/sos/collector/clusters/ocp.py ++++ b/sos/collector/clusters/ocp.py +@@ -183,7 +183,7 @@ class ocp(Cluster): + if self.api_collect_enabled: + # a primary has already been enabled for API collection, disable + # it among others +- node.plugin_options.append('openshift.no-oc=on') ++ node.plugopts.append('openshift.no-oc=on') + else: + _oc_cmd = 'oc' + if node.host.containerized: +@@ -223,6 +223,6 @@ class ocp(Cluster): + + def set_node_options(self, node): + # don't attempt OC API collections on non-primary nodes +- node.plugin_options.append('openshift.no-oc=on') ++ node.plugopts.append('openshift.no-oc=on') + + # vim: set et ts=4 sw=4 : +diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py +index 8a9dbd7a..ab7f23cc 100644 +--- a/sos/collector/sosnode.py ++++ b/sos/collector/sosnode.py +@@ -714,7 +714,7 @@ class SosNode(): + elif line.startswith("The checksum is: "): + checksum = line.split()[3] + +- if checksum is not None: ++ if checksum: + self.manifest.add_field('checksum', checksum) + if len(checksum) == 32: + self.manifest.add_field('checksum_type', 'md5') +@@ -722,6 +722,8 @@ class SosNode(): + self.manifest.add_field('checksum_type', 'sha256') + else: + self.manifest.add_field('checksum_type', 'unknown') ++ else: ++ self.manifest.add_field('checksum_type', 'unknown') + else: + err = self.determine_sos_error(res['status'], res['output']) + self.log_debug("Error running sos report. rc = %s msg = %s" +-- +2.31.1 + +From 38a0533de3dd2613eefcc4865a2916e225e3ceed Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Tue, 9 Nov 2021 19:34:25 +0100 +Subject: [PATCH] [presets] Optimise OCP preset for hundreds of network + namespaces + +An sos report on OCP with hundreds of namespaces times out in the +networking plugin, as it collects >10 commands for each namespace. + +Use a balanced approach in: +- increasing network.timeout +- limiting the namespaces to traverse +- disabling ethtool per namespace + +to ensure sos report finishes successfully in a reasonable time, +collecting a reasonable amount of data.
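Each plugopt string in the preset corresponds to a '-k' flag on the sos report command line, so the RHOCP preset change below can be approximated manually. A sketch, assuming the option values from the diff that follows:

    plugopts = [
        'networking.timeout=600',
        'networking.ethtool_namespaces=False',
        'networking.namespaces=200',
    ]
    # roughly what the updated RHOCP preset implies as a manual invocation
    print('sos report --all-logs --verify ' +
          ' '.join('-k %s' % opt for opt in plugopts))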
+ +Resolves: #2754 + +Signed-off-by: Pavel Moravec +--- + sos/presets/redhat/__init__.py | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +diff --git a/sos/presets/redhat/__init__.py b/sos/presets/redhat/__init__.py +index e6d63611..865c9b6b 100644 +--- a/sos/presets/redhat/__init__.py ++++ b/sos/presets/redhat/__init__.py +@@ -29,11 +29,15 @@ RHEL_DESC = RHEL_RELEASE_STR + + RHOSP = "rhosp" + RHOSP_DESC = "Red Hat OpenStack Platform" ++RHOSP_OPTS = SoSOptions(plugopts=[ ++ 'process.lsof=off', ++ 'networking.ethtool_namespaces=False', ++ 'networking.namespaces=200']) + + RHOCP = "ocp" + RHOCP_DESC = "OpenShift Container Platform by Red Hat" +-RHOSP_OPTS = SoSOptions(plugopts=[ +- 'process.lsof=off', ++RHOCP_OPTS = SoSOptions(all_logs=True, verify=True, plugopts=[ ++ 'networking.timeout=600', + 'networking.ethtool_namespaces=False', + 'networking.namespaces=200']) + +@@ -62,7 +66,7 @@ RHEL_PRESETS = { + RHEL: PresetDefaults(name=RHEL, desc=RHEL_DESC), + RHOSP: PresetDefaults(name=RHOSP, desc=RHOSP_DESC, opts=RHOSP_OPTS), + RHOCP: PresetDefaults(name=RHOCP, desc=RHOCP_DESC, note=NOTE_SIZE_TIME, +- opts=_opts_all_logs_verify), ++ opts=RHOCP_OPTS), + RH_CFME: PresetDefaults(name=RH_CFME, desc=RH_CFME_DESC, note=NOTE_TIME, + opts=_opts_verify), + RH_SATELLITE: PresetDefaults(name=RH_SATELLITE, desc=RH_SATELLITE_DESC, +-- +2.31.1 + +From 97b93c7f8755d04bdeb4f93759c20dcb787f2046 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Tue, 2 Nov 2021 11:34:13 -0400 +Subject: [PATCH] [Plugin] Rework get_container_logs to be more useful + +`get_container_logs()` is now `add_container_logs()` to align it better +with our more common `add_*` methods for plugin collections. + +Additionally, it has been extended to accept either a single string or a +list of strings like the other methods, and plugin authors may now +specify either specific container names or regexes. + +Signed-off-by: Jake Hunsaker +--- + sos/report/plugins/__init__.py | 22 +++++++++++++++++----- + sos/report/plugins/rabbitmq.py | 2 +- + 2 files changed, 18 insertions(+), 6 deletions(-) + +diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py +index 08eee118..4b0e4fd5 100644 +--- a/sos/report/plugins/__init__.py ++++ b/sos/report/plugins/__init__.py +@@ -2366,20 +2366,32 @@ class Plugin(): + return _runtime.volumes + return [] + +- def get_container_logs(self, container, **kwargs): +- """Helper to get the ``logs`` output for a given container ++ def add_container_logs(self, containers, get_all=False, **kwargs): ++ """Helper to get the ``logs`` output for a given container or list ++ of container names and/or regexes. + + Supports passthru of add_cmd_output() options + +- :param container: The name of the container to retrieve logs from +- :type container: ``str`` ++ :param containers: The name of the container to retrieve logs from, ++ may be a single name or a regex ++ :type containers: ``str`` or ``list` of strs ++ ++ :param get_all: Should non-running containers also be queried? 
++ Default: False ++ :type get_all: ``bool`` + + :param kwargs: Any kwargs supported by ``add_cmd_output()`` are + supported here + """ + _runtime = self._get_container_runtime() + if _runtime is not None: +- self.add_cmd_output(_runtime.get_logs_command(container), **kwargs) ++ if isinstance(containers, str): ++ containers = [containers] ++ for container in containers: ++ _cons = self.get_all_containers_by_regex(container, get_all) ++ for _con in _cons: ++ cmd = _runtime.get_logs_command(_con[1]) ++ self.add_cmd_output(cmd, **kwargs) + + def fmt_container_cmd(self, container, cmd, quotecmd=False): + """Format a command to be executed by the loaded ``ContainerRuntime`` +diff --git a/sos/report/plugins/rabbitmq.py b/sos/report/plugins/rabbitmq.py +index e84b52da..1bfa741f 100644 +--- a/sos/report/plugins/rabbitmq.py ++++ b/sos/report/plugins/rabbitmq.py +@@ -32,7 +32,7 @@ class RabbitMQ(Plugin, IndependentPlugin): + + if in_container: + for container in container_names: +- self.get_container_logs(container) ++ self.add_container_logs(container) + self.add_cmd_output( + self.fmt_container_cmd(container, 'rabbitmqctl report'), + foreground=True +-- +2.31.1 + +From 8bf602108f75db10e449eff5e2266c6466504086 Mon Sep 17 00:00:00 2001 +From: Nadia Pinaeva +Date: Thu, 2 Dec 2021 16:30:44 +0100 +Subject: [PATCH] [clusters:ocp] fix get_nodes function + +Signed-off-by: Nadia Pinaeva +--- + sos/collector/clusters/ocp.py | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py +index 22a7289a..2ce4e977 100644 +--- a/sos/collector/clusters/ocp.py ++++ b/sos/collector/clusters/ocp.py +@@ -150,13 +150,13 @@ class ocp(Cluster): + "role option with '-c ocp.role=role1:role2'") + roles = [r for r in self.get_option('role').split(':')] + self.node_dict = self._build_dict(res['output'].splitlines()) +- for node in self.node_dict: ++ for node_name, node in self.node_dict.items(): + if roles: + for role in roles: +- if role in node: +- nodes.append(node) ++ if role == node['roles']: ++ nodes.append(node_name) + else: +- nodes.append(node) ++ nodes.append(node_name) + else: + msg = "'oc' command failed" + if 'Missing or incomplete' in res['output']: +-- +2.31.1 + +From 5d80ac6dc67e12ef00903436c088a1694f9a7dd7 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Wed, 1 Dec 2021 14:13:16 -0500 +Subject: [PATCH] [collect] Catch command not found exceptions from pexpect + +When running a command that does not exist on the system, catch the +resulting pexpect exception and return the proper status code rather +than allowing an untrapped exception. 
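The pattern described above reduces to wrapping the spawn call and mapping a spawn failure to a conventional result dict; a minimal sketch (run_cmd is a stand-in, not the sos transport API), where 127 mirrors the shell's "command not found" status:

    import pexpect

    def run_cmd(cmd, env=None):
        try:
            child = pexpect.spawn(cmd, encoding='utf-8', env=env)
        except pexpect.exceptions.ExceptionPexpect as err:
            # e.g. the command was not found or was not executable
            return {'status': 127, 'output': str(err)}
        child.expect(pexpect.EOF)
        output = child.before
        child.close()
        return {'status': child.exitstatus or 0, 'output': output}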
+ +Closes: #2768 + +Signed-off-by: Jake Hunsaker +--- + sos/collector/transports/__init__.py | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/sos/collector/transports/__init__.py b/sos/collector/transports/__init__.py +index 7bffee62..33f2f66d 100644 +--- a/sos/collector/transports/__init__.py ++++ b/sos/collector/transports/__init__.py +@@ -225,7 +225,11 @@ class RemoteTransport(): + if not env: + env = None + +- result = pexpect.spawn(cmd, encoding='utf-8', env=env) ++ try: ++ result = pexpect.spawn(cmd, encoding='utf-8', env=env) ++ except pexpect.exceptions.ExceptionPexpect as err: ++ self.log_debug(err.value) ++ return {'status': 127, 'output': ''} + + _expects = [pexpect.EOF, pexpect.TIMEOUT] + if need_root and self.opts.ssh_user != 'root': +-- +2.31.1 + +From decb5d26c165e664fa879a669f2d80165181f0e1 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Thu, 2 Dec 2021 14:02:17 -0500 +Subject: [PATCH] [report,collect] Add option to control default container + runtime + +Adds a new `--container-runtime` option that allows users to control +what default container runtime is used by plugins for container based +collections, effectively overriding policy defaults. + +If no runtimes are active, this option is effectively ignored. If +however runtimes are active, but the requested one is not, raise an +exception to abort collection with an appropriate message to the user. + +Signed-off-by: Jake Hunsaker +--- + man/en/sos-collect.1 | 6 ++++++ + man/en/sos-report.1 | 19 +++++++++++++++++++ + sos/collector/__init__.py | 4 ++++ + sos/collector/sosnode.py | 6 ++++++ + sos/report/__init__.py | 36 ++++++++++++++++++++++++++++++++++++ + 5 files changed, 71 insertions(+) + +diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1 +index a1f6c10e..9b0a5d7b 100644 +--- a/man/en/sos-collect.1 ++++ b/man/en/sos-collect.1 +@@ -11,6 +11,7 @@ sos collect \- Collect sosreports from multiple (cluster) nodes + [\-\-chroot CHROOT] + [\-\-case\-id CASE_ID] + [\-\-cluster\-type CLUSTER_TYPE] ++ [\-\-container\-runtime RUNTIME] + [\-e ENABLE_PLUGINS] + [--encrypt-key KEY]\fR + [--encrypt-pass PASS]\fR +@@ -113,6 +114,11 @@ Example: \fBsos collect --cluster-type=kubernetes\fR will force the kubernetes p + to be run, and thus set sosreport options and attempt to determine a list of nodes using + that profile. + .TP ++\fB\-\-container\-runtime\fR RUNTIME ++\fB sos report\fR option. Using this with \fBcollect\fR will pass this option thru ++to nodes with sos version 4.3 or later. This option controls the default container ++runtime plugins will use for collections. See \fBman sos-report\fR. ++.TP + \fB\-e\fR ENABLE_PLUGINS, \fB\-\-enable\-plugins\fR ENABLE_PLUGINS + Sosreport option. Use this to enable a plugin that would otherwise not be run. + +diff --git a/man/en/sos-report.1 b/man/en/sos-report.1 +index e8efc8f8..464a77e5 100644 +--- a/man/en/sos-report.1 ++++ b/man/en/sos-report.1 +@@ -19,6 +19,7 @@ sos report \- Collect and package diagnostic and support data + [--plugin-timeout TIMEOUT]\fR + [--cmd-timeout TIMEOUT]\fR + [--namespaces NAMESPACES]\fR ++ [--container-runtime RUNTIME]\fR + [-s|--sysroot SYSROOT]\fR + [-c|--chroot {auto|always|never}\fR + [--tmp-dir directory]\fR +@@ -299,6 +300,24 @@ Use '0' (default) for no limit - all namespaces will be used for collections. + + Note that specific plugins may provide a similar `namespaces` plugin option. If + the plugin option is used, it will override this option. 
++.TP ++.B \--container-runtime RUNTIME ++Force the use of the specified RUNTIME as the default runtime that plugins will ++use to collect data from and about containers and container images. By default, ++the setting of \fBauto\fR results in the local policy determining what runtime ++will be the default runtime (in configurations where multiple runtimes are installed ++and active). ++ ++If no container runtimes are active, this option is ignored. If there are runtimes ++active, but not one with a name matching RUNTIME, sos will abort. ++ ++Setting this to \fBnone\fR, \fBoff\fR, or \fBdisabled\fR will cause plugins to ++\fBNOT\fR leverage any active runtimes for collections. Note that if disabled, plugins ++specifically for runtimes (e.g. the podman or docker plugins) will still collect ++general data about the runtime, but will not inspect existing containers or images. ++ ++Default: 'auto' (policy determined) ++.TP + .B \--case-id NUMBER + Specify a case identifier to associate with the archive. + Identifiers may include alphanumeric characters, commas and periods ('.'). +diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py +index 42a7731d..3ad703d3 100644 +--- a/sos/collector/__init__.py ++++ b/sos/collector/__init__.py +@@ -55,6 +55,7 @@ class SoSCollector(SoSComponent): + 'clean': False, + 'cluster_options': [], + 'cluster_type': None, ++ 'container_runtime': 'auto', + 'domains': [], + 'enable_plugins': [], + 'encrypt_key': '', +@@ -268,6 +269,9 @@ class SoSCollector(SoSComponent): + sos_grp.add_argument('--chroot', default='', + choices=['auto', 'always', 'never'], + help="chroot executed commands to SYSROOT") ++ sos_grp.add_argument("--container-runtime", default="auto", ++ help="Default container runtime to use for " ++ "collections. 'auto' for policy control.") + sos_grp.add_argument('-e', '--enable-plugins', action="extend", + help='Enable specific plugins for sosreport') + sos_grp.add_argument('-k', '--plugin-option', '--plugopts', +diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py +index ab7f23cc..f5957e17 100644 +--- a/sos/collector/sosnode.py ++++ b/sos/collector/sosnode.py +@@ -586,6 +586,12 @@ class SosNode(): + sos_opts.append('--cmd-timeout=%s' + % quote(str(self.opts.cmd_timeout))) + ++ if self.check_sos_version('4.3'): ++ if self.opts.container_runtime != 'auto': ++ sos_opts.append( ++ "--container-runtime=%s" % self.opts.container_runtime ++ ) ++ + self.update_cmd_from_cluster() + + sos_cmd = sos_cmd.replace( +diff --git a/sos/report/__init__.py b/sos/report/__init__.py +index a6c72778..0daad82f 100644 +--- a/sos/report/__init__.py ++++ b/sos/report/__init__.py +@@ -82,6 +82,7 @@ class SoSReport(SoSComponent): + 'case_id': '', + 'chroot': 'auto', + 'clean': False, ++ 'container_runtime': 'auto', + 'keep_binary_files': False, + 'desc': '', + 'domains': [], +@@ -187,6 +188,7 @@ class SoSReport(SoSComponent): + self.tempfile_util.clean() + self._exit(1) + ++ self._check_container_runtime() + self._get_hardware_devices() + self._get_namespaces() + +@@ -218,6 +220,9 @@ class SoSReport(SoSComponent): + dest="chroot", default='auto', + help="chroot executed commands to SYSROOT " + "[auto, always, never] (default=auto)") ++ report_grp.add_argument("--container-runtime", default="auto", ++ help="Default container runtime to use for " ++ "collections. 
'auto' for policy control.") + report_grp.add_argument("--desc", "--description", type=str, + action="store", default="", + help="Description for a new preset",) +@@ -373,6 +378,37 @@ class SoSReport(SoSComponent): + } + # TODO: enumerate network devices, preferably with devtype info + ++ def _check_container_runtime(self): ++ """Check the loaded container runtimes, and the policy default runtime ++ (if set), against any requested --container-runtime value. This can be ++ useful for systems that have multiple runtimes, such as RHCOS, but do ++ not have a clearly defined 'default' (or one that is determined based ++ entirely on configuration). ++ """ ++ if self.opts.container_runtime != 'auto': ++ crun = self.opts.container_runtime.lower() ++ if crun in ['none', 'off', 'disabled']: ++ self.policy.runtimes = {} ++ self.soslog.info( ++ "Disabled all container runtimes per user option." ++ ) ++ elif not self.policy.runtimes: ++ msg = ("WARNING: No container runtimes are active, ignoring " ++ "option to set default runtime to '%s'\n" % crun) ++ self.soslog.warn(msg) ++ elif crun not in self.policy.runtimes.keys(): ++ valid = ', '.join(p for p in self.policy.runtimes.keys() ++ if p != 'default') ++ raise Exception("Cannot use container runtime '%s': no such " ++ "runtime detected. Available runtimes: %s" ++ % (crun, valid)) ++ else: ++ self.policy.runtimes['default'] = self.policy.runtimes[crun] ++ self.soslog.info( ++ "Set default container runtime to '%s'" ++ % self.policy.runtimes['default'].name ++ ) ++ + def get_fibre_devs(self): + """Enumerate a list of fibrechannel devices on this system so that + plugins can iterate over them +-- +2.31.1 + +From 9d4b5af39d76ac99afa40d004fe9888633218356 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Fri, 3 Dec 2021 13:37:09 -0500 +Subject: [PATCH 1/2] [Plugin] Add container parameter for add_cmd_output() + +Adds a new `container` parameter for `Plugin.add_cmd_output()`, which if +set will format all commands passed to that call for execution in the +specified container. + +`Plugin.fmt_container_cmd()` is called for this purpose, and has been +modified so that if the given container does not exist, an empty string +is returned instead, thus preventing execution on the host. + +Signed-off-by: Jake Hunsaker +--- + sos/report/plugins/__init__.py | 16 ++++++++++++++-- + 1 file changed, 14 insertions(+), 2 deletions(-) + +diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py +index e180ae17..3ff7c191 100644 +--- a/sos/report/plugins/__init__.py ++++ b/sos/report/plugins/__init__.py +@@ -1707,7 +1707,7 @@ class Plugin(): + chroot=True, runat=None, env=None, binary=False, + sizelimit=None, pred=None, subdir=None, + changes=False, foreground=False, tags=[], +- priority=10, cmd_as_tag=False): ++ priority=10, cmd_as_tag=False, container=None): + """Run a program or a list of programs and collect the output + + Output will be limited to `sizelimit`, collecting the last X amount +@@ -1772,6 +1772,10 @@ class Plugin(): + :param cmd_as_tag: Should the command string be automatically formatted + to a tag?
+ :type cmd_as_tag: ``bool`` ++ ++ :param container: Run the specified `cmds` inside a container with this ++ ID or name ++ :type container: ``str`` + """ + if isinstance(cmds, str): + cmds = [cmds] +@@ -1782,6 +1786,14 @@ class Plugin(): + if pred is None: + pred = self.get_predicate(cmd=True) + for cmd in cmds: ++ if container: ++ ocmd = cmd ++ cmd = self.fmt_container_cmd(container, cmd) ++ if not cmd: ++ self._log_debug("Skipping command '%s' as the requested " ++ "container '%s' does not exist." ++ % (ocmd, container)) ++ continue + self._add_cmd_output(cmd=cmd, suggest_filename=suggest_filename, + root_symlink=root_symlink, timeout=timeout, + stderr=stderr, chroot=chroot, runat=runat, +@@ -2420,7 +2432,7 @@ class Plugin(): + if self.container_exists(container): + _runtime = self._get_container_runtime() + return _runtime.fmt_container_cmd(container, cmd, quotecmd) +- return cmd ++ return '' + + def is_module_loaded(self, module_name): + """Determine whether specified module is loaded or not +-- +2.31.1 + + +From 874d2adfbff9e51dc902669af3c4a5083dbc19b1 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Fri, 3 Dec 2021 14:49:43 -0500 +Subject: [PATCH 2/2] [plugins] Update existing plugins to use a_c_o container + parameter + +Updates plugins currently calling `fmt_container_cmd()` in their +`add_cmd_output()` calls to instead use the new `container` parameter +and rely on the automatic formatting. + +Signed-off-by: Jake Hunsaker +--- + sos/report/plugins/opencontrail.py | 3 +-- + sos/report/plugins/openstack_database.py | 20 ++++++-------------- + sos/report/plugins/openstack_designate.py | 6 ++---- + sos/report/plugins/openstack_ironic.py | 3 +-- + sos/report/plugins/ovn_central.py | 7 +++---- + sos/report/plugins/rabbitmq.py | 11 ++++++----- + 9 files changed, 47 insertions(+), 69 deletions(-) + +diff --git a/sos/report/plugins/opencontrail.py b/sos/report/plugins/opencontrail.py +index b368bffe..76c03e21 100644 +--- a/sos/report/plugins/opencontrail.py ++++ b/sos/report/plugins/opencontrail.py +@@ -25,8 +25,7 @@ class OpenContrail(Plugin, IndependentPlugin): + cnames = self.get_containers(get_all=True) + cnames = [c[1] for c in cnames if 'opencontrail' in c[1]] + for cntr in cnames: +- _cmd = self.fmt_container_cmd(cntr, 'contrail-status') +- self.add_cmd_output(_cmd) ++ self.add_cmd_output('contrail-status', container=cntr) + else: + self.add_cmd_output("contrail-status") + +diff --git a/sos/report/plugins/openstack_database.py b/sos/report/plugins/openstack_database.py +index 1e98fabf..e9f84cf8 100644 +--- a/sos/report/plugins/openstack_database.py ++++ b/sos/report/plugins/openstack_database.py +@@ -37,36 +37,28 @@ class OpenStackDatabase(Plugin): + ] + + def setup(self): +- +- in_container = False + # determine if we're running databases on the host or in a container + _db_containers = [ + 'galera-bundle-.*', # overcloud + 'mysql' # undercloud + ] + ++ cname = None + for container in _db_containers: + cname = self.get_container_by_name(container) +- if cname is not None: +- in_container = True ++ if cname: + break + +- if in_container: +- fname = "clustercheck_%s" % cname +- cmd = self.fmt_container_cmd(cname, 'clustercheck') +- self.add_cmd_output(cmd, timeout=15, suggest_filename=fname) +- else: +- self.add_cmd_output('clustercheck', timeout=15) ++ fname = "clustercheck_%s" % cname if cname else None ++ self.add_cmd_output('clustercheck', container=cname, timeout=15, ++ suggest_filename=fname) + + if self.get_option('dump') or self.get_option('dumpall'): + db_dump = 
self.get_mysql_db_string(container=cname) + db_cmd = "mysqldump --opt %s" % db_dump + +- if in_container: +- db_cmd = self.fmt_container_cmd(cname, db_cmd) +- + self.add_cmd_output(db_cmd, suggest_filename='mysql_dump.sql', +- sizelimit=0) ++ sizelimit=0, container=cname) + + def get_mysql_db_string(self, container=None): + +diff --git a/sos/report/plugins/openstack_designate.py b/sos/report/plugins/openstack_designate.py +index 0ae991b0..a2ea37ab 100644 +--- a/sos/report/plugins/openstack_designate.py ++++ b/sos/report/plugins/openstack_designate.py +@@ -20,12 +20,10 @@ class OpenStackDesignate(Plugin): + + def setup(self): + # collect current pool config +- pools_cmd = self.fmt_container_cmd( +- self.get_container_by_name(".*designate_central"), +- "designate-manage pool generate_file --file /dev/stdout") + + self.add_cmd_output( +- pools_cmd, ++ "designate-manage pool generate_file --file /dev/stdout", ++ container=self.get_container_by_name(".*designate_central"), + suggest_filename="openstack_designate_current_pools.yaml" + ) + +diff --git a/sos/report/plugins/openstack_ironic.py b/sos/report/plugins/openstack_ironic.py +index c36fb6b6..49beb2d9 100644 +--- a/sos/report/plugins/openstack_ironic.py ++++ b/sos/report/plugins/openstack_ironic.py +@@ -80,8 +80,7 @@ class OpenStackIronic(Plugin): + 'ironic_pxe_tftp', 'ironic_neutron_agent', + 'ironic_conductor', 'ironic_api']: + if self.container_exists('.*' + container_name): +- self.add_cmd_output(self.fmt_container_cmd(container_name, +- 'rpm -qa')) ++ self.add_cmd_output('rpm -qa', container=container_name) + + else: + self.conf_list = [ +diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py +index 914eda60..ddbf288d 100644 +--- a/sos/report/plugins/ovn_central.py ++++ b/sos/report/plugins/ovn_central.py +@@ -123,11 +123,10 @@ class OVNCentral(Plugin): + + # If OVN is containerized, we need to run the above commands inside + # the container. +- cmds = [ +- self.fmt_container_cmd(self._container_name, cmd) for cmd in cmds +- ] + +- self.add_cmd_output(cmds, foreground=True) ++ self.add_cmd_output( ++ cmds, foreground=True, container=self._container_name ++ ) + + self.add_copy_spec("/etc/sysconfig/ovn-northd") + +diff --git a/sos/report/plugins/rabbitmq.py b/sos/report/plugins/rabbitmq.py +index 1bfa741f..607802e4 100644 +--- a/sos/report/plugins/rabbitmq.py ++++ b/sos/report/plugins/rabbitmq.py +@@ -34,14 +34,15 @@ class RabbitMQ(Plugin, IndependentPlugin): + for container in container_names: + self.add_container_logs(container) + self.add_cmd_output( +- self.fmt_container_cmd(container, 'rabbitmqctl report'), ++ 'rabbitmqctl report', ++ container=container, + foreground=True + ) + self.add_cmd_output( +- self.fmt_container_cmd( +- container, "rabbitmqctl eval " +- "'rabbit_diagnostics:maybe_stuck().'"), +- foreground=True, timeout=10 ++ "rabbitmqctl eval 'rabbit_diagnostics:maybe_stuck().'", ++ container=container, ++ foreground=True, ++ timeout=10 + ) + else: + self.add_cmd_output("rabbitmqctl report") +-- +2.31.1 + +From faa15754f82e9841cd624afe18dc2198644decdf Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Wed, 8 Dec 2021 13:51:20 -0500 +Subject: [PATCH] [Policy,collect] Prevent remote node policies from setting + local PATH + +This commit fixes an issue where policies loaded for remote nodes when +using `sos collect` would override the PATH setting for the local +policy, which in turn could prevent successful execution of cluster +profile operations. 
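The fix below reduces to one rule: only a policy that represents the local node may touch the process environment. A minimal sketch of that rule (the class shape is illustrative, not the sos API):

    import os

    class Policy:
        PATH = '/sbin:/bin:/usr/sbin:/usr/bin'

        def __init__(self, remote_exec=None):
            # remote_exec is set when this policy was loaded for a remote
            # node; in that case the local PATH must be left untouched
            self.remote_exec = remote_exec
            if not self.remote_exec:
                self.set_exec_path()

        def set_exec_path(self):
            os.environ['PATH'] = self.PATH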
+ +Related: #2777 + +Signed-off-by: Jake Hunsaker +--- + sos/policies/__init__.py | 8 +++++--- + sos/policies/distros/__init__.py | 6 ++++-- + sos/policies/distros/debian.py | 3 ++- + sos/policies/distros/redhat.py | 6 ++++-- + sos/policies/distros/suse.py | 3 ++- + 5 files changed, 17 insertions(+), 9 deletions(-) + +diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py +index ef9188de..826d03a1 100644 +--- a/sos/policies/__init__.py ++++ b/sos/policies/__init__.py +@@ -45,7 +45,7 @@ def load(cache={}, sysroot=None, init=None, probe_runtime=True, + return cache['policy'] + + +-class Policy(object): ++class Policy(): + """Policies represent distributions that sos supports, and define the way + in which sos behaves on those distributions. A policy should define at + minimum a way to identify the distribution, and a package manager to allow +@@ -111,7 +111,7 @@ any third party. + presets_path = PRESETS_PATH + _in_container = False + +- def __init__(self, sysroot=None, probe_runtime=True): ++ def __init__(self, sysroot=None, probe_runtime=True, remote_exec=None): + """Subclasses that choose to override this initializer should call + super() to ensure that they get the required platform bits attached. + super(SubClass, self).__init__(). Policies that require runtime +@@ -122,7 +122,9 @@ any third party. + self.probe_runtime = probe_runtime + self.package_manager = PackageManager() + self.valid_subclasses = [IndependentPlugin] +- self.set_exec_path() ++ self.remote_exec = remote_exec ++ if not self.remote_exec: ++ self.set_exec_path() + self.sysroot = sysroot + self.register_presets(GENERIC_PRESETS) + +diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py +index c69fc1e7..9c91a918 100644 +--- a/sos/policies/distros/__init__.py ++++ b/sos/policies/distros/__init__.py +@@ -68,9 +68,11 @@ class LinuxPolicy(Policy): + container_version_command = None + container_authfile = None + +- def __init__(self, sysroot=None, init=None, probe_runtime=True): ++ def __init__(self, sysroot=None, init=None, probe_runtime=True, ++ remote_exec=None): + super(LinuxPolicy, self).__init__(sysroot=sysroot, +- probe_runtime=probe_runtime) ++ probe_runtime=probe_runtime, ++ remote_exec=remote_exec) + + if sysroot: + self.sysroot = sysroot +diff --git a/sos/policies/distros/debian.py b/sos/policies/distros/debian.py +index 639fd5eb..41f09428 100644 +--- a/sos/policies/distros/debian.py ++++ b/sos/policies/distros/debian.py +@@ -26,7 +26,8 @@ class DebianPolicy(LinuxPolicy): + def __init__(self, sysroot=None, init=None, probe_runtime=True, + remote_exec=None): + super(DebianPolicy, self).__init__(sysroot=sysroot, init=init, +- probe_runtime=probe_runtime) ++ probe_runtime=probe_runtime, ++ remote_exec=remote_exec) + self.package_manager = DpkgPackageManager(chroot=self.sysroot, + remote_exec=remote_exec) + self.valid_subclasses += [DebianPlugin] +diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py +index 4b14abaf..eb75e15b 100644 +--- a/sos/policies/distros/redhat.py ++++ b/sos/policies/distros/redhat.py +@@ -53,7 +53,8 @@ class RedHatPolicy(LinuxPolicy): + def __init__(self, sysroot=None, init=None, probe_runtime=True, + remote_exec=None): + super(RedHatPolicy, self).__init__(sysroot=sysroot, init=init, +- probe_runtime=probe_runtime) ++ probe_runtime=probe_runtime, ++ remote_exec=remote_exec) + self.usrmove = False + + self.package_manager = RpmPackageManager(chroot=self.sysroot, +@@ -76,7 +77,8 @@ class RedHatPolicy(LinuxPolicy): + self.PATH = 
"/sbin:/bin:/usr/sbin:/usr/bin:/root/bin" + self.PATH += os.pathsep + "/usr/local/bin" + self.PATH += os.pathsep + "/usr/local/sbin" +- self.set_exec_path() ++ if not self.remote_exec: ++ self.set_exec_path() + self.load_presets() + + @classmethod +diff --git a/sos/policies/distros/suse.py b/sos/policies/distros/suse.py +index 1c1feff5..b9d4a3b1 100644 +--- a/sos/policies/distros/suse.py ++++ b/sos/policies/distros/suse.py +@@ -25,7 +25,8 @@ class SuSEPolicy(LinuxPolicy): + def __init__(self, sysroot=None, init=None, probe_runtime=True, + remote_exec=None): + super(SuSEPolicy, self).__init__(sysroot=sysroot, init=init, +- probe_runtime=probe_runtime) ++ probe_runtime=probe_runtime, ++ remote_exec=remote_exec) + self.valid_subclasses += [SuSEPlugin, RedHatPlugin] + + self.usrmove = False +-- +2.31.1 + +From d4383fec5f8a80121aa4f5a37575b37988c51663 Mon Sep 17 00:00:00 2001 +From: Nadia Pinaeva +Date: Wed, 1 Dec 2021 12:23:34 +0100 +Subject: [PATCH] Add crio runtime and openshift_ovn plugin openshift_ovn + plugin collects logs from crio containers Fix get_container_by_name function + returning container_id and not name + +Signed-off-by: Nadia Pinaeva +--- + sos/policies/distros/__init__.py | 4 +- + sos/policies/runtimes/__init__.py | 2 +- + sos/policies/runtimes/crio.py | 79 +++++++++++++++++++++++++++++ + sos/report/plugins/openshift_ovn.py | 41 +++++++++++++++ + 4 files changed, 124 insertions(+), 2 deletions(-) + create mode 100644 sos/policies/runtimes/crio.py + create mode 100644 sos/report/plugins/openshift_ovn.py + +diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py +index 9c91a918..7acc7e49 100644 +--- a/sos/policies/distros/__init__.py ++++ b/sos/policies/distros/__init__.py +@@ -17,6 +17,7 @@ from sos import _sos as _ + from sos.policies import Policy + from sos.policies.init_systems import InitSystem + from sos.policies.init_systems.systemd import SystemdInit ++from sos.policies.runtimes.crio import CrioContainerRuntime + from sos.policies.runtimes.podman import PodmanContainerRuntime + from sos.policies.runtimes.docker import DockerContainerRuntime + +@@ -92,7 +93,8 @@ class LinuxPolicy(Policy): + if self.probe_runtime: + _crun = [ + PodmanContainerRuntime(policy=self), +- DockerContainerRuntime(policy=self) ++ DockerContainerRuntime(policy=self), ++ CrioContainerRuntime(policy=self) + ] + for runtime in _crun: + if runtime.check_is_active(): +diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py +index 2e60ad23..4e9a45c1 100644 +--- a/sos/policies/runtimes/__init__.py ++++ b/sos/policies/runtimes/__init__.py +@@ -100,7 +100,7 @@ class ContainerRuntime(): + return None + for c in self.containers: + if re.match(name, c[1]): +- return c[1] ++ return c[0] + return None + + def get_images(self): +diff --git a/sos/policies/runtimes/crio.py b/sos/policies/runtimes/crio.py +new file mode 100644 +index 00000000..980c3ea1 +--- /dev/null ++++ b/sos/policies/runtimes/crio.py +@@ -0,0 +1,79 @@ ++# Copyright (C) 2021 Red Hat, Inc., Nadia Pinaeva ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. 
++ ++from sos.policies.runtimes import ContainerRuntime ++from sos.utilities import sos_get_command_output ++from pipes import quote ++ ++ ++class CrioContainerRuntime(ContainerRuntime): ++ """Runtime class to use for systems running crio""" ++ ++ name = 'crio' ++ binary = 'crictl' ++ ++ def get_containers(self, get_all=False): ++ """Get a list of containers present on the system. ++ ++ :param get_all: If set, include stopped containers as well ++ :type get_all: ``bool`` ++ """ ++ containers = [] ++ _cmd = "%s ps %s" % (self.binary, '-a' if get_all else '') ++ if self.active: ++ out = sos_get_command_output(_cmd, chroot=self.policy.sysroot) ++ if out['status'] == 0: ++ for ent in out['output'].splitlines()[1:]: ++ ent = ent.split() ++ # takes the form (container_id, container_name) ++ containers.append((ent[0], ent[-3])) ++ return containers ++ ++ def get_images(self): ++ """Get a list of images present on the system ++ ++ :returns: A list of 2-tuples containing (image_name, image_id) ++ :rtype: ``list`` ++ """ ++ images = [] ++ if self.active: ++ out = sos_get_command_output("%s images" % self.binary, ++ chroot=self.policy.sysroot) ++ if out['status'] == 0: ++ for ent in out['output'].splitlines(): ++ ent = ent.split() ++ # takes the form (image_name, image_id) ++ images.append((ent[0] + ':' + ent[1], ent[2])) ++ return images ++ ++ def fmt_container_cmd(self, container, cmd, quotecmd): ++ """Format a command to run inside a container using the runtime ++ ++ :param container: The name or ID of the container in which to run ++ :type container: ``str`` ++ ++ :param cmd: The command to run inside `container` ++ :type cmd: ``str`` ++ ++ :param quotecmd: Whether the cmd should be quoted. ++ :type quotecmd: ``bool`` ++ ++ :returns: Formatted string to run `cmd` inside `container` ++ :rtype: ``str`` ++ """ ++ if quotecmd: ++ quoted_cmd = quote(cmd) ++ else: ++ quoted_cmd = cmd ++ container_id = self.get_container_by_name(container) ++ return "%s %s %s" % (self.run_cmd, container_id, ++ quoted_cmd) if container_id is not None else '' ++ ++# vim: set et ts=4 sw=4 : +diff --git a/sos/report/plugins/openshift_ovn.py b/sos/report/plugins/openshift_ovn.py +new file mode 100644 +index 00000000..168f1dd3 +--- /dev/null ++++ b/sos/report/plugins/openshift_ovn.py +@@ -0,0 +1,41 @@ ++# Copyright (C) 2021 Nadia Pinaeva ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++from sos.report.plugins import Plugin, RedHatPlugin ++ ++ ++class OpenshiftOVN(Plugin, RedHatPlugin): ++ """This plugin is used to collect OCP 4.x OVN logs. 
++ """ ++ short_desc = 'Openshift OVN' ++ plugin_name = "openshift_ovn" ++ containers = ('ovnkube-master', 'ovn-ipsec') ++ profiles = ('openshift',) ++ ++ def setup(self): ++ self.add_copy_spec([ ++ "/var/lib/ovn/etc/ovnnb_db.db", ++ "/var/lib/ovn/etc/ovnsb_db.db", ++ "/var/lib/openvswitch/etc/keys", ++ "/var/log/openvswitch/libreswan.log", ++ "/var/log/openvswitch/ovs-monitor-ipsec.log" ++ ]) ++ ++ self.add_cmd_output([ ++ 'ovn-appctl -t /var/run/ovn/ovnnb_db.ctl ' + ++ 'cluster/status OVN_Northbound', ++ 'ovn-appctl -t /var/run/ovn/ovnsb_db.ctl ' + ++ 'cluster/status OVN_Southbound'], ++ container='ovnkube-master') ++ self.add_cmd_output([ ++ 'ovs-appctl -t ovs-monitor-ipsec tunnels/show', ++ 'ipsec status', ++ 'certutil -L -d sql:/etc/ipsec.d'], ++ container='ovn-ipsec') +-- +2.31.1 + +From 17218ca17e49cb8491c688095b56503d041c1ae9 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Thu, 9 Dec 2021 15:07:23 -0500 +Subject: [PATCH 1/3] [ocp] Skip project setup whenever oc transport is not + used + +Fixes a corner case where we would still attempt to create a new project +within the OCP cluster even if we weren't using the `oc` transport. + +Signed-off-by: Jake Hunsaker +--- + sos/collector/clusters/ocp.py | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py +index 2ce4e977..56f8cc47 100644 +--- a/sos/collector/clusters/ocp.py ++++ b/sos/collector/clusters/ocp.py +@@ -123,7 +123,9 @@ class ocp(Cluster): + return nodes + + def set_transport_type(self): +- if is_executable('oc') or self.opts.transport == 'oc': ++ if self.opts.transport != 'auto': ++ return self.opts.transport ++ if is_executable('oc'): + return 'oc' + self.log_info("Local installation of 'oc' not found or is not " + "correctly configured. Will use ControlPersist.") +-- +2.31.1 + + +From 9faabdc3df08516a91c1adb3326bbf43db155f71 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Thu, 9 Dec 2021 16:04:39 -0500 +Subject: [PATCH 2/3] [crio] Put inspect output in the containers subdir + +Given the environments where crio is run, having `crictl inspect` output +in the main plugin directory can be a bit overwhelming. As such, put +this output into a `containers` subdir, and nest container log output in +a `containers/logs/` subdir. + +Signed-off-by: Jake Hunsaker +--- + sos/report/plugins/crio.py | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/sos/report/plugins/crio.py b/sos/report/plugins/crio.py +index cb2c9796..56cf64a7 100644 +--- a/sos/report/plugins/crio.py ++++ b/sos/report/plugins/crio.py +@@ -79,10 +79,11 @@ class CRIO(Plugin, RedHatPlugin, UbuntuPlugin): + pods = self._get_crio_list(pod_cmd) + + for container in containers: +- self.add_cmd_output("crictl inspect %s" % container) ++ self.add_cmd_output("crictl inspect %s" % container, ++ subdir="containers") + if self.get_option('logs'): + self.add_cmd_output("crictl logs -t %s" % container, +- subdir="containers", priority=100) ++ subdir="containers/logs", priority=100) + + for image in images: + self.add_cmd_output("crictl inspecti %s" % image, subdir="images") +-- +2.31.1 + + +From 9118562c47fb521da3eeeed1a8746d45aaa769e7 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Thu, 9 Dec 2021 16:06:06 -0500 +Subject: [PATCH 3/3] [networking] Put namespaced commands into subdirs + +Where networking namespaces are used, there tend to be large numbers of +namespaces used. 
This in turn results in sos running and collecting very +large numbers of namespaced commands. + +To aid in consumability, place these collections in a per-namespace +subdir beneath a "namespaces" subdir within the plugin directory. + +Signed-off-by: Jake Hunsaker +--- + sos/report/plugins/networking.py | 27 ++++++++++++--------------- + 1 file changed, 12 insertions(+), 15 deletions(-) + +diff --git a/sos/report/plugins/networking.py b/sos/report/plugins/networking.py +index 80e24abb..bcb5e6ae 100644 +--- a/sos/report/plugins/networking.py ++++ b/sos/report/plugins/networking.py +@@ -198,6 +198,7 @@ class Networking(Plugin): + pred=SoSPredicate(self, cmd_outputs=co6)) + else None) + for namespace in namespaces: ++ _subdir = "namespaces/%s" % namespace + ns_cmd_prefix = cmd_prefix + namespace + " " + self.add_cmd_output([ + ns_cmd_prefix + "ip address show", +@@ -213,29 +214,27 @@ class Networking(Plugin): + ns_cmd_prefix + "netstat -s", + ns_cmd_prefix + "netstat %s -agn" % self.ns_wide, + ns_cmd_prefix + "nstat -zas", +- ], priority=50) ++ ], priority=50, subdir=_subdir) + self.add_cmd_output([ns_cmd_prefix + "iptables-save"], + pred=iptables_with_nft, ++ subdir=_subdir, + priority=50) + self.add_cmd_output([ns_cmd_prefix + "ip6tables-save"], + pred=ip6tables_with_nft, ++ subdir=_subdir, + priority=50) + + ss_cmd = ns_cmd_prefix + "ss -peaonmi" + # --allow-system-changes is handled directly in predicate + # evaluation, so plugin code does not need to separately + # check for it +- self.add_cmd_output(ss_cmd, pred=ss_pred) +- +- # Collect ethtool commands only when ethtool_namespaces +- # is set to true. +- if self.get_option("ethtool_namespaces"): +- # Devices that exist in a namespace use less ethtool +- # parameters. Run this per namespace. +- for namespace in self.get_network_namespaces( +- self.get_option("namespace_pattern"), +- self.get_option("namespaces")): +- ns_cmd_prefix = cmd_prefix + namespace + " " ++ self.add_cmd_output(ss_cmd, pred=ss_pred, subdir=_subdir) ++ ++ # Collect ethtool commands only when ethtool_namespaces ++ # is set to true. ++ if self.get_option("ethtool_namespaces"): ++ # Devices that exist in a namespace use less ethtool ++ # parameters. Run this per namespace. + netns_netdev_list = self.exec_cmd( + ns_cmd_prefix + "ls -1 /sys/class/net/" + ) +@@ -250,9 +249,7 @@ class Networking(Plugin): + ns_cmd_prefix + "ethtool -i " + eth, + ns_cmd_prefix + "ethtool -k " + eth, + ns_cmd_prefix + "ethtool -S " + eth +- ], priority=50) +- +- return ++ ], priority=50, subdir=_subdir) + + + class RedHatNetworking(Networking, RedHatPlugin): +-- +2.31.1 + +From 4bf5f9143c962c839c1d27217ba74127551a5c00 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Fri, 17 Dec 2021 11:10:15 -0500 +Subject: [PATCH] [transport] Detect retrieval failures and automatically retry + +If a particular attempt to retrieve a remote file fails, we should +automatically retry that collection up to a certain point. This gives +`sos collect` more resiliency when collecting sos report archives. + +This change necessitates a change in how we handle the SoSNode flow for +failed sos report retrievals, and as such contains minor fixes to +transports to ensure that we do not incorrectly hit exceptions in error +handling that were not previously possible with how we exited the +SoSNode retrieval flow.
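The retry behavior below is a bounded loop around a single-attempt copy; a minimal sketch, where retrieve_once stands in for a transport's _retrieve_file:

    def retrieve_with_retries(retrieve_once, fname, dest, max_attempts=5):
        for attempt in range(1, max_attempts + 1):
            if retrieve_once(fname, dest):
                return True
            print('File retrieval attempt %s failed' % attempt)
        print('File retrieval failed after %s attempts' % max_attempts)
        return False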
+ +Closes: #2777 + +Signed-off-by: Jake Hunsaker +--- + sos/collector/__init__.py | 5 +++-- + sos/collector/clusters/ocp.py | 1 + + sos/collector/sosnode.py | 17 ++++++++++------- + sos/collector/transports/__init__.py | 15 ++++++++++++++- + sos/collector/transports/local.py | 1 + + sos/collector/transports/oc.py | 3 ++- + 6 files changed, 31 insertions(+), 11 deletions(-) + +diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py +index b825d8fc..a25e794e 100644 +--- a/sos/collector/__init__.py ++++ b/sos/collector/__init__.py +@@ -1221,8 +1221,9 @@ this utility or remote systems that it connects to. + def close_all_connections(self): + """Close all sessions for nodes""" + for client in self.client_list: +- self.log_debug('Closing connection to %s' % client.address) +- client.disconnect() ++ if client.connected: ++ self.log_debug('Closing connection to %s' % client.address) ++ client.disconnect() + + def create_cluster_archive(self): + """Calls for creation of tar archive then cleans up the temporary +diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py +index 56f8cc47..ae93ad58 100644 +--- a/sos/collector/clusters/ocp.py ++++ b/sos/collector/clusters/ocp.py +@@ -92,6 +92,7 @@ class ocp(Cluster): + % ret['output']) + # don't leave the config on a non-existing project + self.exec_master_cmd("oc project default") ++ self.project = None + return True + + def _build_dict(self, nodelist): +diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py +index 1341e39f..925f2790 100644 +--- a/sos/collector/sosnode.py ++++ b/sos/collector/sosnode.py +@@ -751,12 +751,11 @@ class SosNode(): + if self.file_exists(path): + self.log_info("Copying remote %s to local %s" % + (path, destdir)) +- self._transport.retrieve_file(path, dest) ++ return self._transport.retrieve_file(path, dest) + else: + self.log_debug("Attempting to copy remote file %s, but it " + "does not exist on filesystem" % path) + return False +- return True + except Exception as err: + self.log_debug("Failed to retrieve %s: %s" % (path, err)) + return False +@@ -793,16 +792,20 @@ class SosNode(): + except Exception: + self.log_error('Failed to make archive readable') + return False +- self.soslog.info('Retrieving sos report from %s' % self.address) ++ self.log_info('Retrieving sos report from %s' % self.address) + self.ui_msg('Retrieving sos report...') +- ret = self.retrieve_file(self.sos_path) ++ try: ++ ret = self.retrieve_file(self.sos_path) ++ except Exception as err: ++ self.log_error(err) ++ return False + if ret: + self.ui_msg('Successfully collected sos report') + self.file_list.append(self.sos_path.split('/')[-1]) ++ return True + else: +- self.log_error('Failed to retrieve sos report') +- raise SystemExit +- return True ++ self.ui_msg('Failed to retrieve sos report') ++ return False + else: + # sos sometimes fails but still returns a 0 exit code + if self.stderr.read(): +diff --git a/sos/collector/transports/__init__.py b/sos/collector/transports/__init__.py +index 33f2f66d..dcdebdde 100644 +--- a/sos/collector/transports/__init__.py ++++ b/sos/collector/transports/__init__.py +@@ -303,7 +303,20 @@ class RemoteTransport(): + :returns: True if file was successfully copied from remote, or False + :rtype: ``bool`` + """ +- return self._retrieve_file(fname, dest) ++ attempts = 0 ++ try: ++ while attempts < 5: ++ attempts += 1 ++ ret = self._retrieve_file(fname, dest) ++ if ret: ++ return True ++ self.log_info("File retrieval attempt %s failed" % attempts) ++ self.log_info("File retrieval failed 
after 5 attempts") ++ return False ++ except Exception as err: ++ self.log_error("Exception encountered during retrieval attempt %s " ++ "for %s: %s" % (attempts, fname, err)) ++ raise err + + def _retrieve_file(self, fname, dest): + raise NotImplementedError("Transport %s does not support file copying" +diff --git a/sos/collector/transports/local.py b/sos/collector/transports/local.py +index a4897f19..2996d524 100644 +--- a/sos/collector/transports/local.py ++++ b/sos/collector/transports/local.py +@@ -35,6 +35,7 @@ class LocalTransport(RemoteTransport): + def _retrieve_file(self, fname, dest): + self.log_debug("Moving %s to %s" % (fname, dest)) + shutil.copy(fname, dest) ++ return True + + def _format_cmd_for_exec(self, cmd): + return cmd +diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py +index de044ccb..720dd61d 100644 +--- a/sos/collector/transports/oc.py ++++ b/sos/collector/transports/oc.py +@@ -202,7 +202,8 @@ class OCTransport(RemoteTransport): + env, False) + + def _disconnect(self): +- os.unlink(self.pod_tmp_conf) ++ if os.path.exists(self.pod_tmp_conf): ++ os.unlink(self.pod_tmp_conf) + removed = self.run_oc("delete pod %s" % self.pod_name) + if "deleted" not in removed['output']: + self.log_debug("Calling delete on pod '%s' failed: %s" +-- +2.31.1 + +From 304c9ef6c1015f1ebe1a8d569c3e16bada4d23f1 Mon Sep 17 00:00:00 2001 +From: Nadia Pinaeva +Date: Tue, 4 Jan 2022 16:37:09 +0100 +Subject: [PATCH] Add cluster cleanup for all exit() calls + +Signed-off-by: Nadia Pinaeva +--- + sos/collector/__init__.py | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py +index a25e794e1..ffd63bc63 100644 +--- a/sos/collector/__init__.py ++++ b/sos/collector/__init__.py +@@ -443,6 +443,7 @@ def add_parser_options(cls, parser): + + def exit(self, msg, error=1): + """Used to safely terminate if sos-collector encounters an error""" ++ self.cluster.cleanup() + self.log_error(msg) + try: + self.close_all_connections() +@@ -858,8 +858,9 @@ class SoSCollector(SoSComponent): + "CTRL-C to quit\n") + self.ui_log.info("") + except KeyboardInterrupt: +- self.cluster.cleanup() + self.exit("Exiting on user cancel", 130) ++ except Exception as e: ++ self.exit(repr(e), 1) + + def configure_sos_cmd(self): + """Configures the sosreport command that is run on the nodes""" +@@ -1185,7 +1185,6 @@ def collect(self): + arc_name = self.create_cluster_archive() + else: + msg = 'No sosreports were collected, nothing to archive...' +- self.cluster.cleanup() + self.exit(msg, 1) + + if self.opts.upload and self.policy.get_upload_url(): +From 2c3a647817dfbac36be3768acf6026e91d1a6e8f Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Tue, 21 Dec 2021 14:20:19 -0500 +Subject: [PATCH] [options] Allow spaces in --keywords values in sos.conf + +The `--keywords` option supports spaces to allow for obfuscated phrases, +not just words. This however breaks if a phrase is added to the config +file *before* a run with the phrase in the cmdline option, due to the +safeguards we have for all other values that do not support spaces. + +Add a check in our flow for updating options from the config file to not +replace illegal spaces if we're checking the `keywords` option, for +which spaces are legal. 
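The config-file handling described above amounts to a single exception to the space-stripping rule; a minimal sketch, assuming 'keywords' is the only space-tolerant option:

    def normalize_option(key, val):
        # most option values do not tolerate spaces; --keywords supports
        # phrases, so spaces must survive for that key only
        if isinstance(val, str) and key != 'keywords':
            return val.replace(' ', '')
        return val

    assert normalize_option('only_plugins', 'foo, bar') == 'foo,bar'
    assert normalize_option('keywords', 'secret phrase') == 'secret phrase'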
+ +Signed-off-by: Jake Hunsaker +--- + sos/options.py | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/sos/options.py b/sos/options.py +index 7bea3ffc1..4846a5096 100644 +--- a/sos/options.py ++++ b/sos/options.py +@@ -200,7 +200,10 @@ def _update_from_section(section, config): + odict[rename_opts[key]] = odict.pop(key) + # set the values according to the config file + for key, val in odict.items(): +- if isinstance(val, str): ++ # most option values do not tolerate spaces, special ++ # exception however for --keywords which we do want to ++ # support phrases, and thus spaces, for ++ if isinstance(val, str) and key != 'keywords': + val = val.replace(' ', '') + if key not in self.arg_defaults: + # read an option that is not loaded by the current +From f912fc9e31b406a24b7a9c012e12cda920632051 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Mon, 10 Jan 2022 14:13:42 +0100 +Subject: [PATCH] [collect] Deal with None sos_version properly + +In case the collector cluster hits an error during init, sos_version is +None, which LooseVersion can't compare properly, raising the exception + +'LooseVersion' object has no attribute 'version' + +Related: #2822 + +Signed-off-by: Pavel Moravec +--- + sos/collector/sosnode.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py +index 925f27909..7bbe0cd1f 100644 +--- a/sos/collector/sosnode.py ++++ b/sos/collector/sosnode.py +@@ -382,7 +382,8 @@ def check_sos_version(self, ver): + given ver. This means that if the installed version is greater than + ver, this will still return True + """ +- return LooseVersion(self.sos_info['version']) >= ver ++ return self.sos_info['version'] is not None and \ ++ LooseVersion(self.sos_info['version']) >= ver + + def is_installed(self, pkg): + """Checks if a given package is installed on the node""" +From 0c67e8ebaeef17dac3b5b9e42a59b4e673e4403b Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Mon, 10 Jan 2022 14:17:13 +0100 +Subject: [PATCH] [collector] Cleanup cluster only if defined + +In case cluster init fails, self.cluster is None and its cleanup +must be skipped. + +Resolves: #2822 + +Signed-off-by: Pavel Moravec +--- + sos/collector/__init__.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py +index ffd63bc63..3e22bca3e 100644 +--- a/sos/collector/__init__.py ++++ b/sos/collector/__init__.py +@@ -443,7 +443,8 @@ def add_parser_options(cls, parser): + + def exit(self, msg, error=1): + """Used to safely terminate if sos-collector encounters an error""" +- self.cluster.cleanup() ++ if self.cluster: ++ self.cluster.cleanup() + self.log_error(msg) + try: + self.close_all_connections() +From ef27a6ee6737c23b3beda1437768a91679024697 Mon Sep 17 00:00:00 2001 +From: Nadia Pinaeva +Date: Fri, 3 Dec 2021 15:41:35 +0100 +Subject: [PATCH] Add journal logs for NetworkManager plugin + +Signed-off-by: Nadia Pinaeva +--- + sos/report/plugins/networkmanager.py | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/sos/report/plugins/networkmanager.py b/sos/report/plugins/networkmanager.py +index 30f99a1140..3aca0c7460 100644 +--- a/sos/report/plugins/networkmanager.py ++++ b/sos/report/plugins/networkmanager.py +@@ -25,6 +25,8 @@ def setup(self): + "/etc/NetworkManager/dispatcher.d" + ]) + ++ self.add_journal(units="NetworkManager") ++ + # There are some incompatible changes in nmcli since + # the release of NetworkManager >= 0.9.9.
In addition, + # NetworkManager >= 0.9.9 will use the long names of +From 9eb60f0bb6ea36f9c1cf099c1fd20cf3938b4b26 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Mon, 17 Jan 2022 11:11:24 -0500 +Subject: [PATCH] [clean] Properly ignore empty items for obfuscation + +This commit fixes a couple of edge cases where an empty item (e.g. an +empty string '') was not being properly ignored, which in turn caused +failures in writing both obfuscations and replacement files. + +This should no longer be possible. + +Signed-off-by: Jake Hunsaker +--- + sos/cleaner/mappings/__init__.py | 5 ++++- + sos/cleaner/mappings/username_map.py | 2 +- + sos/cleaner/parsers/username_parser.py | 2 +- + 3 files changed, 6 insertions(+), 3 deletions(-) + +diff --git a/sos/cleaner/mappings/__init__.py b/sos/cleaner/mappings/__init__.py +index 5cf5c8b2d..48171a052 100644 +--- a/sos/cleaner/mappings/__init__.py ++++ b/sos/cleaner/mappings/__init__.py +@@ -49,6 +49,8 @@ def add(self, item): + :param item: The plaintext object to obfuscate + """ + with self.lock: ++ if not item: ++ return item + self.dataset[item] = self.sanitize_item(item) + return self.dataset[item] + +@@ -67,7 +69,8 @@ def get(self, item): + """Retrieve an item's obfuscated counterpart from the map. If the item + does not yet exist in the map, add it by generating one on the fly + """ +- if self.ignore_item(item) or self.item_in_dataset_values(item): ++ if (not item or self.ignore_item(item) or ++ self.item_in_dataset_values(item)): + return item + if item not in self.dataset: + return self.add(item) +diff --git a/sos/cleaner/mappings/username_map.py b/sos/cleaner/mappings/username_map.py +index 7ecccd7bc..ed6dc0912 100644 +--- a/sos/cleaner/mappings/username_map.py ++++ b/sos/cleaner/mappings/username_map.py +@@ -24,7 +24,7 @@ class SoSUsernameMap(SoSMap): + + def load_names_from_options(self, opt_names): + for name in opt_names: +- if name not in self.dataset.keys(): ++ if name and name not in self.dataset.keys(): + self.add(name) + + def sanitize_item(self, username): +diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py +index 49640f7fd..2853c860f 100644 +--- a/sos/cleaner/parsers/username_parser.py ++++ b/sos/cleaner/parsers/username_parser.py +@@ -55,7 +55,7 @@ def load_usernames_into_map(self, content): + user = line.split()[0] + except Exception: + continue +- if user.lower() in self.skip_list: ++ if not user or user.lower() in self.skip_list: + continue + users.add(user) + for each in users: +From ed618678fd3d07e68e1a430eb7d225a9701332e0 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Thu, 13 Jan 2022 13:52:34 -0500 +Subject: [PATCH] [clean,parsers] Build regex lists for static items only once +
+For parsers such as the username and keyword parsers, we don't discover +new items through parsing archives - these parsers use static lists +determined before we begin the actual obfuscation process. + +As such, we can build a list of regexes for these static items once, and +then reference those regexes during execution, rather than rebuilding +the regex for each of these items for every obfuscation. + +For use cases where hundreds of items, e.g. hundreds of usernames, are +being obfuscated, this results in a significant performance increase. +Individual per-file gains are minor - fractions of a second - however +these gains build up over the course of the hundreds to thousands of +files a typical archive can be expected to contain.
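The optimization described above trades a regex compile on every line for one up-front compile per static item; a minimal sketch, with a plain dict standing in for the parser's obfuscation mapping:

    import re

    keywords = ['foobar', 'secret phrase']
    mapping = {kw: 'obfuscatedword%d' % i for i, kw in enumerate(keywords)}
    # compile each static item exactly once
    regexes = {kw: re.compile(kw, re.I) for kw in keywords}

    def parse_line(line):
        count = 0
        # longest items first so overlapping matches hit the longer keyword
        for kw, reg in sorted(regexes.items(), key=lambda i: len(i[0]),
                              reverse=True):
            if reg.search(line):
                line, n = reg.subn(mapping[kw], line)
                count += n
        return line, count

    print(parse_line('FOOBAR mentioned a secret phrase'))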
+ +Signed-off-by: Jake Hunsaker +--- + sos/cleaner/__init__.py | 9 +++++++++ + sos/cleaner/parsers/__init__.py | 10 ++++++++++ + sos/cleaner/parsers/keyword_parser.py | 15 ++++++++++----- + sos/cleaner/parsers/username_parser.py | 14 ++++++++------ + tests/unittests/cleaner_tests.py | 1 + + 5 files changed, 38 insertions(+), 11 deletions(-) + +diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py +index 5686e2131..b76bef644 100644 +--- a/sos/cleaner/__init__.py ++++ b/sos/cleaner/__init__.py +@@ -294,6 +294,7 @@ def execute(self): + # we have at least one valid target to obfuscate + self.completed_reports = [] + self.preload_all_archives_into_maps() ++ self.generate_parser_item_regexes() + self.obfuscate_report_paths() + + if not self.completed_reports: +@@ -498,6 +499,14 @@ def _replace_obfuscated_archives(self): + shutil.move(archive.final_archive_path, dest) + archive.final_archive_path = dest_name + ++ def generate_parser_item_regexes(self): ++ """For the parsers that use prebuilt lists of items, generate those ++ regexes now since all the parsers should be preloaded by the archive(s) ++ as well as being handed cmdline options and mapping file configuration. ++ """ ++ for parser in self.parsers: ++ parser.generate_item_regexes() ++ + def preload_all_archives_into_maps(self): + """Before doing the actual obfuscation, if we have multiple archives + to obfuscate then we need to preload each of them into the mappings +diff --git a/sos/cleaner/parsers/__init__.py b/sos/cleaner/parsers/__init__.py +index e62fd9384..6def863a6 100644 +--- a/sos/cleaner/parsers/__init__.py ++++ b/sos/cleaner/parsers/__init__.py +@@ -46,9 +46,19 @@ class SoSCleanerParser(): + map_file_key = 'unset' + + def __init__(self, config={}): ++ self.regexes = {} + if self.map_file_key in config: + self.mapping.conf_update(config[self.map_file_key]) + ++ def generate_item_regexes(self): ++ """Generate regexes for items the parser will be searching for ++ repeatedly without needing to generate them for every file and/or line ++ we process ++ ++ Not used by all parsers. ++ """ ++ pass ++ + def parse_line(self, line): + """This will be called for every line in every file we process, so that + every parser has a chance to scrub everything. +diff --git a/sos/cleaner/parsers/keyword_parser.py b/sos/cleaner/parsers/keyword_parser.py +index 694c6073a..362a1929e 100644 +--- a/sos/cleaner/parsers/keyword_parser.py ++++ b/sos/cleaner/parsers/keyword_parser.py +@@ -9,6 +9,7 @@ + # See the LICENSE file in the source distribution for further information. 
+
+ import os
++import re
+
+ from sos.cleaner.parsers import SoSCleanerParser
+ from sos.cleaner.mappings.keyword_map import SoSKeywordMap
+@@ -33,16 +34,20 @@ def __init__(self, config, keywords=None, keyword_file=None):
+ # pre-generate an obfuscation mapping for each keyword
+ # this is necessary for cases where filenames are being
+ # obfuscated before or instead of file content
+- self.mapping.get(keyword)
++ self.mapping.get(keyword.lower())
+ self.user_keywords.append(keyword)
+ if keyword_file and os.path.exists(keyword_file):
+ with open(keyword_file, 'r') as kwf:
+ self.user_keywords.extend(kwf.read().splitlines())
+
++ def generate_item_regexes(self):
++ for kw in self.user_keywords:
++ self.regexes[kw] = re.compile(kw, re.I)
++
+ def parse_line(self, line):
+ count = 0
+- for keyword in sorted(self.user_keywords, reverse=True):
+- if keyword in line:
+- line = line.replace(keyword, self.mapping.get(keyword))
+- count += 1
++ for kwrd, reg in sorted(self.regexes.items(), key=len, reverse=True):
++ if reg.search(line):
++ line, _count = reg.subn(self.mapping.get(kwrd.lower()), line)
++ count += _count
+ return line, count
+diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py
+index 3208a6557..49640f7fd 100644
+--- a/sos/cleaner/parsers/username_parser.py
++++ b/sos/cleaner/parsers/username_parser.py
+@@ -61,12 +61,14 @@ def load_usernames_into_map(self, content):
+ for each in users:
+ self.mapping.get(each)
+
++ def generate_item_regexes(self):
++ for user in self.mapping.dataset:
++ self.regexes[user] = re.compile(user, re.I)
++
+ def parse_line(self, line):
+ count = 0
+- for username in sorted(self.mapping.dataset.keys(), reverse=True):
+- _reg = re.compile(username, re.I)
+- if _reg.search(line):
+- line, count = _reg.subn(
+- self.mapping.get(username.lower()), line
+- )
++ for user, reg in sorted(self.regexes.items(), key=len, reverse=True):
++ if reg.search(line):
++ line, _count = reg.subn(self.mapping.get(user.lower()), line)
++ count += _count
+ return line, count
+diff --git a/tests/unittests/cleaner_tests.py b/tests/unittests/cleaner_tests.py
+index cb20772fd..b59eade9a 100644
+--- a/tests/unittests/cleaner_tests.py
++++ b/tests/unittests/cleaner_tests.py
+@@ -105,6 +105,7 @@ def setUp(self):
+ self.host_parser = SoSHostnameParser(config={}, opt_domains='foobar.com')
+ self.kw_parser = SoSKeywordParser(config={}, keywords=['foobar'])
+ self.kw_parser_none = SoSKeywordParser(config={})
++ self.kw_parser.generate_item_regexes()
+
+ def test_ip_parser_valid_ipv4_line(self):
+ line = 'foobar foo 10.0.0.1/24 barfoo bar'
+From 2ae16e0245e1b01b8547e507abb69c11871a8467 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker
+Date: Mon, 21 Feb 2022 14:37:09 -0500
+Subject: [PATCH] [sosnode] Handle downstream versioning for runtime option
+ check
+
+First, this adds parsing and formatting of an sos installation's release
+version according to the loaded package manager for that node.
+
+Adds a fallback version check for 4.2-13 for RHEL downstreams that
+backport the `container-runtime` option into sos-4.2.
+
+Carry this upstream to account for use cases where the workstation
+`collect` is run from may be on a different stream than the one used by
+the cluster nodes.
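The normalization this relies on can be sketched in isolation as follows (a
simplified illustration of the X.Y.Z-R formatting, not the exact sosnode
code; distutils' LooseVersion is what sos 4.2 used here):

    from distutils.version import LooseVersion

    def format_version(ver):
        # normalize 'X.Y[-R.suffix]' to 'X.Y.Z-R' so maintenance releases
        # and downstream release tags compare sanely
        base = ver.split('-')[0]
        rel = '-' + ver.split('-')[-1].split('.')[0] if '-' in ver else ''
        if len(base.split('.')) == 2:
            base += '.0'
        return base + rel

    # a downstream 4.2-13.el8 build satisfies a 4.2-13 version check
    print(LooseVersion(format_version('4.2-13.el8')) >=
          LooseVersion(format_version('4.2-13')))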
+ +Signed-off-by: Jake Hunsaker +--- + sos/collector/sosnode.py | 60 ++++++++++++++++++++++++++++++++++------ + 1 file changed, 51 insertions(+), 9 deletions(-) + +diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py +index 7bbe0cd1..d9b998b0 100644 +--- a/sos/collector/sosnode.py ++++ b/sos/collector/sosnode.py +@@ -275,21 +275,34 @@ class SosNode(): + def _load_sos_info(self): + """Queries the node for information about the installed version of sos + """ ++ ver = None ++ rel = None + if self.host.container_version_command is None: + pkg = self.host.package_manager.pkg_version(self.host.sos_pkg_name) + if pkg is not None: + ver = '.'.join(pkg['version']) +- self.sos_info['version'] = ver ++ if pkg['release']: ++ rel = pkg['release'] ++ + else: + # use the containerized policy's command + pkgs = self.run_command(self.host.container_version_command, + use_container=True, need_root=True) + if pkgs['status'] == 0: +- ver = pkgs['output'].strip().split('-')[1] +- if ver: +- self.sos_info['version'] = ver +- else: +- self.sos_info['version'] = None ++ _, ver, rel = pkgs['output'].strip().split('-') ++ ++ if ver: ++ if len(ver.split('.')) == 2: ++ # safeguard against maintenance releases throwing off the ++ # comparison by LooseVersion ++ ver += '.0' ++ try: ++ ver += '-%s' % rel.split('.')[0] ++ except Exception as err: ++ self.log_debug("Unable to fully parse sos release: %s" % err) ++ ++ self.sos_info['version'] = ver ++ + if self.sos_info['version']: + self.log_info('sos version is %s' % self.sos_info['version']) + else: +@@ -381,9 +394,37 @@ class SosNode(): + """Checks to see if the sos installation on the node is AT LEAST the + given ver. This means that if the installed version is greater than + ver, this will still return True ++ ++ :param ver: Version number we are trying to verify is installed ++ :type ver: ``str`` ++ ++ :returns: True if installed version is at least ``ver``, else False ++ :rtype: ``bool`` + """ +- return self.sos_info['version'] is not None and \ +- LooseVersion(self.sos_info['version']) >= ver ++ def _format_version(ver): ++ # format the version we're checking to a standard form of X.Y.Z-R ++ try: ++ _fver = ver.split('-')[0] ++ _rel = '' ++ if '-' in ver: ++ _rel = '-' + ver.split('-')[-1].split('.')[0] ++ if len(_fver.split('.')) == 2: ++ _fver += '.0' ++ ++ return _fver + _rel ++ except Exception as err: ++ self.log_debug("Unable to format '%s': %s" % (ver, err)) ++ return ver ++ ++ _ver = _format_version(ver) ++ ++ try: ++ _node_ver = LooseVersion(self.sos_info['version']) ++ _test_ver = LooseVersion(_ver) ++ return _node_ver >= _test_ver ++ except Exception as err: ++ self.log_error("Error checking sos version: %s" % err) ++ return False + + def is_installed(self, pkg): + """Checks if a given package is installed on the node""" +@@ -587,7 +628,8 @@ class SosNode(): + sos_opts.append('--cmd-timeout=%s' + % quote(str(self.opts.cmd_timeout))) + +- if self.check_sos_version('4.3'): ++ # handle downstream versions that backported this option ++ if self.check_sos_version('4.3') or self.check_sos_version('4.2-13'): + if self.opts.container_runtime != 'auto': + sos_opts.append( + "--container-runtime=%s" % self.opts.container_runtime +-- +2.34.1 + diff --git a/SOURCES/sos-bz2041488-virsh-in-foreground.patch b/SOURCES/sos-bz2041488-virsh-in-foreground.patch new file mode 100644 index 0000000..66bca13 --- /dev/null +++ b/SOURCES/sos-bz2041488-virsh-in-foreground.patch @@ -0,0 +1,146 @@ +From 137abd394f64a63b6633949b5c81159af12038b7 Mon Sep 17 00:00:00 2001 
+
+From: Pavel Moravec
+Date: Fri, 14 Jan 2022 20:07:17 +0100
+Subject: [PATCH] [report] pass foreground argument to collect_cmd_output
+
+Related to: #2825
+
+Signed-off-by: Pavel Moravec
+---
+ sos/report/plugins/__init__.py | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
+index 98f163ab9..1bbdf28a4 100644
+--- a/sos/report/plugins/__init__.py
++++ b/sos/report/plugins/__init__.py
+@@ -1920,6 +1920,8 @@ class Plugin(object):
+ :param subdir: Subdir in plugin directory to save to
+ :param changes: Does this cmd potentially make a change
+ on the system?
++ :param foreground: Run the `cmd` in the foreground with a
++ TTY
+ :param tags: Add tags in the archive manifest
+ :param cmd_as_tag: Format command string to tag
+
+@@ -2145,7 +2147,8 @@ def collect_cmd_output(self, cmd, suggest_filename=None,
+ root_symlink=False, timeout=None,
+ stderr=True, chroot=True, runat=None, env=None,
+ binary=False, sizelimit=None, pred=None,
+- changes=False, subdir=None, tags=[]):
++ changes=False, foreground=False, subdir=None,
++ tags=[]):
+ """Execute a command and save the output to a file for inclusion in the
+ report, then return the results for further use by the plugin
+
+@@ -2188,6 +2191,9 @@ def collect_cmd_output(self, cmd, suggest_filename=None,
+ on the system?
+ :type changes: ``bool``
+
++ :param foreground: Run the `cmd` in the foreground with a TTY
++ :type foreground: ``bool``
++
+ :param tags: Add tags in the archive manifest
+ :type tags: ``str`` or a ``list`` of strings
+
+@@ -2206,8 +2212,8 @@ def collect_cmd_output(self, cmd, suggest_filename=None,
+ return self._collect_cmd_output(
+ cmd, suggest_filename=suggest_filename, root_symlink=root_symlink,
+ timeout=timeout, stderr=stderr, chroot=chroot, runat=runat,
+- env=env, binary=binary, sizelimit=sizelimit, subdir=subdir,
+- tags=tags
++ env=env, binary=binary, sizelimit=sizelimit, foreground=foreground,
++ subdir=subdir, tags=tags
+ )
+
+ def exec_cmd(self, cmd, timeout=None, stderr=True, chroot=True,
+From 747fef695e4ff08f320c5f03090bdefa7154c761 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Fri, 14 Jan 2022 20:10:22 +0100
+Subject: [PATCH] [virsh] Call virsh commands in the foreground / with a TTY
+
+On some virsh errors (like being unable to connect to a hypervisor),
+the tool needs to communicate with a TTY, otherwise it can get stuck
+(when called via Popen with a timeout).
+
+Calling it in the foreground prevents it from getting stuck / waiting
+on the command timeout.
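The effect of a TTY can be illustrated in isolation with the standard pty
module (a minimal sketch; sos's own foreground plumbing differs): a child
spawned on a pseudo-terminal always finds a TTY to talk to, so it cannot
block waiting for one.

    import pty

    # POSIX-only: the child gets a controlling terminal wired to ours,
    # so a tool that insists on prompting or reporting errors via a TTY
    # cannot hang waiting for one
    status = pty.spawn(['echo', 'this child has a controlling tty'])
    print('child status:', status)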
+
+Resolves: #2825
+
+Signed-off-by: Pavel Moravec
+---
+ sos/report/plugins/virsh.py | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/sos/report/plugins/virsh.py b/sos/report/plugins/virsh.py
+index d6b7c16761..08f9a8488c 100644
+--- a/sos/report/plugins/virsh.py
++++ b/sos/report/plugins/virsh.py
+@@ -39,26 +39,30 @@ def setup(self):
+ ]
+
+ for subcmd in subcmds:
+- self.add_cmd_output('%s %s' % (cmd, subcmd))
++ self.add_cmd_output('%s %s' % (cmd, subcmd), foreground=True)
+
+ # get network, pool and nwfilter elements
+ for k in ['net', 'nwfilter', 'pool']:
+- k_list = self.collect_cmd_output('%s %s-list' % (cmd, k))
++ k_list = self.collect_cmd_output('%s %s-list' % (cmd, k),
++ foreground=True)
+ if k_list['status'] == 0:
+ k_lines = k_list['output'].splitlines()
+ # the 'Name' column position changes between virsh cmds
+ pos = k_lines[0].split().index('Name')
+ for j in filter(lambda x: x, k_lines[2:]):
+ n = j.split()[pos]
+- self.add_cmd_output('%s %s-dumpxml %s' % (cmd, k, n))
++ self.add_cmd_output('%s %s-dumpxml %s' % (cmd, k, n),
++ foreground=True)
+
+ # cycle through the VMs/domains list, ignore 2 header lines and latest
+ # empty line, and dumpxml domain name in 2nd column
+- domains_output = self.exec_cmd('%s list --all' % cmd)
++ domains_output = self.exec_cmd('%s list --all' % cmd, foreground=True)
+ if domains_output['status'] == 0:
+ domains_lines = domains_output['output'].splitlines()[2:]
+ for domain in filter(lambda x: x, domains_lines):
+ d = domain.split()[1]
+ for x in ['dumpxml', 'dominfo', 'domblklist']:
+- self.add_cmd_output('%s %s %s' % (cmd, x, d))
++ self.add_cmd_output('%s %s %s' % (cmd, x, d),
++ foreground=True)
++
+ # vim: et ts=4 sw=4
+From 9bc032129ec66766f07349dd115335f104888efa Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Wed, 26 Jan 2022 09:44:01 +0100
+Subject: [PATCH] [virsh] Catch parsing exception
+
+In case the virsh output is malformed or otherwise missing 'Name',
+catch the parsing exception and continue with the next for-loop
+iteration.
+
+Resolves: #2836
+
+Signed-off-by: Pavel Moravec
+---
+ sos/report/plugins/virsh.py | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/sos/report/plugins/virsh.py b/sos/report/plugins/virsh.py
+index 08f9a8488..2ce1df15c 100644
+--- a/sos/report/plugins/virsh.py
++++ b/sos/report/plugins/virsh.py
+@@ -48,7 +48,11 @@ def setup(self):
+ if k_list['status'] == 0:
+ k_lines = k_list['output'].splitlines()
+ # the 'Name' column position changes between virsh cmds
+- pos = k_lines[0].split().index('Name')
++ # catch the rare exceptions when 'Name' is not found
++ try:
++ pos = k_lines[0].split().index('Name')
++ except Exception:
++ continue
+ for j in filter(lambda x: x, k_lines[2:]):
+ n = j.split()[pos]
+ self.add_cmd_output('%s %s-dumpxml %s' % (cmd, k, n),
diff --git a/SOURCES/sos-bz2042966-ovn-proper-package-enablement.patch b/SOURCES/sos-bz2042966-ovn-proper-package-enablement.patch
new file mode 100644
index 0000000..16c48c4
--- /dev/null
+++ b/SOURCES/sos-bz2042966-ovn-proper-package-enablement.patch
@@ -0,0 +1,252 @@
+From 210b83e1d1164d29b1f6198675b8b596c4af8336 Mon Sep 17 00:00:00 2001
+From: Daniel Alvarez Sanchez
+Date: Thu, 20 Jan 2022 12:58:44 +0100
+Subject: [PATCH] [ovn_central] Account for Red Hat ovn package naming
+
+Previous ovn packages were 'ovn2xxx' and now they have
+been renamed to 'ovn-2xxx'. This causes the sos tool to not
+recognize that the packages are installed, so it won't
+collect the relevant data.
+ +This patch is changing the match to be compatible +with the previous and newer naming conventions. + +Signed-off-by: Daniel Alvarez Sanchez +--- + sos/report/plugins/ovn_central.py | 2 +- + sos/report/plugins/ovn_host.py | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py +index ddbf288da..0f947d4c5 100644 +--- a/sos/report/plugins/ovn_central.py ++++ b/sos/report/plugins/ovn_central.py +@@ -147,7 +147,7 @@ def setup(self): + + class RedHatOVNCentral(OVNCentral, RedHatPlugin): + +- packages = ('openvswitch-ovn-central', 'ovn2.*-central', ) ++ packages = ('openvswitch-ovn-central', 'ovn.*-central', ) + ovn_sbdb_sock_path = '/var/run/openvswitch/ovnsb_db.ctl' + + +diff --git a/sos/report/plugins/ovn_host.py b/sos/report/plugins/ovn_host.py +index 78604a15a..25c38cccc 100644 +--- a/sos/report/plugins/ovn_host.py ++++ b/sos/report/plugins/ovn_host.py +@@ -55,7 +55,7 @@ def check_enabled(self): + + class RedHatOVNHost(OVNHost, RedHatPlugin): + +- packages = ('openvswitch-ovn-host', 'ovn2.*-host', ) ++ packages = ('openvswitch-ovn-host', 'ovn.*-host', ) + + + class DebianOVNHost(OVNHost, DebianPlugin, UbuntuPlugin): +From 21fc376d97a5f74743e2b7cf7069349e874b979e Mon Sep 17 00:00:00 2001 +From: Hemanth Nakkina +Date: Fri, 4 Feb 2022 07:57:59 +0530 +Subject: [PATCH] [ovn-central] collect NB/SB ovsdb-server cluster status + +Add commands to collect cluster status of Northbound and +Southbound ovsdb servers. + +Resolves: #2840 + +Signed-off-by: Hemanth Nakkina hemanth.nakkina@canonical.com +--- + sos/report/plugins/ovn_central.py | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py +index 0f947d4c5..2f0438df3 100644 +--- a/sos/report/plugins/ovn_central.py ++++ b/sos/report/plugins/ovn_central.py +@@ -84,6 +84,14 @@ def setup(self): + else: + self.add_copy_spec("/var/log/ovn/*.log") + ++ # ovsdb nb/sb cluster status commands ++ ovsdb_cmds = [ ++ 'ovs-appctl -t {} cluster/status OVN_Northbound'.format( ++ self.ovn_nbdb_sock_path), ++ 'ovs-appctl -t {} cluster/status OVN_Southbound'.format( ++ self.ovn_sbdb_sock_path), ++ ] ++ + # Some user-friendly versions of DB output + nbctl_cmds = [ + 'ovn-nbctl show', +@@ -109,7 +117,8 @@ def setup(self): + + self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl') + +- cmds = nbctl_cmds ++ cmds = ovsdb_cmds ++ cmds += nbctl_cmds + + # Can only run sbdb commands if we are the leader + co = {'cmd': "ovs-appctl -t {} cluster/status OVN_Southbound". +@@ -148,10 +157,12 @@ def setup(self): + class RedHatOVNCentral(OVNCentral, RedHatPlugin): + + packages = ('openvswitch-ovn-central', 'ovn.*-central', ) ++ ovn_nbdb_sock_path = '/var/run/openvswitch/ovnnb_db.ctl' + ovn_sbdb_sock_path = '/var/run/openvswitch/ovnsb_db.ctl' + + + class DebianOVNCentral(OVNCentral, DebianPlugin, UbuntuPlugin): + + packages = ('ovn-central', ) ++ ovn_nbdb_sock_path = '/var/run/ovn/ovnnb_db.ctl' + ovn_sbdb_sock_path = '/var/run/ovn/ovnsb_db.ctl' +From d0f9d507b0ec63c9e8f3e5d7b6507d9d0f97c038 Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Tue, 15 Feb 2022 16:24:47 -0500 +Subject: [PATCH] [runtimes] Allow container IDs to be used with + `container_exists()` + +As container runtimes can interchange container names and container IDs, +sos should also allow the use of container IDs when checking for the +presence of a given container. 
+
+In particular, this change unblocks the use of `Plugin.exec_cmd()` when
+used in conjunction with `Plugin.get_container_by_name()` to pick a
+container based on a provided regex that the container name may match.
+
+Related: #2856
+
+Signed-off-by: Jake Hunsaker
+---
+ sos/policies/runtimes/__init__.py | 17 +++++++++++++++++
+ sos/report/plugins/__init__.py | 6 +++---
+ 2 files changed, 20 insertions(+), 3 deletions(-)
+
+diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py
+index 5ac673544..d28373496 100644
+--- a/sos/policies/runtimes/__init__.py
++++ b/sos/policies/runtimes/__init__.py
+@@ -147,6 +147,23 @@ def get_volumes(self):
+ vols.append(ent[-1])
+ return vols
+
++ def container_exists(self, container):
++ """Check if a given container ID or name exists on the system from the
++ perspective of the container runtime.
++
++ Note that this will only check _running_ containers
++
++ :param container: The name or ID of the container
++ :type container: ``str``
++
++ :returns: True if the container exists, else False
++ :rtype: ``bool``
++ """
++ for _contup in self.containers:
++ if container in _contup:
++ return True
++ return False
++
+ def fmt_container_cmd(self, container, cmd, quotecmd):
+ """Format a command to run inside a container using the runtime
+
+diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
+index 2988be089..cc5cb65bc 100644
+--- a/sos/report/plugins/__init__.py
++++ b/sos/report/plugins/__init__.py
+@@ -2593,7 +2593,7 @@ def container_exists(self, name):
+ """If a container runtime is present, check to see if a container with
+ a given name is currently running
+
+- :param name: The name of the container to check presence of
++ :param name: The name or ID of the container to check presence of
+ :type name: ``str``
+
+ :returns: ``True`` if `name` exists, else ``False``
+@@ -2601,8 +2601,8 @@ def container_exists(self, name):
+ """
+ _runtime = self._get_container_runtime()
+ if _runtime is not None:
+- con = _runtime.get_container_by_name(name)
+- return con is not None
++ return (_runtime.container_exists(name) or
++ _runtime.get_container_by_name(name) is not None)
+ return False
+
+ def get_all_containers_by_regex(self, regex, get_all=False):
+
+From de9b020a72d1ceda39587db4c6d5acf72cd90da2 Mon Sep 17 00:00:00 2001
+From: Fernando Royo
+Date: Tue, 15 Feb 2022 10:00:38 +0100
+Subject: [PATCH] [ovn_central] Rename container responsible for Red Hat
+ ovn_central plugin
+
+The ovn_central plugin is run by a container named
+'ovn-dbs-bundle*'; a typo has been identified that causes the
+ovn_central plugin to not be enabled by default, as it does not
+recognize any container responsible for it.
+
+This patch fixes the container name match, searching for the schema db
+while keeping backward compatibility with openvswitch.
+--- + sos/report/plugins/ovn_central.py | 23 ++++++++++++----------- + 1 file changed, 12 insertions(+), 11 deletions(-) + +diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py +index 2f0438df..2f34bff0 100644 +--- a/sos/report/plugins/ovn_central.py ++++ b/sos/report/plugins/ovn_central.py +@@ -24,7 +24,7 @@ class OVNCentral(Plugin): + short_desc = 'OVN Northd' + plugin_name = "ovn_central" + profiles = ('network', 'virt') +- containers = ('ovs-db-bundle.*',) ++ containers = ('ovn-dbs-bundle.*',) + + def get_tables_from_schema(self, filename, skip=[]): + if self._container_name: +@@ -66,7 +66,7 @@ class OVNCentral(Plugin): + cmds.append('%s list %s' % (ovn_cmd, table)) + + def setup(self): +- self._container_name = self.get_container_by_name('ovs-dbs-bundle.*') ++ self._container_name = self.get_container_by_name(self.containers[0]) + + ovs_rundir = os.environ.get('OVS_RUNDIR') + for pidfile in ['ovnnb_db.pid', 'ovnsb_db.pid', 'ovn-northd.pid']: +@@ -110,12 +110,11 @@ class OVNCentral(Plugin): + 'ovn-sbctl get-connection', + ] + +- schema_dir = '/usr/share/openvswitch' +- +- nb_tables = self.get_tables_from_schema(self.path_join( +- schema_dir, 'ovn-nb.ovsschema')) +- +- self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl') ++ # backward compatibility ++ for path in ['/usr/share/openvswitch', '/usr/share/ovn']: ++ nb_tables = self.get_tables_from_schema(self.path_join( ++ path, 'ovn-nb.ovsschema')) ++ self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl') + + cmds = ovsdb_cmds + cmds += nbctl_cmds +@@ -125,9 +124,11 @@ class OVNCentral(Plugin): + format(self.ovn_sbdb_sock_path), + "output": "Leader: self"} + if self.test_predicate(self, pred=SoSPredicate(self, cmd_outputs=co)): +- sb_tables = self.get_tables_from_schema(self.path_join( +- schema_dir, 'ovn-sb.ovsschema'), ['Logical_Flow']) +- self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl') ++ # backward compatibility ++ for path in ['/usr/share/openvswitch', '/usr/share/ovn']: ++ sb_tables = self.get_tables_from_schema(self.path_join( ++ path, 'ovn-sb.ovsschema'), ['Logical_Flow']) ++ self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl') + cmds += sbctl_cmds + + # If OVN is containerized, we need to run the above commands inside +-- +2.34.1 + diff --git a/SOURCES/sos-bz2043102-foreman-tasks-msgpack.patch b/SOURCES/sos-bz2043102-foreman-tasks-msgpack.patch new file mode 100644 index 0000000..900389c --- /dev/null +++ b/SOURCES/sos-bz2043102-foreman-tasks-msgpack.patch @@ -0,0 +1,59 @@ +From 5634f7dd77eff821f37daa953fa86cc783d3b937 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Fri, 21 Jan 2022 16:27:33 +0100 +Subject: [PATCH] [foreman] Use psql-msgpack-decode wrapper for dynflow >= 1.6 + +In dynflow >=1.6.3, dynflow* tables in postgres are encoded by +msgpack which makes plain CSV dumps unreadable. In such a case, +psql-msgpack-decode wrapper tool from dynflow-utils (of any +version) must be used instead of the plain psql command. 
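The resulting command construction can be sketched standalone as follows
(paraphrasing the plugin code below; the wrapper path is the one shipped by
dynflow-utils):

    from shlex import quote

    def build_query_cmd(query, csv=False, binary='psql', dbhost='localhost'):
        # wrap the query for CSV output, and let the caller swap the
        # msgpack-decoding wrapper in for plain psql
        if csv:
            query = ("COPY (%s) TO STDOUT "
                     "WITH (FORMAT 'csv', DELIMITER ',', HEADER)" % query)
        _dbcmd = "%s --no-password -h %s -p 5432 -U foreman -d foreman -c %s"
        return _dbcmd % (binary, dbhost, quote(query))

    print(build_query_cmd('SELECT * FROM dynflow_actions', csv=True,
                          binary='/usr/libexec/psql-msgpack-decode'))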
+ +Resolves: #2830 + +Signed-off-by: Pavel Moravec +--- + sos/report/plugins/foreman.py | 16 ++++++++++++---- + 1 file changed, 12 insertions(+), 4 deletions(-) + +diff --git a/sos/report/plugins/foreman.py b/sos/report/plugins/foreman.py +index 314a651d1..3fd80e6a8 100644 +--- a/sos/report/plugins/foreman.py ++++ b/sos/report/plugins/foreman.py +@@ -244,8 +244,16 @@ def setup(self): + self.add_cmd_output(_cmd, suggest_filename=table, timeout=600, + sizelimit=100, env=self.env) + ++ # dynflow* tables on dynflow >=1.6.3 are encoded and hence in that ++ # case, psql-msgpack-decode wrapper tool from dynflow-utils (any ++ # version) must be used instead of plain psql command ++ dynutils = self.is_installed('dynflow-utils') + for dyn in foremancsv: +- _cmd = self.build_query_cmd(foremancsv[dyn], csv=True) ++ binary = "psql" ++ if dyn != 'foreman_tasks_tasks' and dynutils: ++ binary = "/usr/libexec/psql-msgpack-decode" ++ _cmd = self.build_query_cmd(foremancsv[dyn], csv=True, ++ binary=binary) + self.add_cmd_output(_cmd, suggest_filename=dyn, timeout=600, + sizelimit=100, env=self.env) + +@@ -270,7 +278,7 @@ def setup(self): + # collect http[|s]_proxy env.variables + self.add_env_var(["http_proxy", "https_proxy"]) + +- def build_query_cmd(self, query, csv=False): ++ def build_query_cmd(self, query, csv=False, binary="psql"): + """ + Builds the command needed to invoke the pgsql query as the postgres + user. +@@ -281,8 +289,8 @@ def build_query_cmd(self, query, csv=False): + if csv: + query = "COPY (%s) TO STDOUT " \ + "WITH (FORMAT 'csv', DELIMITER ',', HEADER)" % query +- _dbcmd = "psql --no-password -h %s -p 5432 -U foreman -d foreman -c %s" +- return _dbcmd % (self.dbhost, quote(query)) ++ _dbcmd = "%s --no-password -h %s -p 5432 -U foreman -d foreman -c %s" ++ return _dbcmd % (binary, self.dbhost, quote(query)) + + def postproc(self): + self.do_path_regex_sub( diff --git a/SOURCES/sos-bz2054882-plugopt-logging-effective-opts.patch b/SOURCES/sos-bz2054882-plugopt-logging-effective-opts.patch new file mode 100644 index 0000000..f8e7ed3 --- /dev/null +++ b/SOURCES/sos-bz2054882-plugopt-logging-effective-opts.patch @@ -0,0 +1,94 @@ +From 5824cd5d3bddf39e0382d568419e2453abc93d8a Mon Sep 17 00:00:00 2001 +From: Jake Hunsaker +Date: Mon, 30 Aug 2021 15:09:07 -0400 +Subject: [PATCH] [options] Fix logging on plugopts in effective sos command + +First, provide a special-case handling for plugin options specified in +sos.conf in `SoSOptions.to_args().has_value()` that allows for plugin +options to be included in the "effective options now" log message. + +Second, move the logging of said message (and thus the merging of +preset options, if used), to being _prior_ to the loading of plugin +options. + +Combined, plugin options specified in sos.conf will now be logged +properly and this logging will occur before we set (and log the setting +of) those options. 
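The special case amounts to roughly this check (a simplified sketch of the
has_value() logic in the diff below, not the full method):

    def has_value(name, value, arg_defaults):
        # values argparse renders for "unset" options
        null_values = ("False", "None", "[]", '""', "''", "0")
        if not value or value in null_values:
            return False
        # plugin options sourced from sos.conf always count as "set"
        if name == 'plugopts' and value:
            return True
        if name in arg_defaults and str(value) == str(arg_defaults[name]):
            return False
        return True

    print(has_value('plugopts', ['apache.log=on'], {'plugopts': []}))  # True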
+
+Resolves: #2663
+
+Signed-off-by: Jake Hunsaker
+---
+ sos/options.py | 2 ++
+ sos/report/__init__.py | 30 ++++++++++++++--------------
+ 2 files changed, 18 insertions(+), 14 deletions(-)
+
+diff --git a/sos/options.py b/sos/options.py
+index a014a022..7bea3ffc 100644
+--- a/sos/options.py
++++ b/sos/options.py
+@@ -281,6 +281,8 @@ class SoSOptions():
+ null_values = ("False", "None", "[]", '""', "''", "0")
+ if not value or value in null_values:
+ return False
++ if name == 'plugopts' and value:
++ return True
+ if name in self.arg_defaults:
+ if str(value) == str(self.arg_defaults[name]):
+ return False
+diff --git a/sos/report/__init__.py b/sos/report/__init__.py
+index b0159e5b..82484f1d 100644
+--- a/sos/report/__init__.py
++++ b/sos/report/__init__.py
+@@ -925,20 +925,6 @@ class SoSReport(SoSComponent):
+ self._exit(1)
+
+ def setup(self):
+- # Log command line options
+- msg = "[%s:%s] executing 'sos %s'"
+- self.soslog.info(msg % (__name__, "setup", " ".join(self.cmdline)))
+-
+- # Log active preset defaults
+- preset_args = self.preset.opts.to_args()
+- msg = ("[%s:%s] using '%s' preset defaults (%s)" %
+- (__name__, "setup", self.preset.name, " ".join(preset_args)))
+- self.soslog.info(msg)
+-
+- # Log effective options after applying preset defaults
+- self.soslog.info("[%s:%s] effective options now: %s" %
+- (__name__, "setup", " ".join(self.opts.to_args())))
+-
+ self.ui_log.info(_(" Setting up plugins ..."))
+ for plugname, plug in self.loaded_plugins:
+ try:
+@@ -1386,11 +1372,27 @@ class SoSReport(SoSComponent):
+ self.report_md.add_list('disabled_plugins', self.opts.skip_plugins)
+ self.report_md.add_section('plugins')
+
++ def _merge_preset_options(self):
++ # Log command line options
++ msg = "[%s:%s] executing 'sos %s'"
++ self.soslog.info(msg % (__name__, "setup", " ".join(self.cmdline)))
++
++ # Log active preset defaults
++ preset_args = self.preset.opts.to_args()
++ msg = ("[%s:%s] using '%s' preset defaults (%s)" %
++ (__name__, "setup", self.preset.name, " ".join(preset_args)))
++ self.soslog.info(msg)
++
++ # Log effective options after applying preset defaults
++ self.soslog.info("[%s:%s] effective options now: %s" %
++ (__name__, "setup", " ".join(self.opts.to_args())))
++
+ def execute(self):
+ try:
+ self.policy.set_commons(self.get_commons())
+ self.load_plugins()
+ self._set_all_options()
++ self._merge_preset_options()
+ self._set_tunables()
+ self._check_for_unknown_plugins()
+ self._set_plugin_options()
+--
+2.34.1
+
diff --git a/SOURCES/sos-bz2055547-honour-plugins-timeout-hardcoded.patch b/SOURCES/sos-bz2055547-honour-plugins-timeout-hardcoded.patch
new file mode 100644
index 0000000..3adde40
--- /dev/null
+++ b/SOURCES/sos-bz2055547-honour-plugins-timeout-hardcoded.patch
@@ -0,0 +1,39 @@
+From 7069e99d1c5c443f96a98a7ed6db67fa14683e67 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec
+Date: Thu, 17 Feb 2022 09:14:15 +0100
+Subject: [PATCH] [report] Honor plugins' hardcoded plugin_timeout
+
+Currently, a plugin's hardcoded plugin_timeout default is superseded by
+whatever --plugin-timeout value is given, even when this option is not
+used and we evaluate it to TIMEOUT_DEFAULT.
+
+In the case where neither --plugin-timeout nor -k plugin.timeout is set,
+honour the plugin's hardcoded plugin_timeout instead.
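The precedence the fix below establishes, as a rough standalone sketch
(simplified names; TIMEOUT_DEFAULT stands for the value --plugin-timeout
evaluates to when not given on the command line):

    TIMEOUT_DEFAULT = 300

    def effective_timeout(opt_timeout, own_timeout, hardcoded):
        # opt_timeout: --plugin-timeout; own_timeout: -k plugin.timeout
        # (-1 when unset); hardcoded: the plugin's plugin_timeout attribute
        if opt_timeout is None:
            return own_timeout
        if own_timeout == -1:
            # a defaulted --plugin-timeout must not mask the hardcoded value
            return hardcoded if opt_timeout == TIMEOUT_DEFAULT \
                else int(opt_timeout)
        return own_timeout

    print(effective_timeout(TIMEOUT_DEFAULT, -1, 900))  # -> 900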
+ +Resolves: #2863 +Closes: #2864 + +Signed-off-by: Pavel Moravec +--- + sos/report/plugins/__init__.py | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py +index cc5cb65b..336b4d22 100644 +--- a/sos/report/plugins/__init__.py ++++ b/sos/report/plugins/__init__.py +@@ -636,7 +636,10 @@ class Plugin(): + if opt_timeout is None: + _timeout = own_timeout + elif opt_timeout is not None and own_timeout == -1: +- _timeout = int(opt_timeout) ++ if opt_timeout == TIMEOUT_DEFAULT: ++ _timeout = default_timeout ++ else: ++ _timeout = int(opt_timeout) + elif opt_timeout is not None and own_timeout > -1: + _timeout = own_timeout + else: +-- +2.34.1 + diff --git a/SPECS/sos.spec b/SPECS/sos.spec index 95931ee..e7c9ca6 100644 --- a/SPECS/sos.spec +++ b/SPECS/sos.spec @@ -4,14 +4,14 @@ Summary: A set of tools to gather troubleshooting information from a system Name: sos -Version: 4.1 -Release: 9%{?dist} +Version: 4.2 +Release: 15%{?dist} Group: Applications/System Source0: https://github.com/sosreport/sos/archive/%{version}/sos-%{version}.tar.gz Source1: sos-audit-%{auditversion}.tgz License: GPLv2+ BuildArch: noarch -Url: http://github.com/sosreport/sos +Url: https://github.com/sosreport/sos BuildRequires: python3-devel BuildRequires: gettext Requires: libxml2-python3 @@ -21,30 +21,29 @@ Conflicts: vdsm < 4.40 Obsoletes: sos-collector Recommends: python3-pexpect Recommends: python3-requests -Patch1: sos-bz1930181-collect-cleaning-consistency.patch -Patch2: sos-bz1935603-manpages-see-also.patch -Patch3: sos-bz1937418-add-cmd-timeout.patch -Patch4: sos-bz1937298-ds-mask-password-in-ldif.patch -Patch5: sos-bz1939963-gather-cups-browsed-logs.patch -Patch6: sos-bz1940502-sssd-memcache-and-logs.patch -Patch7: sos-bz1942276-ibmvNIC-dynamic-debugs.patch -Patch8: sos-bz1956673-pulpcore-plugin.patch -Patch9: sos-bz1959413-saphana-traceback.patch -Patch10: sos-bz1961458-collect-nstat.patch -Patch11: sos-bz1961229-snapper-plugin-and-allocation-failures.patch -Patch12: sos-bz1925419-all-gluster-files.patch -Patch13: sos-bz1964499-obfuscate-fqdn-from-dnf-log.patch -Patch14: sos-bz1886711-enhance-tc-hw-offload.patch -Patch15: sos-bz1965001-fix-avc-copystating-proc-sys.patch -Patch16: sos-bz1967613-sssd-common.patch -Patch17: sos-bz1973675-ocp-cluster-cleaner.patch -Patch18: sos-bz1923938-sos-log-effective-options.patch -Patch19: sos-bz1985986-potential-issues-static-analyse.patch -Patch20: sos-bz1959598-conversions-and-upgrades.patch -Patch21: sos-bz1665947-rhui-plugin.patch -Patch22: sos-bz1985037-cleaner-AD-users-obfuscation.patch -Patch23: sos-bz2011349-replace-dropbox-by-sftp.patch - +Patch1: sos-bz2011413-cpuX-individual-sizelimits.patch +Patch2: sos-bz1998521-unpackaged-recursive-symlink.patch +Patch3: sos-bz1998433-opacapture-under-allow-system-changes.patch +Patch4: sos-bz2002145-kernel-psi.patch +Patch5: sos-bz2001096-iptables-save-under-nf_tables-kmod.patch +Patch6: sos-bz1873185-estimate-only-option.patch +Patch7: sos-bz2005195-iptables-based-on-ntf.patch +Patch8: sos-bz2011506-foreman-puma-status.patch +Patch9: sos-bz2012856-dryrun-uncaught-exception.patch +Patch10: sos-bz2004929-openvswitch-offline-analysis.patch +Patch11: sos-bz2012857-plugin-timeout-unhandled-exception.patch +Patch12: sos-bz2018033-plugin-timeouts-proper-handling.patch +Patch13: sos-bz2020777-filter-namespace-per-pattern.patch +Patch14: sos-bz2023867-cleaner-hostnames-improvements.patch +Patch15: sos-bz2025610-RHTS-api-change.patch +Patch16: 
sos-bz2025403-nvidia-GPU-info.patch
+Patch17: sos-bz2030741-rhui-logs.patch
+Patch18: sos-bz2036697-ocp-backports.patch
+Patch19: sos-bz2043102-foreman-tasks-msgpack.patch
+Patch20: sos-bz2041488-virsh-in-foreground.patch
+Patch21: sos-bz2042966-ovn-proper-package-enablement.patch
+Patch22: sos-bz2054882-plugopt-logging-effective-opts.patch
+Patch23: sos-bz2055547-honour-plugins-timeout-hardcoded.patch
 %description
 Sos is a set of tools that gathers information about system
@@ -144,13 +143,105 @@ of the system. Currently storage and filesystem commands are audited.
 %ghost /etc/audit/rules.d/40-sos-storage.rules
 %changelog
-* Tue Nov 30 2021 Pavel Moravec = 4.1-9
-- [redhat] Fix broken URI to upload to customer portal
- Resolves: bz2011349
+* Wed Feb 23 2022 Pavel Moravec = 4.2-15
+- [sosnode] Handle downstream versioning for runtime option
+ Resolves: bz2036697
+- [options] Fix logging on plugopts in effective sos command
+ Resolves: bz2054882
+- [report] Honor plugins' hardcoded plugin_timeout
+ Resolves: bz2055547
+- [policies] Set fallback to None sysroot, don't chroot to '/'
+ Resolves: bz1873185
+- [ovn_central] Rename container responsible for Red Hat
+ Resolves: bz2042966

-* Mon Nov 22 2021 Pavel Moravec = 4.1-8
-- [Red Hat] Update policy to use SFTP, update RHST API to v2
- Resolves: bz2011349
+* Wed Jan 26 2022 Pavel Moravec = 4.2-13
+- [virsh] Catch parsing exception
+ Resolves: bz2041488
+
+* Tue Jan 25 2022 Pavel Moravec = 4.2-12
+- [foreman] Use psql-msgpack-decode wrapper for dynflow >= 1.6
+ Resolves: bz2043102
+- [virsh] Call virsh commands in the foreground / with a TTY
+ Resolves: bz2041488
+- [ovn_central] Account for Red Hat ovn package naming
+ Resolves: bz2042966
+- [clean,parsers] Build regex lists for static items only once
+ Resolves: bz2036697
+
+* Mon Jan 10 2022 Pavel Moravec = 4.2-11
+- [report] Add journal logs for NetworkManager plugin
+ Resolves: bz2036697
+
+* Fri Jan 07 2022 Pavel Moravec = 4.2-9
+- add oc transport, backport various PRs for OCP
+ Resolves: bz2036697
+- [report] Provide better warning about estimate-mode
+ Resolves: bz1873185
+- [hostname] Fix loading and detection of long base domains
+ Resolves: bz2023867
+
+* Sun Dec 19 2021 Pavel Moravec = 4.2-8
+- [rhui] New log folder
+ Resolves: bz2030741
+- [nvidia] Patch to update nvidia plugin for GPU info
+ Resolves: bz2025403
+- [hostname] Fix edge case for new hosts in a known subdomain
+ Resolves: bz2023867
+
+* Wed Dec 08 2021 Pavel Moravec = 4.2-7
+- [hostname] Simplify case matching for domains
+ Resolves: bz2023867
+
+* Tue Nov 30 2021 Pavel Moravec = 4.2-6
+- [redhat] Fix broken URI to upload to customer portal
+ Resolves: bz2025610
+
+* Mon Nov 22 2021 Pavel Moravec = 4.2-5
+- [clean,hostname_parser] Source /etc/hosts for obfuscation
+ Resolves: bz2023867
+- [clean, hostname] Fix unintentionally case sensitive
+ Resolves: bz2023863
+- [redhat] update SFTP API version to v2
+ Resolves: bz2025610
+
+* Tue Nov 16 2021 Pavel Moravec = 4.2-4
+- [report] Calculate sizes of dirs, symlinks and manifest in
+ Resolves: bz1873185
+- [report] shutdown threads for timeouted plugins
+ Resolves: bz2012857
+- [report] fix filter_namespace per pattern
+ Resolves: bz2020777
+- Ensure specific plugin timeouts are only set
+ Resolves: bz2018033
+
+* Wed Nov 03 2021 Pavel Moravec = 4.2-2
+- [firewall_tables] call iptables -t
based on nft + Resolves: bz2005195 +- [report] Count with sos_logs and sos_reports in + Resolves: bz1873185 +- [foreman] Collect puma status and stats + Resolves: bz2011506 +- [report] Overwrite pred=None before refering predicate + Resolves: bz2012856 +- [openvswitch] add commands for offline analysis + Resolves: bz2004929 + +* Wed Oct 06 2021 Pavel Moravec = 4.2-1 +- Rebase on upstream 4.2 + Resolves: bz1998133 +- [report] Implement --estimate-only + Resolves: bz1873185 +- [omnipath_client] Opacapture to run only with allow changes + Resolves: bz1998433 +- [unpackaged] deal with recursive loop of symlinks properly + Resolves: bz1998521 +- [networking] prevent iptables-save commands to load nf_tables + Resolves: bz2001096 +- [kernel] Capture Pressure Stall Information + Resolves: bz2002145 +- [processor] Apply sizelimit to /sys/devices/system/cpu/cpuX + Resolves: bz2011413 * Wed Aug 11 2021 Pavel Moravec = 4.1-5 - [report,collect] unify --map-file arguments