forked from rpms/sos

import sos-4.5.0-1.el9

CentOS Sources 2023-03-16 11:27:58 +00:00 committed by Stepan Oksanichenko
parent 0c65a38c25
commit 817ff617a3
16 changed files with 36 additions and 2150 deletions

.gitignore

@@ -1,2 +1,2 @@
-SOURCES/sos-4.3.tar.gz
+SOURCES/sos-4.5.0.tar.gz
 SOURCES/sos-audit-0.3.tgz


@@ -1,2 +1,2 @@
-6d443271a3eb26af8fb400ed417a4b572730d316 SOURCES/sos-4.3.tar.gz
+d5e166c75250aef01c86a3a9d8c9fcc8db335f4e SOURCES/sos-4.5.0.tar.gz
 9d478b9f0085da9178af103078bbf2fd77b0175a SOURCES/sos-audit-0.3.tgz


@@ -1,169 +0,0 @@
From b5389aa195675f473acdd22f20017a8854ff82d0 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Wed, 16 Feb 2022 08:43:32 +0100
Subject: [PATCH] [man] Mention sos-help in main sos manpage
Related to #2860
Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
man/en/sos.1 | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/man/en/sos.1 b/man/en/sos.1
index ce4918f99..c335b7e10 100644
--- a/man/en/sos.1
+++ b/man/en/sos.1
@@ -67,6 +67,14 @@ May be invoked via either \fBsos clean\fR, \fBsos cleaner\fR, \fBsos mask\fR,
or via the \fB--clean\fR, \fB--cleaner\fR or \fB --mask\fR options
for \fBreport\fR and \fBcollect\fR.
+.TP
+.B help
+This subcommand is used to retrieve more detailed information on the various SoS
+commands and components than is directly available in either other manpages or
+--help output.
+
+See \fB sos help --help\fR and \fB man sos-help\fR for more information.
+
.SH GLOBAL OPTIONS
sos components provide their own set of options, however the following are available
to be set across all components.
From ac4eb48fa35c13b99ada41540831412480babf8d Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Wed, 16 Feb 2022 08:44:16 +0100
Subject: [PATCH] [setup] Add sos-help to build process
Resolves: #2860
Closes: #2861
Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
setup.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/setup.py b/setup.py
index 25e87a71b..8db8641f0 100644
--- a/setup.py
+++ b/setup.py
@@ -90,7 +90,7 @@ def copy_file (self, filename, dirname):
('share/man/man1', ['man/en/sosreport.1', 'man/en/sos-report.1',
'man/en/sos.1', 'man/en/sos-collect.1',
'man/en/sos-collector.1', 'man/en/sos-clean.1',
- 'man/en/sos-mask.1']),
+ 'man/en/sos-mask.1', 'man/en/sos-help.1']),
('share/man/man5', ['man/en/sos.conf.5']),
('share/licenses/sos', ['LICENSE']),
('share/doc/sos', ['AUTHORS', 'README.md']),
@@ -102,7 +102,8 @@ def copy_file (self, filename, dirname):
'sos.policies.package_managers', 'sos.policies.init_systems',
'sos.report', 'sos.report.plugins', 'sos.collector',
'sos.collector.clusters', 'sos.collector.transports', 'sos.cleaner',
- 'sos.cleaner.mappings', 'sos.cleaner.parsers', 'sos.cleaner.archives'
+ 'sos.cleaner.mappings', 'sos.cleaner.parsers', 'sos.cleaner.archives',
+ 'sos.help'
],
cmdclass=cmdclass,
command_options=command_options,
From de9b020a72d1ceda39587db4c6d5acf72cd90da2 Mon Sep 17 00:00:00 2001
From: Fernando Royo <froyo@redhat.com>
Date: Tue, 15 Feb 2022 10:00:38 +0100
Subject: [PATCH] [ovn_central] Rename container responsible of Red Hat
ovn_central plugin
The ovn_central plugin runs in a container named 'ovn-dbs-bundle*'.
A typo was identified in the plugin's container name match, which left
ovn_central disabled by default because it did not recognize any
container responsible for it.
This patch fixes the container name match and looks up the schema db
in both locations, keeping backward compatibility with openvswitch.
---
sos/report/plugins/ovn_central.py | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
index 2f0438df3..2f34bff09 100644
--- a/sos/report/plugins/ovn_central.py
+++ b/sos/report/plugins/ovn_central.py
@@ -24,7 +24,7 @@ class OVNCentral(Plugin):
short_desc = 'OVN Northd'
plugin_name = "ovn_central"
profiles = ('network', 'virt')
- containers = ('ovs-db-bundle.*',)
+ containers = ('ovn-dbs-bundle.*',)
def get_tables_from_schema(self, filename, skip=[]):
if self._container_name:
@@ -66,7 +66,7 @@ def add_database_output(self, tables, cmds, ovn_cmd):
cmds.append('%s list %s' % (ovn_cmd, table))
def setup(self):
- self._container_name = self.get_container_by_name('ovs-dbs-bundle.*')
+ self._container_name = self.get_container_by_name(self.containers[0])
ovs_rundir = os.environ.get('OVS_RUNDIR')
for pidfile in ['ovnnb_db.pid', 'ovnsb_db.pid', 'ovn-northd.pid']:
@@ -110,12 +110,11 @@ def setup(self):
'ovn-sbctl get-connection',
]
- schema_dir = '/usr/share/openvswitch'
-
- nb_tables = self.get_tables_from_schema(self.path_join(
- schema_dir, 'ovn-nb.ovsschema'))
-
- self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')
+ # backward compatibility
+ for path in ['/usr/share/openvswitch', '/usr/share/ovn']:
+ nb_tables = self.get_tables_from_schema(self.path_join(
+ path, 'ovn-nb.ovsschema'))
+ self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')
cmds = ovsdb_cmds
cmds += nbctl_cmds
@@ -125,9 +124,11 @@ def setup(self):
format(self.ovn_sbdb_sock_path),
"output": "Leader: self"}
if self.test_predicate(self, pred=SoSPredicate(self, cmd_outputs=co)):
- sb_tables = self.get_tables_from_schema(self.path_join(
- schema_dir, 'ovn-sb.ovsschema'), ['Logical_Flow'])
- self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl')
+ # backward compatibility
+ for path in ['/usr/share/openvswitch', '/usr/share/ovn']:
+ sb_tables = self.get_tables_from_schema(self.path_join(
+ path, 'ovn-sb.ovsschema'), ['Logical_Flow'])
+ self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl')
cmds += sbctl_cmds
# If OVN is containerized, we need to run the above commands inside
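
For readers unfamiliar with the schema files referenced above: an .ovsschema file is plain JSON whose top-level "tables" mapping names the database tables that the loop turns into `ovn-nbctl list <table>` / `ovn-sbctl list <table>` calls. A minimal sketch of that lookup follows; it is a simplified stand-in, not the plugin's actual get_tables_from_schema(), which also handles containerized lookups.

import json

def get_tables_from_schema(filename, skip=()):
    # .ovsschema files are JSON; the keys of their "tables" dict are table names
    with open(filename) as schema:
        db = json.load(schema)
    return [table for table in db.get('tables', {}) if table not in skip]

# e.g. get_tables_from_schema('/usr/share/ovn/ovn-nb.ovsschema')
# or   get_tables_from_schema('/usr/share/openvswitch/ovn-sb.ovsschema', ['Logical_Flow'])
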
From 7ebb2ce0bcd13c1b3aada648aceb20b5aff636d9 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Tue, 15 Feb 2022 14:18:02 -0500
Subject: [PATCH] [host] Skip entire /etc/sos/cleaner directory
While `default_mapping` is typically the only file expected under
`/etc/sos/cleaner/` it is possible for other mapping files (such as
backups) to appear there.
Make the `add_forbidden_path()` spec here target the entire cleaner
directory to avoid ever capturing these map files.
Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
sos/report/plugins/host.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sos/report/plugins/host.py b/sos/report/plugins/host.py
index 5e21da7b8..95a3b9cd9 100644
--- a/sos/report/plugins/host.py
+++ b/sos/report/plugins/host.py
@@ -20,7 +20,7 @@ class Host(Plugin, IndependentPlugin):
def setup(self):
- self.add_forbidden_path('/etc/sos/cleaner/default_mapping')
+ self.add_forbidden_path('/etc/sos/cleaner')
self.add_cmd_output('hostname', root_symlink='hostname')
self.add_cmd_output('uptime', root_symlink='uptime')

File diff suppressed because it is too large


@@ -1,67 +0,0 @@
From 4c92968ce461cdfc6a5d913748b2ce4f148ff4a9 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Thu, 10 Mar 2022 12:31:49 -0500
Subject: [PATCH] [tigervnc] Update collections for newer versions of TigerVNC
First, relaxes the file specifications for collection by capturing the
entire `/etc/tigervnc/` directory.
Second, adds collection of service status and journal output for each
configured vnc server. Collection of `vncserver -list` is kept for
backwards compatibility.
Finally, add a short docstring for the plugin for --help output.
Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
sos/report/plugins/tigervnc.py | 28 +++++++++++++++++++++++-----
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/sos/report/plugins/tigervnc.py b/sos/report/plugins/tigervnc.py
index 1302f6d4..e31aee25 100644
--- a/sos/report/plugins/tigervnc.py
+++ b/sos/report/plugins/tigervnc.py
@@ -12,17 +12,35 @@ from sos.report.plugins import Plugin, RedHatPlugin
class TigerVNC(Plugin, RedHatPlugin):
+ """
+ This plugin gathers information for VNC servers provided by the tigervnc
+ package. This is explicitly for server-side collections, not clients.
+
+ By default, this plugin will capture the contents of /etc/tigervnc, which
+ may include usernames. If usernames are sensitive information for end
+ users of sos, consider using the `--clean` option to obfuscate these
+ names.
+ """
short_desc = 'TigerVNC server configuration'
plugin_name = 'tigervnc'
packages = ('tigervnc-server',)
def setup(self):
- self.add_copy_spec([
- '/etc/tigervnc/vncserver-config-defaults',
- '/etc/tigervnc/vncserver-config-mandatory',
- '/etc/tigervnc/vncserver.users'
- ])
+ self.add_copy_spec('/etc/tigervnc/')
+
+ # service names are 'vncserver@$port' where $port is :1, :2, etc...
+ # however they are not reported via list-unit-files, only list-units
+ vncs = self.exec_cmd(
+ 'systemctl list-units --type=service --no-legend vncserver*'
+ )
+ if vncs['status'] == 0:
+ for serv in vncs['output'].splitlines():
+ vnc = serv.split()
+ if not vnc:
+ continue
+ self.add_service_status(vnc[0])
+ self.add_journal(vnc[0])
self.add_cmd_output('vncserver -list')
--
2.34.3
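
As a hedged illustration of the parsing step in the hunk above: `systemctl list-units --no-legend` prints one unit per line in UNIT / LOAD / ACTIVE / SUB / DESCRIPTION columns, so taking the first whitespace-separated field yields the service name to pass to add_service_status() and add_journal(). The sample below is illustrative, not captured from a real host.

# Illustrative shape of:
#   systemctl list-units --type=service --no-legend vncserver*
sample = """vncserver@:1.service loaded active running Remote desktop service (VNC)
vncserver@:2.service loaded inactive dead Remote desktop service (VNC)"""

units = [line.split()[0] for line in sample.splitlines() if line.split()]
print(units)  # ['vncserver@:1.service', 'vncserver@:2.service']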


@@ -1,39 +0,0 @@
From 7069e99d1c5c443f96a98a7ed6db67fa14683e67 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Thu, 17 Feb 2022 09:14:15 +0100
Subject: [PATCH] [report] Honor plugins' hardcoded plugin_timeout
Currently, plugin's plugin_timeout hardcoded default is superseded by
whatever --plugin-timeout value, even when this option is not used and
we eval it to TIMEOUT_DEFAULT.
In this case of not setting --plugin-timeout either -k plugin.timeout,
honour plugin's plugin_timeout instead.
Resolves: #2863
Closes: #2864
Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/__init__.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index cc5cb65b..336b4d22 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -636,7 +636,10 @@ class Plugin():
if opt_timeout is None:
_timeout = own_timeout
elif opt_timeout is not None and own_timeout == -1:
- _timeout = int(opt_timeout)
+ if opt_timeout == TIMEOUT_DEFAULT:
+ _timeout = default_timeout
+ else:
+ _timeout = int(opt_timeout)
elif opt_timeout is not None and own_timeout > -1:
_timeout = own_timeout
else:
--
2.34.3
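
A compact restatement of the precedence this hunk establishes may help. The helper below is illustrative only; the names mirror the variables visible in the hunk, and the TIMEOUT_DEFAULT value of 300 seconds is assumed here.

TIMEOUT_DEFAULT = 300  # sos's global default; value assumed here for illustration

def effective_timeout(opt_timeout, own_timeout, default_timeout):
    # opt_timeout: the --plugin-timeout value (an unset option evaluates to
    #              TIMEOUT_DEFAULT); None mirrors the first branch in the hunk
    # own_timeout: the plugin's hardcoded plugin_timeout (-1 means "not set")
    # default_timeout: the plugin's own default timeout
    if opt_timeout is None:
        return own_timeout
    if own_timeout == -1:
        # --plugin-timeout left at the global default: honour the plugin's default
        return default_timeout if opt_timeout == TIMEOUT_DEFAULT else int(opt_timeout)
    return own_timeout

print(effective_timeout(TIMEOUT_DEFAULT, -1, 900))  # 900 (not 300) after this patch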


@@ -1,68 +0,0 @@
From f3dc8cd574614572d441f76c02453fd85d0c57e2 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Wed, 27 Apr 2022 10:40:55 -0400
Subject: [PATCH] [report] --list-plugins should report used, not default,
option values
When using `--list-plugins`, sos should report the values that will be
used in a given command, or with a given config file, not what the
default values are.
By reporting the set value, users can be sure their configuration or
commandline settings are being honored correctly before executing a
report collection.
Closes: #2921
Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
sos/report/__init__.py | 22 +++++++++++++++-------
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/sos/report/__init__.py b/sos/report/__init__.py
index 74c7973a..8735c903 100644
--- a/sos/report/__init__.py
+++ b/sos/report/__init__.py
@@ -868,24 +868,32 @@ class SoSReport(SoSComponent):
_defaults = self.loaded_plugins[0][1].get_default_plugin_opts()
for _opt in _defaults:
opt = _defaults[_opt]
- val = opt.default
- if opt.default == -1:
- val = TIMEOUT_DEFAULT
+ val = opt.value
+ if opt.value == -1:
+ if _opt == 'timeout':
+ val = self.opts.plugin_timeout or TIMEOUT_DEFAULT
+ elif _opt == 'cmd-timeout':
+ val = self.opts.cmd_timeout or TIMEOUT_DEFAULT
+ else:
+ val = TIMEOUT_DEFAULT
+ if opt.name == 'postproc':
+ val = not self.opts.no_postproc
self.ui_log.info(" %-25s %-15s %s" % (opt.name, val, opt.desc))
self.ui_log.info("")
self.ui_log.info(_("The following plugin options are available:"))
for opt in self.all_options:
if opt.name in ('timeout', 'postproc', 'cmd-timeout'):
- continue
+ if opt.value == opt.default:
+ continue
# format option value based on its type (int or bool)
- if isinstance(opt.default, bool):
- if opt.default is True:
+ if isinstance(opt.value, bool):
+ if opt.value is True:
tmpopt = "on"
else:
tmpopt = "off"
else:
- tmpopt = opt.default
+ tmpopt = opt.value
if tmpopt is None:
tmpopt = 0
--
2.34.3


@@ -1,34 +0,0 @@
From 9b10abcdd4aaa41e2549438d5bc52ece86dcb21f Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Sat, 7 May 2022 14:23:04 +0200
Subject: [PATCH] [plugins] Allow 'str' PlugOpt type to accept any value
For PlugOpt type 'str', we should allow any content including e.g.
numbers, and interpret it as a string.
Resolves: #2922
Closes: #2935
Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/__init__.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index d6be42b9..2a42e6b0 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -452,6 +452,10 @@ class PluginOpt():
return self.__str__()
def set_value(self, val):
+ # 'str' type accepts any value, incl. numbers
+ if type('') in self.val_type:
+ self.value = str(val)
+ return
if not any([type(val) == _t for _t in self.val_type]):
valid = []
for t in self.val_type:
--
2.34.3
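
The net effect, shown with a simplified stand-in for PluginOpt (illustrative only, not sos's real class): any value handed to a 'str'-typed option is coerced to a string rather than rejected.

class FakePluginOpt:
    """Simplified stand-in for sos's PluginOpt, for illustration only."""
    def __init__(self, val_type=(str,)):
        self.val_type = val_type
        self.value = None

    def set_value(self, val):
        # 'str' type accepts any value, incl. numbers
        if str in self.val_type:
            self.value = str(val)
            return
        raise ValueError(f"{val!r} is not one of {self.val_type}")

opt = FakePluginOpt(val_type=(str,))
opt.set_value(2048)   # previously rejected because 2048 is an int
print(opt.value)      # '2048'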


@@ -1,31 +0,0 @@
From 5e27b92a8a9f066af4c41ddd0bedc7c69187ff52 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Mon, 2 May 2022 22:13:34 +0200
Subject: [PATCH] [utilities] Close file only when storing to file
Call _output.close() only when to_file=true.
Closes: #2925
Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/utilities.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/sos/utilities.py b/sos/utilities.py
index d2f73d86..1075d1d4 100644
--- a/sos/utilities.py
+++ b/sos/utilities.py
@@ -212,7 +212,8 @@ def sos_get_command_output(command, timeout=TIMEOUT_DEFAULT, stderr=False,
p.wait(timeout if timeout else None)
except Exception:
p.terminate()
- _output.close()
+ if to_file:
+ _output.close()
# until we separate timeouts from the `timeout` command
# handle per-cmd timeouts via Plugin status checks
return {'status': 124, 'output': reader.get_contents(),
--
2.34.3


@@ -1,230 +0,0 @@
From 3b84b4ccfa9e4924a5a3829d3810568dfb69bf63 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Fri, 18 Mar 2022 16:25:35 -0400
Subject: [PATCH 1/2] [pacemaker] Redesign node enumeration logic
It has been found that `pcs status` output is liable to change, which
ends up breaking our parsing of node lists when using it on newer
versions.
Instead, first try to parse through `crm_mon` output, which is what `pcs
status` uses under the hood, but as a stable and reliable xml format.
Failing that, for example if the `--primary` node is not functioning as
part of the cluster, source `/etc/corosync/corosync.conf` instead.
Related: RHBZ2065805
Related: RHBZ2065811
Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
sos/collector/clusters/pacemaker.py | 110 +++++++++++++++++++---------
1 file changed, 76 insertions(+), 34 deletions(-)
diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py
index 55024314..49d0ce51 100644
--- a/sos/collector/clusters/pacemaker.py
+++ b/sos/collector/clusters/pacemaker.py
@@ -8,7 +8,11 @@
#
# See the LICENSE file in the source distribution for further information.
+import re
+
from sos.collector.clusters import Cluster
+from setuptools._vendor.packaging import version
+from xml.etree import ElementTree
class pacemaker(Cluster):
@@ -18,42 +22,80 @@ class pacemaker(Cluster):
packages = ('pacemaker',)
option_list = [
('online', True, 'Collect nodes listed as online'),
- ('offline', True, 'Collect nodes listed as offline')
+ ('offline', True, 'Collect nodes listed as offline'),
+ ('only-corosync', False, 'Only use corosync.conf to enumerate nodes')
]
def get_nodes(self):
- self.res = self.exec_primary_cmd('pcs status')
- if self.res['status'] != 0:
- self.log_error('Cluster status could not be determined. Is the '
- 'cluster running on this node?')
- return []
- if 'node names do not match' in self.res['output']:
- self.log_warn('Warning: node name mismatch reported. Attempts to '
- 'connect to some nodes may fail.\n')
- return self.parse_pcs_output()
-
- def parse_pcs_output(self):
- nodes = []
- if self.get_option('online'):
- nodes += self.get_online_nodes()
- if self.get_option('offline'):
- nodes += self.get_offline_nodes()
- return nodes
-
- def get_online_nodes(self):
- for line in self.res['output'].splitlines():
- if line.startswith('Online:'):
- nodes = line.split('[')[1].split(']')[0]
- return [n for n in nodes.split(' ') if n]
-
- def get_offline_nodes(self):
- offline = []
- for line in self.res['output'].splitlines():
- if line.startswith('Node') and line.endswith('(offline)'):
- offline.append(line.split()[1].replace(':', ''))
- if line.startswith('OFFLINE:'):
- nodes = line.split('[')[1].split(']')[0]
- offline.extend([n for n in nodes.split(' ') if n])
- return offline
+ self.nodes = []
+ # try crm_mon first
+ try:
+ if not self.get_option('only-corosync'):
+ try:
+ self.get_nodes_from_crm()
+ except Exception as err:
+ self.log_warn("Falling back to sourcing corosync.conf. "
+ "Could not parse crm_mon output: %s" % err)
+ if not self.nodes:
+ # fallback to corosync.conf, in case the node we're inspecting
+ # is offline from the cluster
+ self.get_nodes_from_corosync()
+ except Exception as err:
+ self.log_error("Could not determine nodes from cluster: %s" % err)
+
+ _shorts = [n for n in self.nodes if '.' not in n]
+ if _shorts:
+ self.log_warn(
+ "WARNING: Node addresses '%s' may not resolve locally if you "
+ "are not running on a node in the cluster. Try using option "
+ "'-c pacemaker.only-corosync' if these connections fail."
+ % ','.join(_shorts)
+ )
+ return self.nodes
+
+ def get_nodes_from_crm(self):
+ """
+ Try to parse crm_mon output for node list and status.
+ """
+ xmlopt = '--output-as=xml'
+ # older pacemaker had a different option for xml output
+ _ver = self.exec_primary_cmd('crm_mon --version')
+ if _ver['status'] == 0:
+ cver = _ver['output'].split()[1].split('-')[0]
+ if not version.parse(cver) > version.parse('2.0.3'):
+ xmlopt = '--as-xml'
+ else:
+ return
+ _out = self.exec_primary_cmd(
+ "crm_mon --one-shot --inactive %s" % xmlopt,
+ need_root=True
+ )
+ if _out['status'] == 0:
+ self.parse_crm_xml(_out['output'])
+
+ def parse_crm_xml(self, xmlstring):
+ """
+ Parse the xml output string provided by crm_mon
+ """
+ _xml = ElementTree.fromstring(xmlstring)
+ nodes = _xml.find('nodes')
+ for node in nodes:
+ _node = node.attrib
+ if self.get_option('online') and _node['online'] == 'true':
+ self.nodes.append(_node['name'])
+ elif self.get_option('offline') and _node['online'] == 'false':
+ self.nodes.append(_node['name'])
+
+ def get_nodes_from_corosync(self):
+ """
+ As a fallback measure, read corosync.conf to get the node list. Note
+ that this prevents us from separating online nodes from offline nodes.
+ """
+ self.log_warn("WARNING: unable to distinguish online nodes from "
+ "offline nodes when sourcing from corosync.conf")
+ cc = self.primary.read_file('/etc/corosync/corosync.conf')
+ nodes = re.findall(r'((\sring0_addr:)(.*))', cc)
+ for node in nodes:
+ self.nodes.append(node[-1].strip())
# vim: set et ts=4 sw=4 :
--
2.34.3
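
To make the XML parsing above concrete, here is a self-contained sketch; the sample document only mirrors the <nodes> element of `crm_mon --output-as=xml` output and is illustrative, not verbatim crm_mon output.

from xml.etree import ElementTree

sample = """<pacemaker-result api-version="2.0" request="crm_mon --one-shot --inactive --output-as=xml">
  <nodes>
    <node name="node1.example.com" id="1" online="true"/>
    <node name="node2.example.com" id="2" online="false"/>
  </nodes>
</pacemaker-result>"""

nodes = ElementTree.fromstring(sample).find('nodes')
online = [n.attrib['name'] for n in nodes if n.attrib['online'] == 'true']
offline = [n.attrib['name'] for n in nodes if n.attrib['online'] == 'false']
print(online, offline)  # ['node1.example.com'] ['node2.example.com']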
From 6701a7d77ecc998b018b54ecc00f9fd102ae9518 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Mon, 21 Mar 2022 12:05:59 -0400
Subject: [PATCH 2/2] [clusters] Allow clusters to not add localhost to node
list
For most of our supported clusters, we end up needing to add the
local host executing `sos collect` to the node list (unless `--no-local`
is used) as that accounts for the primary node that may otherwise be
left off. However, this is not helpful for clusters that may report
node names as something other than resolvable names. In those cases,
such as with pacemaker, adding the local hostname may result in
duplicate collections.
Add a toggle to cluster profiles via a new `strict_node_list` class attr
that, if True, will skip this addition. This toggle is default `False`
to preserve existing behavior, and is now enabled for `pacemaker`
specifically.
Related: RHBZ#2065821
Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
sos/collector/__init__.py | 3 ++-
sos/collector/clusters/__init__.py | 4 ++++
sos/collector/clusters/pacemaker.py | 1 +
3 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
index a8bb0064..d898ca34 100644
--- a/sos/collector/__init__.py
+++ b/sos/collector/__init__.py
@@ -1073,7 +1073,8 @@ class SoSCollector(SoSComponent):
for node in self.node_list:
if host == node.split('.')[0]:
self.node_list.remove(node)
- self.node_list.append(self.hostname)
+ if not self.cluster.strict_node_list:
+ self.node_list.append(self.hostname)
self.reduce_node_list()
try:
_node_max = len(max(self.node_list, key=len))
diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py
index f3f550ad..f00677b8 100644
--- a/sos/collector/clusters/__init__.py
+++ b/sos/collector/clusters/__init__.py
@@ -57,6 +57,10 @@ class Cluster():
sos_plugin_options = {}
sos_preset = ''
cluster_name = None
+ # set this to True if the local host running collect should *not* be
+ # forcibly added to the node list. This can be helpful in situations where
+ # the host's fqdn and the name the cluster uses are different
+ strict_node_list = False
def __init__(self, commons):
self.primary = None
diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py
index 49d0ce51..bebcb265 100644
--- a/sos/collector/clusters/pacemaker.py
+++ b/sos/collector/clusters/pacemaker.py
@@ -20,6 +20,7 @@ class pacemaker(Cluster):
cluster_name = 'Pacemaker High Availability Cluster Manager'
sos_plugins = ['pacemaker']
packages = ('pacemaker',)
+ strict_node_list = True
option_list = [
('online', True, 'Collect nodes listed as online'),
('offline', True, 'Collect nodes listed as offline'),
--
2.34.3


@@ -1,66 +0,0 @@
From 5fd872c64c53af37015f366295e0c2418c969757 Mon Sep 17 00:00:00 2001
From: Yedidyah Bar David <didi@redhat.com>
Date: Thu, 26 May 2022 16:43:21 +0300
Subject: [PATCH] [ovirt] answer files: Filter out all password keys
Instead of hard-coding specific keys and having to maintain them over
time, replace the values of all keys that have 'password' in their name.
I think this covers all our current and hopefully future keys. It might
add "false positives" - keys that are not passwords but have 'password'
in their name - and I think that's a risk worth taking.
Sadly, the engine admin password prompt's name is
'OVESETUP_CONFIG_ADMIN_SETUP', which does not include 'password', so has
to be listed specifically.
A partial list of keys added since the replaced code was written:
- grafana-related stuff
- keycloak-related stuff
- otopi-style answer files
Signed-off-by: Yedidyah Bar David <didi@redhat.com>
Change-Id: I416c6e4078e7c3638493eb271d08d73a0c22b5ba
---
sos/report/plugins/ovirt.py | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/sos/report/plugins/ovirt.py b/sos/report/plugins/ovirt.py
index 09647bf1..3b1bb29b 100644
--- a/sos/report/plugins/ovirt.py
+++ b/sos/report/plugins/ovirt.py
@@ -241,19 +241,22 @@ class Ovirt(Plugin, RedHatPlugin):
r'{key}=********'.format(key=key)
)
- # Answer files contain passwords
- for key in (
- 'OVESETUP_CONFIG/adminPassword',
- 'OVESETUP_CONFIG/remoteEngineHostRootPassword',
- 'OVESETUP_DWH_DB/password',
- 'OVESETUP_DB/password',
- 'OVESETUP_REPORTS_CONFIG/adminPassword',
- 'OVESETUP_REPORTS_DB/password',
+ # Answer files contain passwords.
+ # Replace all keys that have 'password' in them, instead of hard-coding
+ # here the list of keys, which changes between versions.
+ # Sadly, the engine admin password prompt name does not contain
+ # 'password'... so neither does the env key.
+ for item in (
+ 'password',
+ 'OVESETUP_CONFIG_ADMIN_SETUP',
):
self.do_path_regex_sub(
r'/var/lib/ovirt-engine/setup/answers/.*',
- r'{key}=(.*)'.format(key=key),
- r'{key}=********'.format(key=key)
+ re.compile(
+ r'(?P<key>[^=]*{item}[^=]*)=.*'.format(item=item),
+ flags=re.IGNORECASE
+ ),
+ r'\g<key>=********'
)
# aaa profiles contain passwords
--
2.34.3
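
For clarity, this is how the case-insensitive key match above behaves on a few answer-file lines; the keys and values below are invented for illustration.

import re

pattern = re.compile(r'(?P<key>[^=]*password[^=]*)=.*', flags=re.IGNORECASE)

for line in ('OVESETUP_DB/password=str:secret',
             'OVESETUP_CONFIG/grafanaDbPassword=str:alsosecret',
             'OVESETUP_CONFIG/fqdn=str:engine.example.com'):
    print(pattern.sub(r'\g<key>=********', line))

# OVESETUP_DB/password=********
# OVESETUP_CONFIG/grafanaDbPassword=********
# OVESETUP_CONFIG/fqdn=str:engine.example.com   (no 'password' in the key, left alone)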


@@ -1,73 +0,0 @@
From c2e66fa4dae51f03c7310ba5278897ddecac1aad Mon Sep 17 00:00:00 2001
From: Nadia Pinaeva <npinaeva@redhat.com>
Date: Thu, 2 Jun 2022 15:43:09 +0200
Subject: [PATCH] crio: switch from parsing output in table format to json
Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
---
sos/policies/runtimes/crio.py | 30 ++++++++++++++++++++----------
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/sos/policies/runtimes/crio.py b/sos/policies/runtimes/crio.py
index 55082d07..4cae1ecc 100644
--- a/sos/policies/runtimes/crio.py
+++ b/sos/policies/runtimes/crio.py
@@ -7,6 +7,7 @@
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
+import json
from sos.policies.runtimes import ContainerRuntime
from sos.utilities import sos_get_command_output
@@ -29,14 +30,15 @@ class CrioContainerRuntime(ContainerRuntime):
:type get_all: ``bool``
"""
containers = []
- _cmd = "%s ps %s" % (self.binary, '-a' if get_all else '')
+ _cmd = "%s ps %s -o json" % (self.binary, '-a' if get_all else '')
if self.active:
out = sos_get_command_output(_cmd, chroot=self.policy.sysroot)
- if out['status'] == 0:
- for ent in out['output'].splitlines()[1:]:
- ent = ent.split()
+ if out["status"] == 0:
+ out_json = json.loads(out["output"])
+ for container in out_json["containers"]:
# takes the form (container_id, container_name)
- containers.append((ent[0], ent[-3]))
+ containers.append(
+ (container["id"], container["metadata"]["name"]))
return containers
def get_images(self):
@@ -47,13 +49,21 @@ class CrioContainerRuntime(ContainerRuntime):
"""
images = []
if self.active:
- out = sos_get_command_output("%s images" % self.binary,
+ out = sos_get_command_output("%s images -o json" % self.binary,
chroot=self.policy.sysroot)
if out['status'] == 0:
- for ent in out['output'].splitlines():
- ent = ent.split()
- # takes the form (image_name, image_id)
- images.append((ent[0] + ':' + ent[1], ent[2]))
+ out_json = json.loads(out["output"])
+ for image in out_json["images"]:
+ # takes the form (repository:tag, image_id)
+ if len(image["repoTags"]) > 0:
+ for repo_tag in image["repoTags"]:
+ images.append((repo_tag, image["id"]))
+ else:
+ if len(image["repoDigests"]) == 0:
+ image_name = "<none>"
+ else:
+ image_name = image["repoDigests"][0].split("@")[0]
+ images.append((image_name + ":<none>", image["id"]))
return images
def fmt_container_cmd(self, container, cmd, quotecmd):
--
2.34.3
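
The JSON parsed above has roughly the shape sketched below; only the keys actually used by the plugin are shown, and the IDs and names are invented for illustration.

import json

sample = json.loads("""
{
  "containers": [
    {"id": "3eef5bd2a26d6", "metadata": {"name": "kube-apiserver"}},
    {"id": "9a01afc1f4f45", "metadata": {"name": "etcd"}}
  ],
  "images": [
    {"id": "sha256:aaa...", "repoTags": ["registry.example.com/etcd:3.5"], "repoDigests": []}
  ]
}
""")

containers = [(c["id"], c["metadata"]["name"]) for c in sample["containers"]]
images = [(tag, img["id"]) for img in sample["images"] for tag in img["repoTags"]]
print(containers)  # [('3eef5bd2a26d6', 'kube-apiserver'), ('9a01afc1f4f45', 'etcd')]
print(images)      # [('registry.example.com/etcd:3.5', 'sha256:aaa...')]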


@@ -1,116 +0,0 @@
From 1dc3625fabea7331570f713fd1c87ac812d72d92 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Wed, 18 May 2022 13:39:38 -0400
Subject: [PATCH] [Plugin] Make forbidden path checks more efficient
Forbidden path checks have up until now worked by taking a given file
path (potentially with globs), expanding that against all discovered
files that actually exist on the system, and then comparing a potential
collection path against that list.
While this works, and works reasonably fast for most scenarios, it isn't
very efficient and causes significant slow downs when a non-standard
configuration is in play - e.g. thousands of block devices which sos
would individually have to compare against tens of thousands of paths
for every path the `block` plugin wants to collect.
Improve this by first not expanding the forbidden path globs, but taking
them as distinct patterns, translating from shell-style (to maintain
historical precedent of using globs to specify paths to be skipped) to
python regex patterns as needed. Second, use `re` to handle our pattern
matching for comparison against the distinct patterns provided by a
plugin to skip.
Closes: #2938
Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
sos/report/plugins/__init__.py | 20 +++++++++-----------
sos/report/plugins/cgroups.py | 6 ++----
sos/report/plugins/pulpcore.py | 2 +-
sos/report/plugins/rhui.py | 2 +-
4 files changed, 13 insertions(+), 17 deletions(-)
diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index 2a42e6b0a..ba1397a8a 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -46,11 +46,6 @@ def _mangle_command(command, name_max):
return mangledname
-def _path_in_path_list(path, path_list):
- return any((p == path or path.startswith(os.path.abspath(p)+os.sep)
- for p in path_list))
-
-
def _node_type(st):
""" return a string indicating the type of special node represented by
the stat buffer st (block, character, fifo, socket).
@@ -1407,7 +1402,9 @@ def _get_dest_for_srcpath(self, srcpath):
return None
def _is_forbidden_path(self, path):
- return _path_in_path_list(path, self.forbidden_paths)
+ return any(
+ re.match(forbid, path) for forbid in self.forbidden_paths
+ )
def _is_policy_forbidden_path(self, path):
return any([
@@ -1495,14 +1492,12 @@ def _do_copy_path(self, srcpath, dest=None):
'symlink': "no"
})
- def add_forbidden_path(self, forbidden, recursive=False):
+ def add_forbidden_path(self, forbidden):
"""Specify a path, or list of paths, to not copy, even if it's part of
an ``add_copy_spec()`` call
:param forbidden: A filepath to forbid collection from
:type forbidden: ``str`` or a ``list`` of strings
-
- :param recursive: Should forbidden glob be applied recursively
"""
if isinstance(forbidden, str):
forbidden = [forbidden]
@@ -1512,8 +1507,11 @@ def add_forbidden_path(self, forbidden, recursive=False):
for forbid in forbidden:
self._log_info("adding forbidden path '%s'" % forbid)
- for path in glob.glob(forbid, recursive=recursive):
- self.forbidden_paths.append(path)
+ if "*" in forbid:
+ # calling translate() here on a dir-level path will break the
+ # re.match() call during path comparison
+ forbid = fnmatch.translate(forbid)
+ self.forbidden_paths.append(forbid)
def set_option(self, optionname, value):
"""Set the named option to value. Ensure the original type of the
diff --git a/sos/report/plugins/pulpcore.py b/sos/report/plugins/pulpcore.py
index 6c4237cae..f6bc194c7 100644
--- a/sos/report/plugins/pulpcore.py
+++ b/sos/report/plugins/pulpcore.py
@@ -89,7 +89,7 @@ class PulpCore(Plugin, IndependentPlugin
"/etc/pki/pulp/*"
])
# skip collecting certificate keys
- self.add_forbidden_path("/etc/pki/pulp/**/*.key", recursive=True)
+ self.add_forbidden_path("/etc/pki/pulp/**/*.key")
self.add_cmd_output("rq info -u redis://localhost:6379/8",
env={"LC_ALL": "en_US.UTF-8"},
diff --git a/sos/report/plugins/rhui.py b/sos/report/plugins/rhui.py
index add024613..8063fd51c 100644
--- a/sos/report/plugins/rhui.py
+++ b/sos/report/plugins/rhui.py
@@ -30,7 +30,7 @@ def setup(self):
"/var/log/rhui/*",
])
# skip collecting certificate keys
- self.add_forbidden_path("/etc/pki/rhui/**/*.key", recursive=True)
+ self.add_forbidden_path("/etc/pki/rhui/**/*.key")
# call rhui-manager commands with 1m timeout and
# with an env. variable ensuring that "RHUI Username:"
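
A minimal sketch of the new mechanism, assuming nothing beyond the Python standard library: globs are translated to regexes once, and each candidate collection path is then tested with re.match() instead of being compared against an expanded file list.

import fnmatch
import re

forbidden = []
for forbid in ('/etc/pki/pulp/**/*.key', '/proc/kcore'):
    if '*' in forbid:
        # shell-style glob -> regex, mirroring the add_forbidden_path() change above
        forbid = fnmatch.translate(forbid)
    forbidden.append(forbid)

def is_forbidden(path):
    return any(re.match(f, path) for f in forbidden)

print(is_forbidden('/etc/pki/pulp/certs/server.key'))  # True
print(is_forbidden('/etc/pki/pulp/settings.py'))       # False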


@@ -1,40 +0,0 @@
From 7d1ee59fc659467e6860e72322e976ddc5c17db3 Mon Sep 17 00:00:00 2001
From: Juan Orti Alcaine <jortialc@redhat.com>
Date: Mon, 6 Jun 2022 16:35:51 +0200
Subject: [PATCH] [vdsm] Set LVM option use_devicesfile=0
Since RHV 4.4 SP1, vdsm configures LVM to use a devicesfile, causing
the LVM filter configuration used by sos to be ignored.
This change disables the use of the devicesfile, so that the information
of the devices used for RHV storage domains can be collected.
Fixes: RHBZ#2093993
Signed-off-by: Juan Orti <jortialc@redhat.com>
---
sos/report/plugins/vdsm.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/sos/report/plugins/vdsm.py b/sos/report/plugins/vdsm.py
index ee5befbb1..146d223c2 100644
--- a/sos/report/plugins/vdsm.py
+++ b/sos/report/plugins/vdsm.py
@@ -29,7 +29,8 @@
# use_lvmetad is set to 0 in order not to show cached, old lvm metadata.
# use_lvmetad=0
#
-# preferred_names and filter config values are set to capture Vdsm devices.
+# preferred_names, use_devicesfile and filter config values are set to
+# capture Vdsm devices.
# preferred_names=[ '^/dev/mapper/' ]
# filter=[ 'a|^/dev/mapper/.*|', 'r|.*|' ]
LVM_CONFIG = """
@@ -43,6 +44,7 @@
ignore_suspended_devices=1
write_cache_state=0
disable_after_error_count=3
+ use_devicesfile=0
filter=["a|^/dev/disk/by-id/dm-uuid-mpath-|", "r|.+|"]
}
"""


@@ -1,62 +0,0 @@
From 765f5f283bdb4747b0069f2f5d3381134b4b9a95 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Thu, 15 Sep 2022 12:36:42 -0400
Subject: [PATCH] [ocp] Add newly required labels to temp OCP namespace
Newer OCP versions have a more restrictive default deployment
configuration. As such, add the required labels to the temporary
namespace/project we use for collections.
Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
sos/collector/clusters/ocp.py | 23 ++++++++++++++++++++++-
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
index 06301536f..92c4e04a2 100644
--- a/sos/collector/clusters/ocp.py
+++ b/sos/collector/clusters/ocp.py
@@ -114,12 +114,32 @@ class ocp(Cluster):
self.log_info("Creating new temporary project '%s'" % self.project)
ret = self.exec_primary_cmd("oc new-project %s" % self.project)
if ret['status'] == 0:
+ self._label_sos_project()
return True
self.log_debug("Failed to create project: %s" % ret['output'])
raise Exception("Failed to create temporary project for collection. "
"\nAborting...")
+ def _label_sos_project(self):
+ """Add pertinent labels to the temporary project we've created so that
+ our privileged containers can properly run.
+ """
+ labels = [
+ "security.openshift.io/scc.podSecurityLabelSync=false",
+ "pod-security.kubernetes.io/enforce=privileged"
+ ]
+ for label in labels:
+ ret = self.exec_primary_cmd(
+ self.fmt_oc_cmd(
+ f"label namespace {self.project} {label} --overwrite"
+ )
+ )
+ if not ret['status'] == 0:
+ raise Exception(
+ f"Error applying namespace labels: {ret['output']}"
+ )
+
def cleanup(self):
"""Remove the project we created to execute within
"""
@@ -231,8 +251,9 @@ def get_nodes(self):
for node_name, node in self.node_dict.items():
if roles:
for role in roles:
- if role == node['roles']:
+ if role in node['roles']:
nodes.append(node_name)
+ break
else:
nodes.append(node_name)
else:


@@ -4,8 +4,8 @@
Summary: A set of tools to gather troubleshooting information from a system
Name: sos
Version: 4.3
Release: 5%{?dist}
Version: 4.5.0
Release: 1%{?dist}
Group: Applications/System
Source0: https://github.com/sosreport/sos/archive/%{version}/sos-%{version}.tar.gz
Source1: sos-audit-%{auditversion}.tgz
@@ -14,29 +14,16 @@ BuildArch: noarch
Url: https://github.com/sosreport/sos
BuildRequires: python3-devel
BuildRequires: gettext
Requires: libxml2-python3
#Requires: python3-rpm
BuildRequires: python3-setuptools
Requires: tar
Requires: bzip2
Requires: xz
Requires: python3-requests
Recommends: python3-magic
Recommends: python3-pexpect
Recommends: python3-pyyaml
Conflicts: vdsm < 4.40
Obsoletes: sos-collector <= 1.9
Recommends: python3-pexpect
Recommends: python3-requests
Patch1: sos-bz2055003-rebase-sos-add-sos-help.patch
Patch2: sos-bz2095267-ovirt-answer-files-passwords.patch
Patch3: sos-bz2079491-plugopts-valtype-str.patch
Patch4: sos-bz2066181-tigervnc-update-collections.patch
Patch5: sos-bz2082914-collect-pacemaker-cluster.patch
Patch6: sos-bz2079188-honor-default-plugin-timeout.patch
Patch7: sos-bz2079490-list-plugins-ignore-options.patch
Patch8: sos-bz2079492-timeouted-exec-cmd-exception.patch
Patch9: sos-bz2065563-ocp-backports.patch
Patch10: sos-bz2097674-openshift-ovn-disabled.patch
Patch11: sos-bz2122355-vdsm-set-use-devicesfile-zero.patch
Patch12: sos-bz2122354-forbidden-path-efficient.patch
Patch13: sos-bz2130209-ocp-add-labels-to-namespace.patch
%description
@@ -48,19 +35,6 @@ support technicians and developers.
%prep
%setup -qn %{name}-%{version}
%setup -T -D -a1 -q
%patch1 -p1
%patch2 -p1
%patch3 -p1
%patch4 -p1
%patch5 -p1
%patch6 -p1
%patch7 -p1
%patch8 -p1
%patch9 -p1
%patch10 -p1
%patch11 -p1
%patch12 -p1
%patch13 -p1
%build
%py3_build
@@ -83,7 +57,9 @@ cd %{name}-audit-%{auditversion}
DESTDIR=%{buildroot} ./install.sh
cd ..
%files -f %{name}.lang
# internationalization is currently broken. Uncomment this line once fixed.
# %%files -f %%{name}.lang
%files
%{_sbindir}/sos
%{_sbindir}/sosreport
%{_sbindir}/sos-collector
@@ -128,15 +104,33 @@ of the system. Currently storage and filesystem commands are audited.
%changelog
* Mon Oct 03 2022 Pavel Moravec <pmoravec@redhat.com> = 4.3-5
- [ovn_central] Rename container responsible of Red Hat
Resolves: bz2042966
- [PATCH] [host] Skip entire /etc/sos/cleaner directory
Resolves: bz2023867
* Tue Feb 07 2023 Pavel Moravec <pmoravec@redhat.com> = 4.5.0-1
- Rebase on upstream 4.5.0
Resolves: bz2082615
* Thu Sep 29 2022 Pavel Moravec <pmoravec@redhat.com> = 4.3-4
* Thu Nov 03 2022 Pavel Moravec <pmoravec@redhat.com> = 4.4-4
- [ocp] Add newly required labels to temp OCP namespace
Resolves: bz2130209
Resolves: bz2130976
* Fri Oct 28 2022 Pavel Moravec <pmoravec@redhat.com> = 4.4-3
- [cleaner] Apply compile_regexes after a regular parse line
Resolves: bz2138174
* Thu Sep 22 2022 Pavel Moravec <pmoravec@redhat.com> = 4.4-2
- [utilities] Relax from hard dependency of python3-magic
Resolves: bz2126089
- [dnf] Collect legacy yum config symlinks, properly obfuscate pwds
Resolves: bz2125499
* Fri Sep 09 2022 Pavel Moravec <pmoravec@redhat.com> = 4.4-1
- Rebase on upstream 4.4
Resolves: bz2082615
- [redhat] Honour credential-less --upload-url on RedHat distro properly
Resolves: bz2059573
- [md] Restrict data capture to raid members
Resolves: bz2062283
- [sos] Fix unhandled exception when concurrently removing temp dir
Resolves: bz2088440
* Mon Aug 29 2022 Pavel Moravec <pmoravec@redhat.com> = 4.3-3
- [vdsm] Set LVM option use_devicesfile=0