import sos-4.3-5.el8

parent d234a20dd2
commit 7e8c859249

.gitignore (vendored): 2 changed lines
@@ -1,2 +1,2 @@
-SOURCES/sos-4.2.tar.gz
+SOURCES/sos-4.3.tar.gz
 SOURCES/sos-audit-0.3.tgz
@@ -1,2 +1,2 @@
-fe82967b0577076aac104412a9fe35cdb444bde4 SOURCES/sos-4.2.tar.gz
+6d443271a3eb26af8fb400ed417a4b572730d316 SOURCES/sos-4.3.tar.gz
 9d478b9f0085da9178af103078bbf2fd77b0175a SOURCES/sos-audit-0.3.tgz
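The lines above pair each source tarball with a 40-character checksum, as tracked for the dist-git lookaside cache. A minimal sketch of how such a line can be recomputed, assuming the digest is SHA-1 (the tarball path is only an example, not a file carried in this commit):

import hashlib

def metadata_line(path):
    # Hash the tarball in chunks and format it as "<digest> <path>".
    digest = hashlib.sha1()
    with open(path, "rb") as src:
        for chunk in iter(lambda: src.read(1 << 20), b""):
            digest.update(chunk)
    return "%s %s" % (digest.hexdigest(), path)

# Example usage (assumes the tarball exists locally):
# print(metadata_line("SOURCES/sos-4.3.tar.gz"))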
File diff suppressed because it is too large.
@@ -1,49 +0,0 @@
From 66ebb8256b1326573cbcb2d134545635dfead3bc Mon Sep 17 00:00:00 2001
From: Jose Castillo <jcastillo@redhat.com>
Date: Sun, 29 Aug 2021 15:35:09 +0200
Subject: [PATCH] [omnipath_client] Ensure opacapture runs only with
allow-system-changes

While omnipath_client plugin is collecting "opacapture",
`depmod -a` command is executed to regenerates some files
under /usr/lib/modules/$kernel.

modules.dep
modules.dep.bin
modules.devname
modules.softdep
modules.symbols
modules.symbols.bin

This patch ensures that the command is only run when
the option --allow-system-changes is used.

Fixes: RHBZ#1998433

Signed-off-by: Jose Castillo <jcastillo@redhat.com>
---
sos/report/plugins/omnipath_client.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/sos/report/plugins/omnipath_client.py b/sos/report/plugins/omnipath_client.py
index 1ec01384..4e988c5c 100644
--- a/sos/report/plugins/omnipath_client.py
+++ b/sos/report/plugins/omnipath_client.py
@@ -45,7 +45,12 @@ class OmnipathClient(Plugin, RedHatPlugin):
# rather than storing it somewhere under /var/tmp and copying it via
# add_copy_spec, add it directly to sos_commands/<plugin> dir by
# building a path argument using self.get_cmd_output_path().
- self.add_cmd_output("opacapture %s" % join(self.get_cmd_output_path(),
- "opacapture.tgz"))
+ # This command calls 'depmod -a', so lets make sure we
+ # specified the 'allow-system-changes' option before running it.
+ if self.get_option('allow_system_changes'):
+ self.add_cmd_output("opacapture %s" %
+ join(self.get_cmd_output_path(),
+ "opacapture.tgz"),
+ changes=True)

# vim: set et ts=4 sw=4 :
--
2.31.1

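For context, the guard this patch introduces, i.e. only queue a command with side effects when the user opted in, can be sketched outside of sos with a stand-in collector (the collect() stub below is illustrative and not part of the sos API):

import os.path

def collect(cmd, changes=False):
    # Stand-in for sos' add_cmd_output(); it only prints here.
    print("would collect: %s (changes=%s)" % (cmd, changes))

def setup(output_path, allow_system_changes):
    # opacapture indirectly runs 'depmod -a', so skip it unless the
    # user explicitly allowed system changes.
    if allow_system_changes:
        collect("opacapture %s" % os.path.join(output_path, "opacapture.tgz"),
                changes=True)

setup("/var/tmp/sos-demo", allow_system_changes=False)  # nothing queued
setup("/var/tmp/sos-demo", allow_system_changes=True)   # opacapture queued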
@@ -1,42 +0,0 @@
From e2ca3d02f36c0db4efaacfb2c1b7d502f38e371c Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Mon, 30 Aug 2021 10:18:29 +0200
Subject: [PATCH] [unpackaged] deal with recursive loop of symlinks properly

When the plugin processes a recursive loop of symlinks, it currently
hangs in an infinite loop trying to follow the symlinks. Use
pathlib.Path.resolve() method to return the target directly.

Resolves: #2664

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/unpackaged.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/sos/report/plugins/unpackaged.py b/sos/report/plugins/unpackaged.py
index e5cc6191..9d68077c 100644
--- a/sos/report/plugins/unpackaged.py
+++ b/sos/report/plugins/unpackaged.py
@@ -10,6 +10,7 @@ from sos.report.plugins import Plugin, RedHatPlugin

import os
import stat
+from pathlib import Path


class Unpackaged(Plugin, RedHatPlugin):
@@ -41,8 +42,8 @@ class Unpackaged(Plugin, RedHatPlugin):
for name in files:
path = os.path.join(root, name)
try:
- while stat.S_ISLNK(os.lstat(path).st_mode):
- path = os.path.abspath(os.readlink(path))
+ if stat.S_ISLNK(os.lstat(path).st_mode):
+ path = Path(path).resolve()
except Exception:
continue
file_list.append(os.path.realpath(path))
--
2.31.1

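To see why switching from a manual readlink loop to pathlib matters, here is a small standalone demonstration; the temporary directory and link names are made up for the example, and the broad except mirrors the plugin's own try/except because resolve() reports a loop differently across Python versions:

import os
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    a, b = os.path.join(tmp, "a"), os.path.join(tmp, "b")
    os.symlink(a, b)   # b -> a
    os.symlink(b, a)   # a -> b, closing the loop

    # Old approach (never terminates on such a cycle):
    # while os.path.islink(a):
    #     a = os.path.abspath(os.readlink(a))

    # New approach: resolve() returns or raises instead of spinning forever.
    try:
        print(Path(a).resolve())
    except Exception as err:
        print("symlink loop detected:", err)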
@@ -1,73 +0,0 @@
From 7d5157aa5071e3620246e2d4aa80acb2d3ed30f0 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Tue, 28 Sep 2021 22:44:52 +0200
Subject: [PATCH] [networking] prevent iptables-save commands to load nf_tables
kmod

If iptables has built-in nf_tables kmod, then
'ip netns <foo> iptables-save' command requires the kmod which must
be guarded by predicate.

Analogously for ip6tables.

Resolves: #2703

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/networking.py | 29 ++++++++++++++++++++++++-----
1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/sos/report/plugins/networking.py b/sos/report/plugins/networking.py
index c80ae719..1237f629 100644
--- a/sos/report/plugins/networking.py
+++ b/sos/report/plugins/networking.py
@@ -182,22 +182,41 @@ class Networking(Plugin):
# per-namespace.
self.add_cmd_output("ip netns")
cmd_prefix = "ip netns exec "
- for namespace in self.get_network_namespaces(
- self.get_option("namespace_pattern"),
- self.get_option("namespaces")):
+ namespaces = self.get_network_namespaces(
+ self.get_option("namespace_pattern"),
+ self.get_option("namespaces"))
+ if (namespaces):
+ # 'ip netns exec <foo> iptables-save' must be guarded by nf_tables
+ # kmod, if 'iptables -V' output contains 'nf_tables'
+ # analogously for ip6tables
+ co = {'cmd': 'iptables -V', 'output': 'nf_tables'}
+ co6 = {'cmd': 'ip6tables -V', 'output': 'nf_tables'}
+ iptables_with_nft = (SoSPredicate(self, kmods=['nf_tables'])
+ if self.test_predicate(self,
+ pred=SoSPredicate(self, cmd_outputs=co))
+ else None)
+ ip6tables_with_nft = (SoSPredicate(self, kmods=['nf_tables'])
+ if self.test_predicate(self,
+ pred=SoSPredicate(self, cmd_outputs=co6))
+ else None)
+ for namespace in namespaces:
ns_cmd_prefix = cmd_prefix + namespace + " "
self.add_cmd_output([
ns_cmd_prefix + "ip address show",
ns_cmd_prefix + "ip route show table all",
ns_cmd_prefix + "ip -s -s neigh show",
ns_cmd_prefix + "ip rule list",
- ns_cmd_prefix + "iptables-save",
- ns_cmd_prefix + "ip6tables-save",
ns_cmd_prefix + "netstat %s -neopa" % self.ns_wide,
ns_cmd_prefix + "netstat -s",
ns_cmd_prefix + "netstat %s -agn" % self.ns_wide,
ns_cmd_prefix + "nstat -zas",
], priority=50)
+ self.add_cmd_output([ns_cmd_prefix + "iptables-save"],
+ pred=iptables_with_nft,
+ priority=50)
+ self.add_cmd_output([ns_cmd_prefix + "ip6tables-save"],
+ pred=ip6tables_with_nft,
+ priority=50)

ss_cmd = ns_cmd_prefix + "ss -peaonmi"
# --allow-system-changes is handled directly in predicate
--
2.31.1

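The predicates added here hinge on whether 'iptables -V' reports the nf_tables backend. That check is plain substring matching and can be tried on its own (the sample version strings are illustrative):

def uses_nf_tables(version_output):
    # True when iptables is the nft-based variant, e.g. "iptables v1.8.4 (nf_tables)".
    return "nf_tables" in version_output

print(uses_nf_tables("iptables v1.8.4 (nf_tables)"))  # True
print(uses_nf_tables("iptables v1.4.21"))             # False, legacy backend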
@@ -1,33 +0,0 @@
From 23e523b6b9784390c7ce2c5af654ab497fb10aaf Mon Sep 17 00:00:00 2001
From: Jose Castillo <jcastillo@redhat.com>
Date: Wed, 8 Sep 2021 09:25:24 +0200
Subject: [PATCH] [kernel] Capture Pressure Stall Information

Kernel 4.20 includes PSI metrics for CPU, memeory and IO.
The feature is enabled after adding "psi=1" as
kernel boot parameter.
The information is captured in files
in the directory /proc/pressure.

Signed-off-by: Jose Castillo <jcastillo@redhat.com>
---
sos/report/plugins/kernel.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/sos/report/plugins/kernel.py b/sos/report/plugins/kernel.py
index 8c5e5e11..803f5e30 100644
--- a/sos/report/plugins/kernel.py
+++ b/sos/report/plugins/kernel.py
@@ -112,7 +112,8 @@ class Kernel(Plugin, IndependentPlugin):
"/sys/kernel/debug/extfrag/unusable_index",
"/sys/kernel/debug/extfrag/extfrag_index",
clocksource_path + "available_clocksource",
- clocksource_path + "current_clocksource"
+ clocksource_path + "current_clocksource",
+ "/proc/pressure/"
])

if self.get_option("with-timer"):
--
2.31.1

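As a quick illustration of what the new copy spec gathers, the PSI files are ordinary procfs text files; this snippet prints them and simply skips kernels booted without psi=1 or lacking /proc/pressure:

import os

for resource in ("cpu", "memory", "io"):
    path = os.path.join("/proc/pressure", resource)
    if os.path.exists(path):
        with open(path) as psi:
            print(path)
            print(psi.read().strip())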
@@ -1,151 +0,0 @@
From 3f0ec3e55e7dcec89dd7fad10084ea7f16178608 Mon Sep 17 00:00:00 2001
From: Salvatore Daniele <sdaniele@redhat.com>
Date: Tue, 7 Sep 2021 13:48:22 -0400
Subject: [PATCH 1/2] [openvswitch] add ovs default OpenFlow protocols

ovs-vsctl list bridge can return an empty 'protocol' column even when
there are OpenFlow protocols in place by default.

ovs-ofctl --version will return the range of supported ofp and should
also be used to ensure flow information for relevant protocol versions
is collected.

OpenFlow default versions:
https://docs.openvswitch.org/en/latest/faq/openflow/

Signed-off-by: Salvatore Daniele <sdaniele@redhat.com>
---
sos/report/plugins/openvswitch.py | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)

diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py
index cd897db2..92cc7259 100644
--- a/sos/report/plugins/openvswitch.py
+++ b/sos/report/plugins/openvswitch.py
@@ -206,6 +206,7 @@ class OpenVSwitch(Plugin):

# Gather additional output for each OVS bridge on the host.
br_list_result = self.collect_cmd_output("ovs-vsctl -t 5 list-br")
+ ofp_ver_result = self.collect_cmd_output("ovs-ofctl -t 5 --version")
if br_list_result['status'] == 0:
for br in br_list_result['output'].splitlines():
self.add_cmd_output([
@@ -232,6 +233,16 @@ class OpenVSwitch(Plugin):
"OpenFlow15"
]

+ # Flow protocol hex identifiers
+ ofp_versions = {
+ 0x01: "OpenFlow10",
+ 0x02: "OpenFlow11",
+ 0x03: "OpenFlow12",
+ 0x04: "OpenFlow13",
+ 0x05: "OpenFlow14",
+ 0x06: "OpenFlow15",
+ }
+
# List protocols currently in use, if any
ovs_list_bridge_cmd = "ovs-vsctl -t 5 list bridge %s" % br
br_info = self.collect_cmd_output(ovs_list_bridge_cmd)
@@ -242,6 +253,21 @@ class OpenVSwitch(Plugin):
br_protos_ln = line[line.find("[")+1:line.find("]")]
br_protos = br_protos_ln.replace('"', '').split(", ")

+ # If 'list bridge' yeilded no protocols, use the range of
+ # protocols enabled by default on this version of ovs.
+ if br_protos == [''] and ofp_ver_result['output']:
+ ofp_version_range = ofp_ver_result['output'].splitlines()
+ ver_range = []
+
+ for line in ofp_version_range:
+ if "OpenFlow versions" in line:
+ v = line.split("OpenFlow versions ")[1].split(":")
+ ver_range = range(int(v[0], 16), int(v[1], 16)+1)
+
+ for protocol in ver_range:
+ if protocol in ofp_versions:
+ br_protos.append(ofp_versions[protocol])
+
# Collect flow information for relevant protocol versions only
for flow in flow_versions:
if flow in br_protos:
--
2.31.1


From 5a006024f730213a726c70e82c5ecd2daf685b2b Mon Sep 17 00:00:00 2001
From: Salvatore Daniele <sdaniele@redhat.com>
Date: Tue, 7 Sep 2021 14:17:19 -0400
Subject: [PATCH 2/2] [openvswitch] add commands for offline analysis

Replicas of ovs-vswitchd and ovsdb-server can be recreated offline
using flow, group, and tlv dumps, and ovs conf.db. This allows for
offline anaylsis and the use of tools such as ovs-appctl
ofproto/trace and ovs-ofctl for debugging.

This patch ensures this information is available in the sos report.
The db is copied rather than collected using ovsdb-client list dump
for two reasons:

ovsdb-client requires interacting with the ovsdb-server which could
take it 'down' for some time, and impact large, busy clusters.

The list-dump is not in a format that can be used to restore the db
offline. All of the information in the list dump is available and more
by copying the db.

Signed-off-by: Salvatore Daniele <sdaniele@redhat.com>
---
sos/report/plugins/openvswitch.py | 12 ++++++++++--
sos/report/plugins/ovn_central.py | 1 +
2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py
index 92cc7259..003596c6 100644
--- a/sos/report/plugins/openvswitch.py
+++ b/sos/report/plugins/openvswitch.py
@@ -75,12 +75,19 @@ class OpenVSwitch(Plugin):
"/run/openvswitch/ovs-monitor-ipsec.pid"
])

+ self.add_copy_spec([
+ path_join('/usr/local/etc/openvswitch', 'conf.db'),
+ path_join('/etc/openvswitch', 'conf.db'),
+ path_join('/var/lib/openvswitch', 'conf.db'),
+ ])
+ ovs_dbdir = environ.get('OVS_DBDIR')
+ if ovs_dbdir:
+ self.add_copy_spec(path_join(ovs_dbdir, 'conf.db'))
+
self.add_cmd_output([
# The '-t 5' adds an upper bound on how long to wait to connect
# to the Open vSwitch server, avoiding hangs when running sos.
"ovs-vsctl -t 5 show",
- # Gather the database.
- "ovsdb-client -f list dump",
# List the contents of important runtime directories
"ls -laZ /run/openvswitch",
"ls -laZ /dev/hugepages/",
@@ -276,6 +283,7 @@ class OpenVSwitch(Plugin):
"ovs-ofctl -O %s dump-groups %s" % (flow, br),
"ovs-ofctl -O %s dump-group-stats %s" % (flow, br),
"ovs-ofctl -O %s dump-flows %s" % (flow, br),
+ "ovs-ofctl -O %s dump-tlv-map %s" % (flow, br),
"ovs-ofctl -O %s dump-ports-desc %s" % (flow, br)
])

diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
index a4c483a9..d6647aad 100644
--- a/sos/report/plugins/ovn_central.py
+++ b/sos/report/plugins/ovn_central.py
@@ -138,6 +138,7 @@ class OVNCentral(Plugin):
os.path.join('/usr/local/etc/openvswitch', dbfile),
os.path.join('/etc/openvswitch', dbfile),
os.path.join('/var/lib/openvswitch', dbfile),
+ os.path.join('/var/lib/ovn/etc', dbfile),
])
if ovs_dbdir:
self.add_copy_spec(os.path.join(ovs_dbdir, dbfile))
--
2.31.1

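The core of PATCH 1/2, turning the "OpenFlow versions 0x1:0x6" line of ovs-ofctl --version into protocol names, can be exercised in isolation against canned output (the sample text is illustrative):

OFP_VERSIONS = {
    0x01: "OpenFlow10", 0x02: "OpenFlow11", 0x03: "OpenFlow12",
    0x04: "OpenFlow13", 0x05: "OpenFlow14", 0x06: "OpenFlow15",
}

def default_protocols(version_output):
    # Map the supported OpenFlow hex range onto protocol names.
    protos = []
    for line in version_output.splitlines():
        if "OpenFlow versions" in line:
            lo, hi = line.split("OpenFlow versions ")[1].split(":")
            for code in range(int(lo, 16), int(hi, 16) + 1):
                if code in OFP_VERSIONS:
                    protos.append(OFP_VERSIONS[code])
    return protos

sample = "ovs-ofctl (Open vSwitch) 2.15.1\nOpenFlow versions 0x1:0x6"
print(default_protocols(sample))  # ['OpenFlow10', ..., 'OpenFlow15']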
@@ -1,303 +0,0 @@
From 2ab8ba3ecbd52e452cc554d515e0782801dcb4b6 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Wed, 8 Sep 2021 15:31:48 +0200
Subject: [PATCH] [firewalld] collect nft rules in firewall_tables only

We collect 'nft list ruleset' in both plugins, while:
- nft is not shipped by firewalld package, so we should not collect
it in firewalld plugin
- running the command requires both nf_tables and nfnetlink kmods, so
we should use both kmods in the predicate

Resolves: #2679

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/firewall_tables.py | 9 +++++----
sos/report/plugins/firewalld.py | 8 +-------
2 files changed, 6 insertions(+), 11 deletions(-)

diff --git a/sos/report/plugins/firewall_tables.py b/sos/report/plugins/firewall_tables.py
index 56058d3bf9..63a7dddeb5 100644
--- a/sos/report/plugins/firewall_tables.py
+++ b/sos/report/plugins/firewall_tables.py
@@ -40,10 +40,11 @@ def collect_nftables(self):
""" Collects nftables rulesets with 'nft' commands if the modules
are present """

- self.add_cmd_output(
- "nft list ruleset",
- pred=SoSPredicate(self, kmods=['nf_tables'])
- )
+ # collect nftables ruleset
+ nft_pred = SoSPredicate(self,
+ kmods=['nf_tables', 'nfnetlink'],
+ required={'kmods': 'all'})
+ self.add_cmd_output("nft list ruleset", pred=nft_pred, changes=True)

def setup(self):
# collect iptables -t for any existing table, if we can't read the
diff --git a/sos/report/plugins/firewalld.py b/sos/report/plugins/firewalld.py
index ec83527ed7..9401bfd239 100644
--- a/sos/report/plugins/firewalld.py
+++ b/sos/report/plugins/firewalld.py
@@ -9,7 +9,7 @@
#
# See the LICENSE file in the source distribution for further information.

-from sos.report.plugins import Plugin, RedHatPlugin, SoSPredicate
+from sos.report.plugins import Plugin, RedHatPlugin


class FirewallD(Plugin, RedHatPlugin):
@@ -35,12 +35,6 @@ def setup(self):
"/var/log/firewalld",
])

- # collect nftables ruleset
- nft_pred = SoSPredicate(self,
- kmods=['nf_tables', 'nfnetlink'],
- required={'kmods': 'all'})
- self.add_cmd_output("nft list ruleset", pred=nft_pred, changes=True)
-
# use a 10s timeout to workaround dbus problems in
# docker containers.
self.add_cmd_output([
--
2.31.1


From 2a7cf53b61943907dc823cf893530b620a87946c Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Fri, 15 Oct 2021 22:31:36 +0200
Subject: [PATCH 1/3] [report] Use log_skipped_cmd method inside
collect_cmd_output

Also, remove obsolete parameters of the log_skipped_cmd method.

Related: #2724

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/__init__.py | 26 ++++++++------------------
1 file changed, 8 insertions(+), 18 deletions(-)

diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index ec138f83..b60ab5f6 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -876,8 +876,7 @@ class Plugin():
return bool(pred)
return False

- def log_skipped_cmd(self, pred, cmd, kmods=False, services=False,
- changes=False):
+ def log_skipped_cmd(self, cmd, pred, changes=False):
"""Log that a command was skipped due to predicate evaluation.

Emit a warning message indicating that a command was skipped due
@@ -887,21 +886,17 @@ class Plugin():
message indicating that the missing data can be collected by using
the "--allow-system-changes" command line option will be included.

- :param pred: The predicate that caused the command to be skipped
- :type pred: ``SoSPredicate``
-
:param cmd: The command that was skipped
:type cmd: ``str``

- :param kmods: Did kernel modules cause the command to be skipped
- :type kmods: ``bool``
-
- :param services: Did services cause the command to be skipped
- :type services: ``bool``
+ :param pred: The predicate that caused the command to be skipped
+ :type pred: ``SoSPredicate``

:param changes: Is the `--allow-system-changes` enabled
:type changes: ``bool``
"""
+ if pred is None:
+ pred = SoSPredicate(self)
msg = "skipped command '%s': %s" % (cmd, pred.report_failure())

if changes:
@@ -1700,9 +1693,7 @@ class Plugin():
self.collect_cmds.append(soscmd)
self._log_info("added cmd output '%s'" % soscmd.cmd)
else:
- self.log_skipped_cmd(pred, soscmd.cmd, kmods=bool(pred.kmods),
- services=bool(pred.services),
- changes=soscmd.changes)
+ self.log_skipped_cmd(soscmd.cmd, pred, changes=soscmd.changes)

def add_cmd_output(self, cmds, suggest_filename=None,
root_symlink=None, timeout=None, stderr=True,
@@ -2112,7 +2103,7 @@ class Plugin():
root_symlink=False, timeout=None,
stderr=True, chroot=True, runat=None, env=None,
binary=False, sizelimit=None, pred=None,
- subdir=None, tags=[]):
+ changes=False, subdir=None, tags=[]):
"""Execute a command and save the output to a file for inclusion in the
report, then return the results for further use by the plugin

@@ -2163,8 +2154,7 @@ class Plugin():
:rtype: ``dict``
"""
if not self.test_predicate(cmd=True, pred=pred):
- self._log_info("skipped cmd output '%s' due to predicate (%s)" %
- (cmd, self.get_predicate(cmd=True, pred=pred)))
+ self.log_skipped_cmd(cmd, pred, changes=changes)
return {
'status': None, # don't match on if result['status'] checks
'output': '',
--
2.31.1


From 6b1bea0ffb1df7f8e5001b06cf25f0741b007ddd Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Fri, 15 Oct 2021 22:34:01 +0200
Subject: [PATCH 2/3] [firewall_tables] call iptables -t <table> based on nft
list

If iptables are not realy in use, calling iptables -t <table>
would load corresponding nft table.

Therefore, call iptables -t only for the tables from "nft list ruleset"
output.

Example: nft list ruleset contains

table ip mangle {
..
}

so we can collect iptable -t mangle -nvL .

The same applies to ip6tables as well.

Resolves: #2724

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/firewall_tables.py | 29 ++++++++++++++++++++-------
1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/sos/report/plugins/firewall_tables.py b/sos/report/plugins/firewall_tables.py
index 63a7ddde..ef04d939 100644
--- a/sos/report/plugins/firewall_tables.py
+++ b/sos/report/plugins/firewall_tables.py
@@ -44,26 +44,41 @@ class firewall_tables(Plugin, IndependentPlugin):
nft_pred = SoSPredicate(self,
kmods=['nf_tables', 'nfnetlink'],
required={'kmods': 'all'})
- self.add_cmd_output("nft list ruleset", pred=nft_pred, changes=True)
+ return self.collect_cmd_output("nft list ruleset", pred=nft_pred,
+ changes=True)

def setup(self):
+ # first, collect "nft list ruleset" as collecting commands like
+ # ip6tables -t mangle -nvL
+ # depends on its output
+ # store in nft_ip_tables lists of ip[|6] tables from nft list
+ nft_list = self.collect_nftables()
+ nft_ip_tables = {'ip': [], 'ip6': []}
+ nft_lines = nft_list['output'] if nft_list['status'] == 0 else ''
+ for line in nft_lines.splitlines():
+ words = line.split()[0:3]
+ if len(words) == 3 and words[0] == 'table' and \
+ words[1] in nft_ip_tables.keys():
+ nft_ip_tables[words[1]].append(words[2])
# collect iptables -t for any existing table, if we can't read the
# tables, collect 2 default ones (mangle, filter)
+ # do collect them only when relevant nft list ruleset exists
+ default_ip_tables = "mangle\nfilter\n"
try:
ip_tables_names = open("/proc/net/ip_tables_names").read()
except IOError:
- ip_tables_names = "mangle\nfilter\n"
+ ip_tables_names = default_ip_tables
for table in ip_tables_names.splitlines():
- self.collect_iptable(table)
+ if nft_list['status'] == 0 and table in nft_ip_tables['ip']:
+ self.collect_iptable(table)
# collect the same for ip6tables
try:
ip_tables_names = open("/proc/net/ip6_tables_names").read()
except IOError:
- ip_tables_names = "mangle\nfilter\n"
+ ip_tables_names = default_ip_tables
for table in ip_tables_names.splitlines():
- self.collect_ip6table(table)
-
- self.collect_nftables()
+ if nft_list['status'] == 0 and table in nft_ip_tables['ip6']:
+ self.collect_ip6table(table)

# When iptables is called it will load the modules
# iptables_filter (for kernel <= 3) or
--
2.31.1


From 464bd2d2e83f203e369f2ba7671bbb7da53e06f6 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Sun, 24 Oct 2021 16:00:31 +0200
Subject: [PATCH 3/3] [firewall_tables] Call iptables only when nft ip filter
table exists

iptables -vnxL creates nft 'ip filter' table if it does not exist, hence
we must guard iptables execution by presence of the nft table.

An equivalent logic applies to ip6tables.

Resolves: #2724

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/firewall_tables.py | 26 ++++++++++++++------------
1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/sos/report/plugins/firewall_tables.py b/sos/report/plugins/firewall_tables.py
index ef04d939..7eafd60f 100644
--- a/sos/report/plugins/firewall_tables.py
+++ b/sos/report/plugins/firewall_tables.py
@@ -80,19 +80,21 @@ class firewall_tables(Plugin, IndependentPlugin):
if nft_list['status'] == 0 and table in nft_ip_tables['ip6']:
self.collect_ip6table(table)

- # When iptables is called it will load the modules
- # iptables_filter (for kernel <= 3) or
- # nf_tables (for kernel >= 4) if they are not loaded.
+ # When iptables is called it will load:
+ # 1) the modules iptables_filter (for kernel <= 3) or
+ # nf_tables (for kernel >= 4) if they are not loaded.
+ # 2) nft 'ip filter' table will be created
# The same goes for ipv6.
- self.add_cmd_output(
- "iptables -vnxL",
- pred=SoSPredicate(self, kmods=['iptable_filter', 'nf_tables'])
- )
-
- self.add_cmd_output(
- "ip6tables -vnxL",
- pred=SoSPredicate(self, kmods=['ip6table_filter', 'nf_tables'])
- )
+ if nft_list['status'] != 0 or 'filter' in nft_ip_tables['ip']:
+ self.add_cmd_output(
+ "iptables -vnxL",
+ pred=SoSPredicate(self, kmods=['iptable_filter', 'nf_tables'])
+ )
+ if nft_list['status'] != 0 or 'filter' in nft_ip_tables['ip6']:
+ self.add_cmd_output(
+ "ip6tables -vnxL",
+ pred=SoSPredicate(self, kmods=['ip6table_filter', 'nf_tables'])
+ )

self.add_copy_spec([
"/etc/nftables",
--
2.31.1

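The parsing step from PATCH 2/3, collecting per-family table names out of 'nft list ruleset', is easy to reproduce against canned output (the sample ruleset is illustrative):

def nft_tables(ruleset_text):
    # Collect table names per address family from 'nft list ruleset' output.
    tables = {'ip': [], 'ip6': []}
    for line in ruleset_text.splitlines():
        words = line.split()[0:3]
        if len(words) == 3 and words[0] == 'table' and words[1] in tables:
            tables[words[1]].append(words[2])
    return tables

sample = "table ip filter {\n}\ntable ip mangle {\n}\ntable ip6 filter {\n}\n"
print(nft_tables(sample))
# {'ip': ['filter', 'mangle'], 'ip6': ['filter']}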
@@ -1,48 +0,0 @@
From b09ed75b09075d86c184b0a63cce9260f2cee4ca Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Mon, 30 Aug 2021 11:27:48 +0200
Subject: [PATCH] [processor] Apply sizelimit to /sys/devices/system/cpu/cpuX

Copy /sys/devices/system/cpu/cpuX with separately applied sizelimit.

This is required for systems with tens/hundreds of CPUs where the
cumulative directory size exceeds 25MB or even 100MB.

Resolves: #2639
Closes: #2665

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/processor.py | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/sos/report/plugins/processor.py b/sos/report/plugins/processor.py
index 0ddfd126..2df2dc9a 100644
--- a/sos/report/plugins/processor.py
+++ b/sos/report/plugins/processor.py
@@ -7,6 +7,7 @@
# See the LICENSE file in the source distribution for further information.

from sos.report.plugins import Plugin, IndependentPlugin
+import os


class Processor(Plugin, IndependentPlugin):
@@ -34,7 +35,13 @@ class Processor(Plugin, IndependentPlugin):
self.add_copy_spec([
"/proc/cpuinfo",
"/sys/class/cpuid",
- "/sys/devices/system/cpu"
+ ])
+ # copy /sys/devices/system/cpu/cpuX with separately applied sizelimit
+ # this is required for systems with tens/hundreds of CPUs where the
+ # cumulative directory size exceeds 25MB or even 100MB.
+ cdirs = self.listdir('/sys/devices/system/cpu')
+ self.add_copy_spec([
+ os.path.join('/sys/devices/system/cpu', cdir) for cdir in cdirs
])

self.add_cmd_output([
--
2.31.1

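The effect of the change is to replace one copy spec for the whole directory with one spec per top-level entry, so the per-spec size limit applies to every cpuX directory separately. A standalone sketch of that expansion, guarded so it also runs on systems without the sysfs path:

import os

cpu_root = "/sys/devices/system/cpu"
entries = os.listdir(cpu_root) if os.path.isdir(cpu_root) else []
copy_specs = [os.path.join(cpu_root, entry) for entry in entries]
print(copy_specs[:5])  # one spec per entry, each with its own size limit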
@@ -1,69 +0,0 @@
From 5a9458d318302c1caef862a868745fc8bdf5c741 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Mon, 4 Oct 2021 15:52:36 +0200
Subject: [PATCH] [foreman] Collect puma status and stats

Collect foreman-puma-status and 'pumactl [gc-|]stats', optionally using
SCL (if detected).

Resolves: #2712

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/foreman.py | 21 ++++++++++++++++++++-
1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/sos/report/plugins/foreman.py b/sos/report/plugins/foreman.py
index 4539f12b..351794f4 100644
--- a/sos/report/plugins/foreman.py
+++ b/sos/report/plugins/foreman.py
@@ -13,6 +13,7 @@ from sos.report.plugins import (Plugin,
UbuntuPlugin)
from pipes import quote
from re import match
+from sos.utilities import is_executable


class Foreman(Plugin):
@@ -26,7 +27,9 @@ class Foreman(Plugin):
option_list = [
('months', 'number of months for dynflow output', 'fast', 1),
('proxyfeatures', 'collect features of smart proxies', 'slow', False),
+ ('puma-gc', 'collect Puma GC stats', 'fast', False),
]
+ pumactl = 'pumactl %s -S /usr/share/foreman/tmp/puma.state'

def setup(self):
# for external DB, search in /etc/foreman/database.yml for:
@@ -134,6 +138,17 @@ class Foreman(Plugin):
suggest_filename='dynflow_sidekiq_status')
self.add_journal(units="dynflow-sidekiq@*")

+ # Puma stats & status, i.e. foreman-puma-stats, then
+ # pumactl stats -S /usr/share/foreman/tmp/puma.state
+ # and optionally also gc-stats
+ # if on RHEL with Software Collections, wrap the commands accordingly
+ if self.get_option('puma-gc'):
+ self.add_cmd_output(self.pumactl % 'gc-stats',
+ suggest_filename='pumactl_gc-stats')
+ self.add_cmd_output(self.pumactl % 'stats',
+ suggest_filename='pumactl_stats')
+ self.add_cmd_output('/usr/sbin/foreman-puma-status')
+
# collect tables sizes, ordered
_cmd = self.build_query_cmd(
"SELECT table_name, pg_size_pretty(total_bytes) AS total, "
@@ -297,6 +312,10 @@ class RedHatForeman(Foreman, RedHatPlugin):
self.add_file_tags({
'/usr/share/foreman/.ssh/ssh_config': 'ssh_foreman_config',
})
+ # if we are on RHEL7 with scl, wrap some Puma commands by
+ # scl enable tfm 'command'
+ if self.policy.dist_version() == 7 and is_executable('scl'):
+ self.pumactl = "scl enable tfm '%s'" % self.pumactl

super(RedHatForeman, self).setup()

--
2.31.1

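The patch keeps a single command template and substitutes the subcommand later; wrapping the template in scl enable tfm '...' before substitution is what makes the Software Collections case work. A minimal sketch of that string handling (the boolean flag is an assumption standing in for the dist_version()/is_executable() check):

pumactl = "pumactl %s -S /usr/share/foreman/tmp/puma.state"

on_rhel7_with_scl = True  # assumed for the example
if on_rhel7_with_scl:
    # Wrap the whole template so the later substitution lands inside the quotes.
    pumactl = "scl enable tfm '%s'" % pumactl

print(pumactl % "stats")
# scl enable tfm 'pumactl stats -S /usr/share/foreman/tmp/puma.state'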
@@ -1,33 +0,0 @@
From e56b3ea999731b831ebba80cf367e43e65c12b62 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Mon, 4 Oct 2021 14:43:08 +0200
Subject: [PATCH] [report] Overwrite pred=None before refering predicate
attributes

During a dry run, add_journal method sets pred=None whilst log_skipped_cmd
refers to predicate attributes. In that case, replace None predicate
by a default / empty predicate.

Resolves: #2711

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/__init__.py | 2 ++
1 file changed, 2 insertions(+)

diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index 3c2b64d9..c635b8de 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -1693,6 +1693,8 @@ class Plugin():
def _add_cmd_output(self, **kwargs):
"""Internal helper to add a single command to the collection list."""
pred = kwargs.pop('pred') if 'pred' in kwargs else SoSPredicate(self)
+ if pred is None:
+ pred = SoSPredicate(self)
if 'priority' not in kwargs:
kwargs['priority'] = 10
if 'changes' not in kwargs:
--
2.31.1

@@ -1,31 +0,0 @@
From a93e118a9c88df52fd2c701d2276185f877d565c Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Wed, 3 Nov 2021 16:07:15 +0100
Subject: [PATCH] [report] shutdown threads for timeouted plugins

Wait for shutting down threads of timeouted plugins, to prevent
them in writing to moved auxiliary files like sos_logs/sos.log

Resolves: #2722
Closes: #2746

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/__init__.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/sos/report/__init__.py b/sos/report/__init__.py
index 1b5bc97d..ef86b28d 100644
--- a/sos/report/__init__.py
+++ b/sos/report/__init__.py
@@ -1046,6 +1046,7 @@ class SoSReport(SoSComponent):
self.ui_log.error("\n Plugin %s timed out\n" % plugin[1])
self.running_plugs.remove(plugin[1])
self.loaded_plugins[plugin[0]-1][1].set_timeout_hit()
+ pool.shutdown(wait=True)
pool._threads.clear()
if self.opts.estimate_only:
from pathlib import Path
--
2.31.1

@@ -1,91 +0,0 @@
From 3fea9a564c4112d04f6324df0d8b212e78feb5b3 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Wed, 3 Nov 2021 11:02:54 -0400
Subject: [PATCH] [Plugin] Ensure specific plugin timeouts are only set for
that plugin

It was discovered that setting a specific plugin timeout via the `-k
$plugin.timeout` option could influence the timeout setting for other
plugins that are not also having their timeout explicitly set. Fix this
by moving the default plugin opts into `Plugin.__init__()` so that each
plugin is ensured a private copy of these default plugin options.

Additionally, add more timeout data to plugin manifest entries to allow
for better tracking of this setting.

Adds a test case for this scenario.

Closes: #2744

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
sos/report/__init__.py | 2 +-
sos/report/plugins/__init__.py | 28 +++++++++++++------
tests/vendor_tests/redhat/rhbz2018033.py | 35 ++++++++++++++++++++++++
3 files changed, 55 insertions(+), 10 deletions(-)
create mode 100644 tests/vendor_tests/redhat/rhbz2018033.py

diff --git a/sos/report/__init__.py b/sos/report/__init__.py
index ef86b28d..c95e6300 100644
--- a/sos/report/__init__.py
+++ b/sos/report/__init__.py
@@ -766,7 +766,7 @@ class SoSReport(SoSComponent):
if self.all_options:
self.ui_log.info(_("The following options are available for ALL "
"plugins:"))
- for opt in self.all_options[0][0]._default_plug_opts:
+ for opt in self.all_options[0][0].get_default_plugin_opts():
val = opt[3]
if val == -1:
val = TIMEOUT_DEFAULT
diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index 49f1af27..3e717993 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -474,12 +474,6 @@ class Plugin(object):
# Default predicates
predicate = None
cmd_predicate = None
- _default_plug_opts = [
- ('timeout', 'Timeout in seconds for plugin to finish', 'fast', -1),
- ('cmd-timeout', 'Timeout in seconds for a command', 'fast', -1),
- ('postproc', 'Enable post-processing collected plugin data', 'fast',
- True)
- ]

def __init__(self, commons):

@@ -506,7 +500,7 @@ class Plugin(object):
else logging.getLogger('sos')

# add the default plugin opts
- self.option_list.extend(self._default_plug_opts)
+ self.option_list.extend(self.get_default_plugin_opts())

# get the option list into a dictionary
for opt in self.option_list:
@@ -591,6 +583,14 @@ class Plugin():
# Initialise the default --dry-run predicate
self.set_predicate(SoSPredicate(self))

+ def get_default_plugin_opts(self):
+ return [
+ ('timeout', 'Timeout in seconds for plugin to finish', 'fast', -1),
+ ('cmd-timeout', 'Timeout in seconds for a command', 'fast', -1),
+ ('postproc', 'Enable post-processing collected plugin data', 'fast',
+ True)
+ ]
+
def set_plugin_manifest(self, manifest):
"""Pass in a manifest object to the plugin to write to

@@ -547,7 +541,9 @@ class Plugin(object):
self.manifest.add_field('setup_start', '')
self.manifest.add_field('setup_end', '')
self.manifest.add_field('setup_time', '')
+ self.manifest.add_field('timeout', self.timeout)
self.manifest.add_field('timeout_hit', False)
+ self.manifest.add_field('command_timeout', self.cmdtimeout)
self.manifest.add_list('commands', [])
self.manifest.add_list('files', [])

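The root cause fixed here is the classic shared-class-attribute pitfall: a mutable list defined on the class is shared by every instance, so one plugin's timeout edit leaks into all the others. A self-contained illustration with toy classes (not sos code):

class Shared:
    default_opts = [('timeout', -1)]           # one list shared by all instances

class Private:
    def __init__(self):
        self.default_opts = [('timeout', -1)]  # fresh list per instance

a, b = Shared(), Shared()
a.default_opts[0] = ('timeout', 60)
print(b.default_opts)   # [('timeout', 60)] -- the change leaked to b

c, d = Private(), Private()
c.default_opts[0] = ('timeout', 60)
print(d.default_opts)   # [('timeout', -1)] -- unaffected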
@@ -1,54 +0,0 @@
From 568eb2fbcf74ecad00d5c06989f55f8a6a9e3516 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Thu, 4 Nov 2021 23:14:21 +0100
Subject: [PATCH] [report] fix filter_namespace per pattern

Curently, -k networking.namespace_pattern=.. is broken as the R.E. test
forgets to add the namespace in case of positive match.

Also ensure both plugopts namespace_pattern and namespaces work
together.

Resolves: #2748

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/report/plugins/__init__.py | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index 3e717993..a0d4e95d 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -2953,21 +2953,20 @@ class Plugin():
)
for ns in ns_list:
# if ns_pattern defined, skip namespaces not matching the pattern
- if ns_pattern:
- if not bool(re.match(pattern, ns)):
- continue
+ if ns_pattern and not bool(re.match(pattern, ns)):
+ continue
+ out_ns.append(ns)

- # if ns_max is defined at all, limit returned list to that number
+ # if ns_max is defined at all, break the loop when the limit is
+ # reached
# this allows the use of both '0' and `None` to mean unlimited
- elif ns_max:
- out_ns.append(ns)
+ if ns_max:
if len(out_ns) == ns_max:
self._log_warn("Limiting namespace iteration "
"to first %s namespaces found"
% ns_max)
break
- else:
- out_ns.append(ns)
+
return out_ns

--
2.31.1

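The corrected control flow, append on a positive match and stop once the cap is reached, can be captured in a simplified standalone filter (the namespace names below are illustrative):

import re

def filter_namespaces(ns_list, pattern=None, max_count=None):
    out = []
    for ns in ns_list:
        if pattern and not re.match(pattern, ns):
            continue          # skip namespaces not matching the pattern
        out.append(ns)        # a positive match must still be appended
        if max_count and len(out) == max_count:
            break             # 0/None both mean unlimited
    return out

names = ["qrouter-1", "qdhcp-1", "qrouter-2", "qrouter-3"]
print(filter_namespaces(names, pattern="qrouter", max_count=2))
# ['qrouter-1', 'qrouter-2']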
File diff suppressed because it is too large.
@@ -1,46 +0,0 @@
From f2cc67750f55a71edff0c527a1bfc14fde8132c3 Mon Sep 17 00:00:00 2001
From: Mamatha Inamdar <mamatha4@linux.vnet.ibm.com>
Date: Mon, 8 Nov 2021 10:50:03 +0530
Subject: [PATCH] [nvidia]:Patch to update nvidia plugin for GPU info

This patch is to update nvidia plugin to collect
logs for Nvidia GPUs

Signed-off-by: Mamatha Inamdar <mamatha4@linux.vnet.ibm.com>
Reported-by: Borislav Stoymirski <borislav.stoymirski@bg.ibm.com>
Reported-by: Yesenia Jimenez <yesenia@us.ibm.com>
---
sos/report/plugins/nvidia.py | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/sos/report/plugins/nvidia.py b/sos/report/plugins/nvidia.py
index 09aaf586b..9e21b478e 100644
--- a/sos/report/plugins/nvidia.py
+++ b/sos/report/plugins/nvidia.py
@@ -23,13 +23,24 @@ def setup(self):
'--list-gpus',
'-q -d PERFORMANCE',
'-q -d SUPPORTED_CLOCKS',
- '-q -d PAGE_RETIREMENT'
+ '-q -d PAGE_RETIREMENT',
+ '-q',
+ '-q -d ECC',
+ 'nvlink -s',
+ 'nvlink -e'
]

self.add_cmd_output(["nvidia-smi %s" % cmd for cmd in subcmds])

query = ('gpu_name,gpu_bus_id,vbios_version,temperature.gpu,'
- 'utilization.gpu,memory.total,memory.free,memory.used')
+ 'utilization.gpu,memory.total,memory.free,memory.used,'
+ 'clocks.applications.graphics,clocks.applications.memory')
+ querypages = ('timestamp,gpu_bus_id,gpu_serial,gpu_uuid,'
+ 'retired_pages.address,retired_pages.cause')
self.add_cmd_output("nvidia-smi --query-gpu=%s --format=csv" % query)
+ self.add_cmd_output(
+ "nvidia-smi --query-retired-pages=%s --format=csv" % querypages
+ )
+ self.add_journal(boot=0, identifier='nvidia-persistenced')

# vim: set et ts=4 sw=4 :
@@ -1,224 +0,0 @@
From 2e8b5e2d4f30854cce93d149fc7d24b9d9cfd02c Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Fri, 19 Nov 2021 16:16:07 +0100
Subject: [PATCH 1/3] [policies] strip path from SFTP upload filename

When case_id is not supplied, we ask SFTP server to store the uploaded
file under name /var/tmp/<tarball>, which is confusing.

Let remove the path from it also in case_id not supplied.

Related to: #2764

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/policies/distros/redhat.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
index 3476e21fb..8817fc785 100644
--- a/sos/policies/distros/redhat.py
+++ b/sos/policies/distros/redhat.py
@@ -269,10 +269,10 @@ def _get_sftp_upload_name(self):
"""The RH SFTP server will only automatically connect file uploads to
cases if the filename _starts_ with the case number
"""
+ fname = self.upload_archive_name.split('/')[-1]
if self.case_id:
- return "%s_%s" % (self.case_id,
- self.upload_archive_name.split('/')[-1])
- return self.upload_archive_name
+ return "%s_%s" % (self.case_id, fname)
+ return fname

def upload_sftp(self):
"""Override the base upload_sftp to allow for setting an on-demand

From 61023b29a656dd7afaa4a0643368b0a53f1a3779 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Fri, 19 Nov 2021 17:31:31 +0100
Subject: [PATCH 2/3] [redhat] update SFTP API version to v2

Change API version from v1 to v2, which includes:
- change of URL
- different URI
- POST method for token generation instead of GET

Resolves: #2764

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/policies/distros/redhat.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
index 8817fc785..e4e2b8835 100644
--- a/sos/policies/distros/redhat.py
+++ b/sos/policies/distros/redhat.py
@@ -175,7 +175,7 @@ def get_tmp_dir(self, opt_tmp_dir):
No changes will be made to system configuration.
"""

-RH_API_HOST = "https://access.redhat.com"
+RH_API_HOST = "https://api.access.redhat.com"
RH_SFTP_HOST = "sftp://sftp.access.redhat.com"


@@ -287,12 +287,12 @@ def upload_sftp(self):
" for obtaining SFTP auth token.")
_token = None
_user = None
+ url = RH_API_HOST + '/support/v2/sftp/token'
# we have a username and password, but we need to reset the password
# to be the token returned from the auth endpoint
if self.get_upload_user() and self.get_upload_password():
- url = RH_API_HOST + '/hydra/rest/v1/sftp/token'
auth = self.get_upload_https_auth()
- ret = requests.get(url, auth=auth, timeout=10)
+ ret = requests.post(url, auth=auth, timeout=10)
if ret.status_code == 200:
# credentials are valid
_user = self.get_upload_user()
@@ -302,8 +302,8 @@ def upload_sftp(self):
"credentials. Will try anonymous.")
# we either do not have a username or password/token, or both
if not _token:
- aurl = RH_API_HOST + '/hydra/rest/v1/sftp/token?isAnonymous=true'
- anon = requests.get(aurl, timeout=10)
+ adata = {"isAnonymous": True}
+ anon = requests.post(url, data=json.dumps(adata), timeout=10)
if anon.status_code == 200:
resp = json.loads(anon.text)
_user = resp['username']

From 267da2156ec61f526dd28e760ff6528408a76c3f Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Mon, 22 Nov 2021 15:22:32 +0100
Subject: [PATCH 3/3] [policies] Deal 200 return code as success

Return code 200 of POST method request must be dealt as success.

Newly required due to the SFTP API change using POST.

Related to: #2764

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/policies/distros/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
index 0906fa779..6f257fdce 100644
--- a/sos/policies/distros/__init__.py
+++ b/sos/policies/distros/__init__.py
@@ -551,7 +551,7 @@ def upload_https(self):
r = self._upload_https_put(arc, verify)
else:
r = self._upload_https_post(arc, verify)
- if r.status_code != 201:
+ if r.status_code != 200 and r.status_code != 201:
if r.status_code == 401:
raise Exception(
"Authentication failed: invalid user credentials"
From 8da1b14246226792c160dd04e5c7c75dd4e8d44b Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Mon, 22 Nov 2021 10:44:09 +0100
Subject: [PATCH] [collect] fix moved get_upload_url under Policy class

SoSCollector does not further declare get_upload_url method
as that was moved under Policy class(es).

Resolves: #2766

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/collector/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
index 50183e873..42a7731d6 100644
--- a/sos/collector/__init__.py
+++ b/sos/collector/__init__.py
@@ -1219,7 +1219,7 @@ this utility or remote systems that it c
msg = 'No sosreports were collected, nothing to archive...'
self.exit(msg, 1)

- if self.opts.upload and self.get_upload_url():
+ if self.opts.upload and self.policy.get_upload_url():
try:
self.policy.upload_archive(arc_name)
self.ui_log.info("Uploaded archive successfully")
From abb2fc65bd14760021c61699ad3113cab3bd4c64 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Tue, 30 Nov 2021 11:37:02 +0100
Subject: [PATCH 1/2] [redhat] Fix broken URI to upload to customer portal

Revert back the unwanted change in URI of uploading tarball to the
Red Hat Customer portal.

Related: #2772

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/policies/distros/redhat.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
index e4e2b883..eb442407 100644
--- a/sos/policies/distros/redhat.py
+++ b/sos/policies/distros/redhat.py
@@ -250,7 +250,7 @@ support representative.
elif self.commons['cmdlineopts'].upload_protocol == 'sftp':
return RH_SFTP_HOST
else:
- rh_case_api = "/hydra/rest/cases/%s/attachments"
+ rh_case_api = "/support/v1/cases/%s/attachments"
return RH_API_HOST + rh_case_api % self.case_id

def _get_upload_headers(self):
--
2.31.1


From ea4f9e88a412c80a4791396e1bb78ac1e24ece14 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Tue, 30 Nov 2021 13:00:26 +0100
Subject: [PATCH 2/2] [policy] Add error message when FTP upload write failure

When (S)FTP upload fails to write the destination file,
our "expect" code should detect it sooner than after timeout happens
and write appropriate error message.

Resolves: #2772

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
sos/policies/distros/__init__.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
index 6f257fdc..7bdc81b8 100644
--- a/sos/policies/distros/__init__.py
+++ b/sos/policies/distros/__init__.py
@@ -473,7 +473,8 @@ class LinuxPolicy(Policy):
put_expects = [
u'100%',
pexpect.TIMEOUT,
- pexpect.EOF
+ pexpect.EOF,
+ u'No such file or directory'
]

put_success = ret.expect(put_expects, timeout=180)
@@ -485,6 +486,8 @@ class LinuxPolicy(Policy):
raise Exception("Timeout expired while uploading")
elif put_success == 2:
raise Exception("Unknown error during upload: %s" % ret.before)
+ elif put_success == 3:
+ raise Exception("Unable to write archive to destination")
else:
raise Exception("Unexpected response from server: %s" % ret.before)

--
2.31.1

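The filename handling from PATCH 1/3 is plain string work and can be checked in isolation (the archive path is an example value):

def sftp_upload_name(archive_path, case_id=None):
    # Strip the local directory and, when known, prefix the case number.
    fname = archive_path.split('/')[-1]
    if case_id:
        return "%s_%s" % (case_id, fname)
    return fname

print(sftp_upload_name("/var/tmp/sosreport-host-2021-11-19.tar.xz"))
print(sftp_upload_name("/var/tmp/sosreport-host-2021-11-19.tar.xz", "01234567"))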
@@ -1,24 +0,0 @@
From aa2887f71c779448b22e4de67ae68dbaf218b7b9 Mon Sep 17 00:00:00 2001
From: Taft Sanders <taftsanders@gmail.com>
Date: Fri, 10 Dec 2021 09:34:59 -0500
Subject: [PATCH] [rhui] New log folder

Included new log folder per Bugzilla 2030741

Signed-off-by: Taft Sanders <taftsanders@gmail.com>
---
sos/report/plugins/rhui.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/sos/report/plugins/rhui.py b/sos/report/plugins/rhui.py
index 52065fb44..add024613 100644
--- a/sos/report/plugins/rhui.py
+++ b/sos/report/plugins/rhui.py
@@ -27,6 +27,7 @@ def setup(self):
"/var/log/rhui-subscription-sync.log",
"/var/cache/rhui/*",
"/root/.rhui/*",
+ "/var/log/rhui/*",
])
# skip collecting certificate keys
self.add_forbidden_path("/etc/pki/rhui/**/*.key", recursive=True)
@@ -1,146 +0,0 @@
From 137abd394f64a63b6633949b5c81159af12038b7 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Fri, 14 Jan 2022 20:07:17 +0100
Subject: [PATCH] [report] pass foreground argument to collect_cmd_output

Related to: #2825

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
 sos/report/plugins/__init__.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index 98f163ab9..1bbdf28a4 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -1920,6 +1920,8 @@ class Plugin(object):
         :param subdir: Subdir in plugin directory to save to
         :param changes: Does this cmd potentially make a change
                         on the system?
+        :param foreground: Run the `cmd` in the foreground with a
+                           TTY
         :param tags: Add tags in the archive manifest
         :param cmd_as_tag: Format command string to tag

@@ -2145,7 +2147,8 @@ def collect_cmd_output(self, cmd, suggest_filename=None,
                            root_symlink=False, timeout=None,
                            stderr=True, chroot=True, runat=None, env=None,
                            binary=False, sizelimit=None, pred=None,
-                           changes=False, subdir=None, tags=[]):
+                           changes=False, foreground=False, subdir=None,
+                           tags=[]):
         """Execute a command and save the output to a file for inclusion in the
         report, then return the results for further use by the plugin

@@ -2188,6 +2191,9 @@ def collect_cmd_output(self, cmd, suggest_filename=None,
                         on the system?
         :type changes: ``bool``

+        :param foreground: Run the `cmd` in the foreground with a TTY
+        :type foreground: ``bool``
+
         :param tags: Add tags in the archive manifest
         :type tags: ``str`` or a ``list`` of strings

@@ -2206,8 +2212,8 @@ def collect_cmd_output(self, cmd, suggest_filename=None,
         return self._collect_cmd_output(
             cmd, suggest_filename=suggest_filename, root_symlink=root_symlink,
             timeout=timeout, stderr=stderr, chroot=chroot, runat=runat,
-            env=env, binary=binary, sizelimit=sizelimit, subdir=subdir,
-            tags=tags
+            env=env, binary=binary, sizelimit=sizelimit, foreground=foreground,
+            subdir=subdir, tags=tags
         )

     def exec_cmd(self, cmd, timeout=None, stderr=True, chroot=True,
From 747fef695e4ff08f320c5f03090bdefa7154c761 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Fri, 14 Jan 2022 20:10:22 +0100
Subject: [PATCH] [virsh] Call virsh commands in the foreground / with a TTY

In some virsh errors (like unable to connect to a hypervisor),
the tool requires to communicate to TTY otherwise it can get stuck
(when called via Popen with a timeout).

Calling it on foreground prevents the stuck / waiting on cmd timeout.

Resolves: #2825

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
 sos/report/plugins/virsh.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/sos/report/plugins/virsh.py b/sos/report/plugins/virsh.py
index d6b7c16761..08f9a8488c 100644
--- a/sos/report/plugins/virsh.py
+++ b/sos/report/plugins/virsh.py
@@ -39,26 +39,30 @@ def setup(self):
         ]

         for subcmd in subcmds:
-            self.add_cmd_output('%s %s' % (cmd, subcmd))
+            self.add_cmd_output('%s %s' % (cmd, subcmd), foreground=True)

         # get network, pool and nwfilter elements
         for k in ['net', 'nwfilter', 'pool']:
-            k_list = self.collect_cmd_output('%s %s-list' % (cmd, k))
+            k_list = self.collect_cmd_output('%s %s-list' % (cmd, k),
+                                             foreground=True)
             if k_list['status'] == 0:
                 k_lines = k_list['output'].splitlines()
                 # the 'Name' column position changes between virsh cmds
                 pos = k_lines[0].split().index('Name')
                 for j in filter(lambda x: x, k_lines[2:]):
                     n = j.split()[pos]
-                    self.add_cmd_output('%s %s-dumpxml %s' % (cmd, k, n))
+                    self.add_cmd_output('%s %s-dumpxml %s' % (cmd, k, n),
+                                        foreground=True)

         # cycle through the VMs/domains list, ignore 2 header lines and latest
         # empty line, and dumpxml domain name in 2nd column
-        domains_output = self.exec_cmd('%s list --all' % cmd)
+        domains_output = self.exec_cmd('%s list --all' % cmd, foreground=True)
         if domains_output['status'] == 0:
             domains_lines = domains_output['output'].splitlines()[2:]
             for domain in filter(lambda x: x, domains_lines):
                 d = domain.split()[1]
                 for x in ['dumpxml', 'dominfo', 'domblklist']:
-                    self.add_cmd_output('%s %s %s' % (cmd, x, d))
+                    self.add_cmd_output('%s %s %s' % (cmd, x, d),
+                                        foreground=True)
+
 # vim: et ts=4 sw=4
From 9bc032129ec66766f07349dd115335f104888efa Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Wed, 26 Jan 2022 09:44:01 +0100
Subject: [PATCH] [virsh] Catch parsing exception

In case virsh output is malformed or missing 'Name' otherwise,
catch parsing exception and continue in next for loop iteration.

Resolves: #2836

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
 sos/report/plugins/virsh.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/sos/report/plugins/virsh.py b/sos/report/plugins/virsh.py
index 08f9a8488..2ce1df15c 100644
--- a/sos/report/plugins/virsh.py
+++ b/sos/report/plugins/virsh.py
@@ -48,7 +48,11 @@ def setup(self):
             if k_list['status'] == 0:
                 k_lines = k_list['output'].splitlines()
                 # the 'Name' column position changes between virsh cmds
-                pos = k_lines[0].split().index('Name')
+                # catch the rare exceptions when 'Name' is not found
+                try:
+                    pos = k_lines[0].split().index('Name')
+                except Exception:
+                    continue
                 for j in filter(lambda x: x, k_lines[2:]):
                     n = j.split()[pos]
                     self.add_cmd_output('%s %s-dumpxml %s' % (cmd, k, n),
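A minimal standalone sketch of the column parsing that the two virsh patches above adjust; the sample output string below is hand-written, not real virsh output.

# Locate the 'Name' column in virsh table output, guarded the same way the
# patch guards it when the header is malformed.
sample = """ Name      State    Autostart   Persistent
----------------------------------------------
 default   active   yes         yes
"""

lines = sample.splitlines()
try:
    pos = lines[0].split().index('Name')
except Exception:
    pos = None

if pos is not None:
    # skip the header and separator rows, then pick the Name column
    for row in filter(None, lines[2:]):
        print(row.split()[pos])   # prints: default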
@@ -1,252 +0,0 @@
From 210b83e1d1164d29b1f6198675b8b596c4af8336 Mon Sep 17 00:00:00 2001
From: Daniel Alvarez Sanchez <dalvarez@redhat.com>
Date: Thu, 20 Jan 2022 12:58:44 +0100
Subject: [PATCH] [ovn_central] Account for Red Hat ovn package naming

Previous ovn packages were 'ovn2xxx' and now they have
been renamed to 'ovn-2xxx'. This causes sos tool to not
recognize that the packages are installed and it won't
collect the relevant data.

This patch is changing the match to be compatible
with the previous and newer naming conventions.

Signed-off-by: Daniel Alvarez Sanchez <dalvarez@redhat.com>
---
 sos/report/plugins/ovn_central.py | 2 +-
 sos/report/plugins/ovn_host.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
index ddbf288da..0f947d4c5 100644
--- a/sos/report/plugins/ovn_central.py
+++ b/sos/report/plugins/ovn_central.py
@@ -147,7 +147,7 @@ def setup(self):

 class RedHatOVNCentral(OVNCentral, RedHatPlugin):

-    packages = ('openvswitch-ovn-central', 'ovn2.*-central', )
+    packages = ('openvswitch-ovn-central', 'ovn.*-central', )
     ovn_sbdb_sock_path = '/var/run/openvswitch/ovnsb_db.ctl'


diff --git a/sos/report/plugins/ovn_host.py b/sos/report/plugins/ovn_host.py
index 78604a15a..25c38cccc 100644
--- a/sos/report/plugins/ovn_host.py
+++ b/sos/report/plugins/ovn_host.py
@@ -55,7 +55,7 @@ def check_enabled(self):

 class RedHatOVNHost(OVNHost, RedHatPlugin):

-    packages = ('openvswitch-ovn-host', 'ovn2.*-host', )
+    packages = ('openvswitch-ovn-host', 'ovn.*-host', )


 class DebianOVNHost(OVNHost, DebianPlugin, UbuntuPlugin):
From 21fc376d97a5f74743e2b7cf7069349e874b979e Mon Sep 17 00:00:00 2001
From: Hemanth Nakkina <hemanth.nakkina@canonical.com>
Date: Fri, 4 Feb 2022 07:57:59 +0530
Subject: [PATCH] [ovn-central] collect NB/SB ovsdb-server cluster status

Add commands to collect cluster status of Northbound and
Southbound ovsdb servers.

Resolves: #2840

Signed-off-by: Hemanth Nakkina hemanth.nakkina@canonical.com
---
 sos/report/plugins/ovn_central.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
index 0f947d4c5..2f0438df3 100644
--- a/sos/report/plugins/ovn_central.py
+++ b/sos/report/plugins/ovn_central.py
@@ -84,6 +84,14 @@ def setup(self):
         else:
             self.add_copy_spec("/var/log/ovn/*.log")

+        # ovsdb nb/sb cluster status commands
+        ovsdb_cmds = [
+            'ovs-appctl -t {} cluster/status OVN_Northbound'.format(
+                self.ovn_nbdb_sock_path),
+            'ovs-appctl -t {} cluster/status OVN_Southbound'.format(
+                self.ovn_sbdb_sock_path),
+        ]
+
         # Some user-friendly versions of DB output
         nbctl_cmds = [
             'ovn-nbctl show',
@@ -109,7 +117,8 @@ def setup(self):

         self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')

-        cmds = nbctl_cmds
+        cmds = ovsdb_cmds
+        cmds += nbctl_cmds

         # Can only run sbdb commands if we are the leader
         co = {'cmd': "ovs-appctl -t {} cluster/status OVN_Southbound".
@@ -148,10 +157,12 @@ def setup(self):
 class RedHatOVNCentral(OVNCentral, RedHatPlugin):

     packages = ('openvswitch-ovn-central', 'ovn.*-central', )
+    ovn_nbdb_sock_path = '/var/run/openvswitch/ovnnb_db.ctl'
     ovn_sbdb_sock_path = '/var/run/openvswitch/ovnsb_db.ctl'


 class DebianOVNCentral(OVNCentral, DebianPlugin, UbuntuPlugin):

     packages = ('ovn-central', )
+    ovn_nbdb_sock_path = '/var/run/ovn/ovnnb_db.ctl'
     ovn_sbdb_sock_path = '/var/run/ovn/ovnsb_db.ctl'
From d0f9d507b0ec63c9e8f3e5d7b6507d9d0f97c038 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Tue, 15 Feb 2022 16:24:47 -0500
Subject: [PATCH] [runtimes] Allow container IDs to be used with
 `container_exists()`

As container runtimes can interchange container names and container IDs,
sos should also allow the use of container IDs when checking for the
presence of a given container.

In particular, this change unblocks the use of `Plugin.exec_cmd()` when
used in conjunction with `Plugin.get_container_by_name()` to pick a
container based on a provided regex that the container name may match.

Related: #2856

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/policies/runtimes/__init__.py | 17 +++++++++++++++++
 sos/report/plugins/__init__.py | 6 +++---
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py
index 5ac673544..d28373496 100644
--- a/sos/policies/runtimes/__init__.py
+++ b/sos/policies/runtimes/__init__.py
@@ -147,6 +147,23 @@ def get_volumes(self):
                 vols.append(ent[-1])
         return vols

+    def container_exists(self, container):
+        """Check if a given container ID or name exists on the system from the
+        perspective of the container runtime.
+
+        Note that this will only check _running_ containers
+
+        :param container: The name or ID of the container
+        :type container: ``str``
+
+        :returns: True if the container exists, else False
+        :rtype: ``bool``
+        """
+        for _contup in self.containers:
+            if container in _contup:
+                return True
+        return False
+
     def fmt_container_cmd(self, container, cmd, quotecmd):
         """Format a command to run inside a container using the runtime

diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index 2988be089..cc5cb65bc 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -2593,7 +2593,7 @@ def container_exists(self, name):
         """If a container runtime is present, check to see if a container with
         a given name is currently running

-        :param name: The name of the container to check presence of
+        :param name: The name or ID of the container to check presence of
         :type name: ``str``

         :returns: ``True`` if `name` exists, else ``False``
@@ -2601,8 +2601,8 @@ def container_exists(self, name):
         """
         _runtime = self._get_container_runtime()
         if _runtime is not None:
-            con = _runtime.get_container_by_name(name)
-            return con is not None
+            return (_runtime.container_exists(name) or
+                    _runtime.get_container_by_name(name) is not None)
         return False

     def get_all_containers_by_regex(self, regex, get_all=False):

From de9b020a72d1ceda39587db4c6d5acf72cd90da2 Mon Sep 17 00:00:00 2001
From: Fernando Royo <froyo@redhat.com>
Date: Tue, 15 Feb 2022 10:00:38 +0100
Subject: [PATCH] [ovn_central] Rename container responsable of Red Hat
 ovn_central plugin

ovn_central plugin is running by container with
name 'ovn-dbs-bundle*', a typo has been identified and
this cause plugin ovn_central not enabled by default as it
does not recognize any container responsible of this.

This patch fix this container name match, searching schema db
keeping backward compatibility with openvswitch.
---
 sos/report/plugins/ovn_central.py | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
index 2f0438df..2f34bff0 100644
--- a/sos/report/plugins/ovn_central.py
+++ b/sos/report/plugins/ovn_central.py
@@ -24,7 +24,7 @@ class OVNCentral(Plugin):
     short_desc = 'OVN Northd'
     plugin_name = "ovn_central"
     profiles = ('network', 'virt')
-    containers = ('ovs-db-bundle.*',)
+    containers = ('ovn-dbs-bundle.*',)

     def get_tables_from_schema(self, filename, skip=[]):
         if self._container_name:
@@ -66,7 +66,7 @@ class OVNCentral(Plugin):
             cmds.append('%s list %s' % (ovn_cmd, table))

     def setup(self):
-        self._container_name = self.get_container_by_name('ovs-dbs-bundle.*')
+        self._container_name = self.get_container_by_name(self.containers[0])

         ovs_rundir = os.environ.get('OVS_RUNDIR')
         for pidfile in ['ovnnb_db.pid', 'ovnsb_db.pid', 'ovn-northd.pid']:
@@ -110,12 +110,11 @@ class OVNCentral(Plugin):
             'ovn-sbctl get-connection',
         ]

-        schema_dir = '/usr/share/openvswitch'
-
-        nb_tables = self.get_tables_from_schema(self.path_join(
-            schema_dir, 'ovn-nb.ovsschema'))
-
-        self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')
+        # backward compatibility
+        for path in ['/usr/share/openvswitch', '/usr/share/ovn']:
+            nb_tables = self.get_tables_from_schema(self.path_join(
+                path, 'ovn-nb.ovsschema'))
+            self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')

         cmds = ovsdb_cmds
         cmds += nbctl_cmds
@@ -125,9 +124,11 @@ class OVNCentral(Plugin):
             format(self.ovn_sbdb_sock_path),
             "output": "Leader: self"}
         if self.test_predicate(self, pred=SoSPredicate(self, cmd_outputs=co)):
-            sb_tables = self.get_tables_from_schema(self.path_join(
-                schema_dir, 'ovn-sb.ovsschema'), ['Logical_Flow'])
-            self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl')
+            # backward compatibility
+            for path in ['/usr/share/openvswitch', '/usr/share/ovn']:
+                sb_tables = self.get_tables_from_schema(self.path_join(
+                    path, 'ovn-sb.ovsschema'), ['Logical_Flow'])
+                self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl')
         cmds += sbctl_cmds

         # If OVN is containerized, we need to run the above commands inside
--
2.34.1

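A short standalone sketch of the membership test behind container_exists() in the runtimes patch above: if the runtime tracks (id, name) tuples, either form matches with `in`. The tuples below are made-up example data.

containers = [
    ('f7a3b2c1d0e9', 'ovn-dbs-bundle-podman-0'),   # hypothetical (id, name)
    ('9c8b7a6d5e4f', 'web'),
]

def container_exists(container):
    # True if the given string matches either the ID or the name of a
    # running container
    return any(container in _contup for _contup in containers)

print(container_exists('web'))             # True, matched by name
print(container_exists('f7a3b2c1d0e9'))    # True, matched by ID
print(container_exists('missing'))         # False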
@@ -1,59 +0,0 @@
From 5634f7dd77eff821f37daa953fa86cc783d3b937 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Fri, 21 Jan 2022 16:27:33 +0100
Subject: [PATCH] [foreman] Use psql-msgpack-decode wrapper for dynflow >= 1.6

In dynflow >=1.6.3, dynflow* tables in postgres are encoded by
msgpack which makes plain CSV dumps unreadable. In such a case,
psql-msgpack-decode wrapper tool from dynflow-utils (of any
version) must be used instead of the plain psql command.

Resolves: #2830

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
 sos/report/plugins/foreman.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/sos/report/plugins/foreman.py b/sos/report/plugins/foreman.py
index 314a651d1..3fd80e6a8 100644
--- a/sos/report/plugins/foreman.py
+++ b/sos/report/plugins/foreman.py
@@ -244,8 +244,16 @@ def setup(self):
             self.add_cmd_output(_cmd, suggest_filename=table, timeout=600,
                                 sizelimit=100, env=self.env)

+        # dynflow* tables on dynflow >=1.6.3 are encoded and hence in that
+        # case, psql-msgpack-decode wrapper tool from dynflow-utils (any
+        # version) must be used instead of plain psql command
+        dynutils = self.is_installed('dynflow-utils')
         for dyn in foremancsv:
-            _cmd = self.build_query_cmd(foremancsv[dyn], csv=True)
+            binary = "psql"
+            if dyn != 'foreman_tasks_tasks' and dynutils:
+                binary = "/usr/libexec/psql-msgpack-decode"
+            _cmd = self.build_query_cmd(foremancsv[dyn], csv=True,
+                                        binary=binary)
             self.add_cmd_output(_cmd, suggest_filename=dyn, timeout=600,
                                 sizelimit=100, env=self.env)

@@ -270,7 +278,7 @@ def setup(self):
         # collect http[|s]_proxy env.variables
         self.add_env_var(["http_proxy", "https_proxy"])

-    def build_query_cmd(self, query, csv=False):
+    def build_query_cmd(self, query, csv=False, binary="psql"):
         """
         Builds the command needed to invoke the pgsql query as the postgres
         user.
@@ -281,8 +289,8 @@ def build_query_cmd(self, query, csv=False):
         if csv:
             query = "COPY (%s) TO STDOUT " \
                     "WITH (FORMAT 'csv', DELIMITER ',', HEADER)" % query
-        _dbcmd = "psql --no-password -h %s -p 5432 -U foreman -d foreman -c %s"
-        return _dbcmd % (self.dbhost, quote(query))
+        _dbcmd = "%s --no-password -h %s -p 5432 -U foreman -d foreman -c %s"
+        return _dbcmd % (binary, self.dbhost, quote(query))

     def postproc(self):
         self.do_path_regex_sub(
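A self-contained sketch of the command construction changed in the foreman patch above; the host, table name and the wrapper path are taken as illustrative assumptions.

from shlex import quote

# Build the psql (or psql-msgpack-decode) command line the same way the
# patched build_query_cmd() does; 'localhost' here is a placeholder dbhost.
def build_query_cmd(query, csv=False, binary="psql", dbhost="localhost"):
    if csv:
        query = "COPY (%s) TO STDOUT " \
                "WITH (FORMAT 'csv', DELIMITER ',', HEADER)" % query
    _dbcmd = "%s --no-password -h %s -p 5432 -U foreman -d foreman -c %s"
    return _dbcmd % (binary, dbhost, quote(query))

print(build_query_cmd("select * from dynflow_actions", csv=True,
                      binary="/usr/libexec/psql-msgpack-decode"))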
@@ -1,94 +0,0 @@
From 5824cd5d3bddf39e0382d568419e2453abc93d8a Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Mon, 30 Aug 2021 15:09:07 -0400
Subject: [PATCH] [options] Fix logging on plugopts in effective sos command

First, provide a special-case handling for plugin options specified in
sos.conf in `SoSOptions.to_args().has_value()` that allows for plugin
options to be included in the "effective options now" log message.

Second, move the logging of said message (and thus the merging of
preset options, if used), to being _prior_ to the loading of plugin
options.

Combined, plugin options specified in sos.conf will now be logged
properly and this logging will occur before we set (and log the setting
of) those options.

Resolves: #2663

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/options.py | 2 ++
 sos/report/__init__.py | 30 ++++++++++++++++--------------
 2 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/sos/options.py b/sos/options.py
index a014a022..7bea3ffc 100644
--- a/sos/options.py
+++ b/sos/options.py
@@ -281,6 +281,8 @@ class SoSOptions():
         null_values = ("False", "None", "[]", '""', "''", "0")
         if not value or value in null_values:
             return False
+        if name == 'plugopts' and value:
+            return True
         if name in self.arg_defaults:
             if str(value) == str(self.arg_defaults[name]):
                 return False
diff --git a/sos/report/__init__.py b/sos/report/__init__.py
index b0159e5b..82484f1d 100644
--- a/sos/report/__init__.py
+++ b/sos/report/__init__.py
@@ -925,20 +925,6 @@ class SoSReport(SoSComponent):
             self._exit(1)

     def setup(self):
-        # Log command line options
-        msg = "[%s:%s] executing 'sos %s'"
-        self.soslog.info(msg % (__name__, "setup", " ".join(self.cmdline)))
-
-        # Log active preset defaults
-        preset_args = self.preset.opts.to_args()
-        msg = ("[%s:%s] using '%s' preset defaults (%s)" %
-               (__name__, "setup", self.preset.name, " ".join(preset_args)))
-        self.soslog.info(msg)
-
-        # Log effective options after applying preset defaults
-        self.soslog.info("[%s:%s] effective options now: %s" %
-                         (__name__, "setup", " ".join(self.opts.to_args())))
-
         self.ui_log.info(_(" Setting up plugins ..."))
         for plugname, plug in self.loaded_plugins:
             try:
@@ -1386,11 +1372,27 @@ class SoSReport(SoSComponent):
         self.report_md.add_list('disabled_plugins', self.opts.skip_plugins)
         self.report_md.add_section('plugins')

+    def _merge_preset_options(self):
+        # Log command line options
+        msg = "[%s:%s] executing 'sos %s'"
+        self.soslog.info(msg % (__name__, "setup", " ".join(self.cmdline)))
+
+        # Log active preset defaults
+        preset_args = self.preset.opts.to_args()
+        msg = ("[%s:%s] using '%s' preset defaults (%s)" %
+               (__name__, "setup", self.preset.name, " ".join(preset_args)))
+        self.soslog.info(msg)
+
+        # Log effective options after applying preset defaults
+        self.soslog.info("[%s:%s] effective options now: %s" %
+                         (__name__, "setup", " ".join(self.opts.to_args())))
+
     def execute(self):
         try:
             self.policy.set_commons(self.get_commons())
             self.load_plugins()
             self._set_all_options()
+            self._merge_preset_options()
             self._set_tunables()
             self._check_for_unknown_plugins()
             self._set_plugin_options()
--
2.34.1

169	SOURCES/sos-bz2055002-rebase-sos-add-sos-help.patch	Normal file
@@ -0,0 +1,169 @@
From b5389aa195675f473acdd22f20017a8854ff82d0 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Wed, 16 Feb 2022 08:43:32 +0100
Subject: [PATCH] [man] Mention sos-help in main sos manpage

Related to #2860

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
 man/en/sos.1 | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/man/en/sos.1 b/man/en/sos.1
index ce4918f99..c335b7e10 100644
--- a/man/en/sos.1
+++ b/man/en/sos.1
@@ -67,6 +67,14 @@ May be invoked via either \fBsos clean\fR, \fBsos cleaner\fR, \fBsos mask\fR,
 or via the \fB--clean\fR, \fB--cleaner\fR or \fB --mask\fR options
 for \fBreport\fR and \fBcollect\fR.

+.TP
+.B help
+This subcommand is used to retrieve more detailed information on the various SoS
+commands and components than is directly available in either other manpages or
+--help output.
+
+See \fB sos help --help\fR and \fB man sos-help\fR for more information.
+
 .SH GLOBAL OPTIONS
 sos components provide their own set of options, however the following are available
 to be set across all components.
From ac4eb48fa35c13b99ada41540831412480babf8d Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Wed, 16 Feb 2022 08:44:16 +0100
Subject: [PATCH] [setup] Add sos-help to build process

Resolves: #2860
Closes: #2861

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
 setup.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/setup.py b/setup.py
index 25e87a71b..8db8641f0 100644
--- a/setup.py
+++ b/setup.py
@@ -90,7 +90,7 @@ def copy_file (self, filename, dirname):
         ('share/man/man1', ['man/en/sosreport.1', 'man/en/sos-report.1',
                             'man/en/sos.1', 'man/en/sos-collect.1',
                             'man/en/sos-collector.1', 'man/en/sos-clean.1',
-                            'man/en/sos-mask.1']),
+                            'man/en/sos-mask.1', 'man/en/sos-help.1']),
         ('share/man/man5', ['man/en/sos.conf.5']),
         ('share/licenses/sos', ['LICENSE']),
         ('share/doc/sos', ['AUTHORS', 'README.md']),
@@ -102,7 +102,8 @@ def copy_file (self, filename, dirname):
         'sos.policies.package_managers', 'sos.policies.init_systems',
         'sos.report', 'sos.report.plugins', 'sos.collector',
         'sos.collector.clusters', 'sos.collector.transports', 'sos.cleaner',
-        'sos.cleaner.mappings', 'sos.cleaner.parsers', 'sos.cleaner.archives'
+        'sos.cleaner.mappings', 'sos.cleaner.parsers', 'sos.cleaner.archives',
+        'sos.help'
     ],
     cmdclass=cmdclass,
     command_options=command_options,
From de9b020a72d1ceda39587db4c6d5acf72cd90da2 Mon Sep 17 00:00:00 2001
From: Fernando Royo <froyo@redhat.com>
Date: Tue, 15 Feb 2022 10:00:38 +0100
Subject: [PATCH] [ovn_central] Rename container responsable of Red Hat
 ovn_central plugin

ovn_central plugin is running by container with
name 'ovn-dbs-bundle*', a typo has been identified and
this cause plugin ovn_central not enabled by default as it
does not recognize any container responsible of this.

This patch fix this container name match, searching schema db
keeping backward compatibility with openvswitch.
---
 sos/report/plugins/ovn_central.py | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
index 2f0438df3..2f34bff09 100644
--- a/sos/report/plugins/ovn_central.py
+++ b/sos/report/plugins/ovn_central.py
@@ -24,7 +24,7 @@ class OVNCentral(Plugin):
     short_desc = 'OVN Northd'
     plugin_name = "ovn_central"
     profiles = ('network', 'virt')
-    containers = ('ovs-db-bundle.*',)
+    containers = ('ovn-dbs-bundle.*',)

     def get_tables_from_schema(self, filename, skip=[]):
         if self._container_name:
@@ -66,7 +66,7 @@ def add_database_output(self, tables, cmds, ovn_cmd):
             cmds.append('%s list %s' % (ovn_cmd, table))

     def setup(self):
-        self._container_name = self.get_container_by_name('ovs-dbs-bundle.*')
+        self._container_name = self.get_container_by_name(self.containers[0])

         ovs_rundir = os.environ.get('OVS_RUNDIR')
         for pidfile in ['ovnnb_db.pid', 'ovnsb_db.pid', 'ovn-northd.pid']:
@@ -110,12 +110,11 @@ def setup(self):
             'ovn-sbctl get-connection',
         ]

-        schema_dir = '/usr/share/openvswitch'
-
-        nb_tables = self.get_tables_from_schema(self.path_join(
-            schema_dir, 'ovn-nb.ovsschema'))
-
-        self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')
+        # backward compatibility
+        for path in ['/usr/share/openvswitch', '/usr/share/ovn']:
+            nb_tables = self.get_tables_from_schema(self.path_join(
+                path, 'ovn-nb.ovsschema'))
+            self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')

         cmds = ovsdb_cmds
         cmds += nbctl_cmds
@@ -125,9 +124,11 @@ def setup(self):
             format(self.ovn_sbdb_sock_path),
             "output": "Leader: self"}
         if self.test_predicate(self, pred=SoSPredicate(self, cmd_outputs=co)):
-            sb_tables = self.get_tables_from_schema(self.path_join(
-                schema_dir, 'ovn-sb.ovsschema'), ['Logical_Flow'])
-            self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl')
+            # backward compatibility
+            for path in ['/usr/share/openvswitch', '/usr/share/ovn']:
+                sb_tables = self.get_tables_from_schema(self.path_join(
+                    path, 'ovn-sb.ovsschema'), ['Logical_Flow'])
+                self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl')
         cmds += sbctl_cmds

         # If OVN is containerized, we need to run the above commands inside
From 7ebb2ce0bcd13c1b3aada648aceb20b5aff636d9 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Tue, 15 Feb 2022 14:18:02 -0500
Subject: [PATCH] [host] Skip entire /etc/sos/cleaner directory

While `default_mapping` is typically the only file expected under
`/etc/sos/cleaner/` it is possible for other mapping files (such as
backups) to appear there.

Make the `add_forbidden_path()` spec here target the entire cleaner
directory to avoid ever capturing these map files.

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/report/plugins/host.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sos/report/plugins/host.py b/sos/report/plugins/host.py
index 5e21da7b8..95a3b9cd9 100644
--- a/sos/report/plugins/host.py
+++ b/sos/report/plugins/host.py
@@ -20,7 +20,7 @@ class Host(Plugin, IndependentPlugin):

     def setup(self):

-        self.add_forbidden_path('/etc/sos/cleaner/default_mapping')
+        self.add_forbidden_path('/etc/sos/cleaner')

         self.add_cmd_output('hostname', root_symlink='hostname')
         self.add_cmd_output('uptime', root_symlink='uptime')
67	SOURCES/sos-bz2062908-tigervnc-update-collections.patch	Normal file
@@ -0,0 +1,67 @@
From 4c92968ce461cdfc6a5d913748b2ce4f148ff4a9 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Thu, 10 Mar 2022 12:31:49 -0500
Subject: [PATCH] [tigervnc] Update collections for newer versions of TigerVNC

First, relaxes the file specifications for collection by capturing the
entire `/etc/tigervnc/` directory.

Second, adds collection of service status and journal output for each
configured vnc server. Collection of `vncserver -list` is kept for
backwards compatibility.

Finally, add a short docstring for the plugin for --help output.

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/report/plugins/tigervnc.py | 28 +++++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)

diff --git a/sos/report/plugins/tigervnc.py b/sos/report/plugins/tigervnc.py
index 1302f6d4..e31aee25 100644
--- a/sos/report/plugins/tigervnc.py
+++ b/sos/report/plugins/tigervnc.py
@@ -12,17 +12,35 @@ from sos.report.plugins import Plugin, RedHatPlugin


 class TigerVNC(Plugin, RedHatPlugin):
+    """
+    This plugin gathers information for VNC servers provided by the tigervnc
+    package. This is explicitly for server-side collections, not clients.
+
+    By default, this plugin will capture the contents of /etc/tigervnc, which
+    may include usernames. If usernames are sensitive information for end
+    users of sos, consider using the `--clean` option to obfuscate these
+    names.
+    """

     short_desc = 'TigerVNC server configuration'
     plugin_name = 'tigervnc'
     packages = ('tigervnc-server',)

     def setup(self):
-        self.add_copy_spec([
-            '/etc/tigervnc/vncserver-config-defaults',
-            '/etc/tigervnc/vncserver-config-mandatory',
-            '/etc/tigervnc/vncserver.users'
-        ])
+        self.add_copy_spec('/etc/tigervnc/')
+
+        # service names are 'vncserver@$port' where $port is :1,, :2, etc...
+        # however they are not reported via list-unit-files, only list-units
+        vncs = self.exec_cmd(
+            'systemctl list-units --type=service --no-legend vncserver*'
+        )
+        if vncs['status'] == 0:
+            for serv in vncs['output'].splitlines():
+                vnc = serv.split()
+                if not vnc:
+                    continue
+                self.add_service_status(vnc[0])
+                self.add_journal(vnc[0])

         self.add_cmd_output('vncserver -list')

--
2.34.3

230	SOURCES/sos-bz2065805-collect-pacemaker-cluster.patch	Normal file
@@ -0,0 +1,230 @@
From 3b84b4ccfa9e4924a5a3829d3810568dfb69bf63 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Fri, 18 Mar 2022 16:25:35 -0400
Subject: [PATCH 1/2] [pacemaker] Redesign node enumeration logic

It has been found that `pcs status` output is liable to change, which
ends up breaking our parsing of node lists when using it on newer
versions.

Instead, first try to parse through `crm_mon` output, which is what `pcs
status` uses under the hood, but as a stable and reliable xml format.

Failing that, for example if the `--primary` node is not functioning as
part of the cluster, source `/etc/corosync/corosync.conf` instead.

Related: RHBZ2065805
Related: RHBZ2065811

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/collector/clusters/pacemaker.py | 110 +++++++++++++++++++---------
 1 file changed, 76 insertions(+), 34 deletions(-)

diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py
index 55024314..49d0ce51 100644
--- a/sos/collector/clusters/pacemaker.py
+++ b/sos/collector/clusters/pacemaker.py
@@ -8,7 +8,11 @@
 #
 # See the LICENSE file in the source distribution for further information.

+import re
+
 from sos.collector.clusters import Cluster
+from setuptools._vendor.packaging import version
+from xml.etree import ElementTree


 class pacemaker(Cluster):
@@ -18,42 +22,80 @@ class pacemaker(Cluster):
     packages = ('pacemaker',)
     option_list = [
         ('online', True, 'Collect nodes listed as online'),
-        ('offline', True, 'Collect nodes listed as offline')
+        ('offline', True, 'Collect nodes listed as offline'),
+        ('only-corosync', False, 'Only use corosync.conf to enumerate nodes')
     ]

     def get_nodes(self):
-        self.res = self.exec_primary_cmd('pcs status')
-        if self.res['status'] != 0:
-            self.log_error('Cluster status could not be determined. Is the '
-                           'cluster running on this node?')
-            return []
-        if 'node names do not match' in self.res['output']:
-            self.log_warn('Warning: node name mismatch reported. Attempts to '
-                          'connect to some nodes may fail.\n')
-        return self.parse_pcs_output()
-
-    def parse_pcs_output(self):
-        nodes = []
-        if self.get_option('online'):
-            nodes += self.get_online_nodes()
-        if self.get_option('offline'):
-            nodes += self.get_offline_nodes()
-        return nodes
-
-    def get_online_nodes(self):
-        for line in self.res['output'].splitlines():
-            if line.startswith('Online:'):
-                nodes = line.split('[')[1].split(']')[0]
-                return [n for n in nodes.split(' ') if n]
-
-    def get_offline_nodes(self):
-        offline = []
-        for line in self.res['output'].splitlines():
-            if line.startswith('Node') and line.endswith('(offline)'):
-                offline.append(line.split()[1].replace(':', ''))
-            if line.startswith('OFFLINE:'):
-                nodes = line.split('[')[1].split(']')[0]
-                offline.extend([n for n in nodes.split(' ') if n])
-        return offline
+        self.nodes = []
+        # try crm_mon first
+        try:
+            if not self.get_option('only-corosync'):
+                try:
+                    self.get_nodes_from_crm()
+                except Exception as err:
+                    self.log_warn("Falling back to sourcing corosync.conf. "
+                                  "Could not parse crm_mon output: %s" % err)
+            if not self.nodes:
+                # fallback to corosync.conf, in case the node we're inspecting
+                # is offline from the cluster
+                self.get_nodes_from_corosync()
+        except Exception as err:
+            self.log_error("Could not determine nodes from cluster: %s" % err)
+
+        _shorts = [n for n in self.nodes if '.' not in n]
+        if _shorts:
+            self.log_warn(
+                "WARNING: Node addresses '%s' may not resolve locally if you "
+                "are not running on a node in the cluster. Try using option "
+                "'-c pacemaker.only-corosync' if these connections fail."
+                % ','.join(_shorts)
+            )
+        return self.nodes
+
+    def get_nodes_from_crm(self):
+        """
+        Try to parse crm_mon output for node list and status.
+        """
+        xmlopt = '--output-as=xml'
+        # older pacemaker had a different option for xml output
+        _ver = self.exec_primary_cmd('crm_mon --version')
+        if _ver['status'] == 0:
+            cver = _ver['output'].split()[1].split('-')[0]
+            if not version.parse(cver) > version.parse('2.0.3'):
+                xmlopt = '--as-xml'
+        else:
+            return
+        _out = self.exec_primary_cmd(
+            "crm_mon --one-shot --inactive %s" % xmlopt,
+            need_root=True
+        )
+        if _out['status'] == 0:
+            self.parse_crm_xml(_out['output'])
+
+    def parse_crm_xml(self, xmlstring):
+        """
+        Parse the xml output string provided by crm_mon
+        """
+        _xml = ElementTree.fromstring(xmlstring)
+        nodes = _xml.find('nodes')
+        for node in nodes:
+            _node = node.attrib
+            if self.get_option('online') and _node['online'] == 'true':
+                self.nodes.append(_node['name'])
+            elif self.get_option('offline') and _node['online'] == 'false':
+                self.nodes.append(_node['name'])
+
+    def get_nodes_from_corosync(self):
+        """
+        As a fallback measure, read corosync.conf to get the node list. Note
+        that this prevents us from separating online nodes from offline nodes.
+        """
+        self.log_warn("WARNING: unable to distinguish online nodes from "
+                      "offline nodes when sourcing from corosync.conf")
+        cc = self.primary.read_file('/etc/corosync/corosync.conf')
+        nodes = re.findall(r'((\sring0_addr:)(.*))', cc)
+        for node in nodes:
+            self.nodes.append(node[-1].strip())

 # vim: set et ts=4 sw=4 :
--
2.34.3


From 6701a7d77ecc998b018b54ecc00f9fd102ae9518 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Mon, 21 Mar 2022 12:05:59 -0400
Subject: [PATCH 2/2] [clusters] Allow clusters to not add localhost to node
 list

For most of our supported clusters, we end up needing to add the
local host executing `sos collect` to the node list (unless `--no-local`
is used) as that accounts for the primary node that may otherwise be
left off. However, this is not helpful for clusters that may reports
node names as something other than resolveable names. In those cases,
such as with pacemaker, adding the local hostname may result in
duplicate collections.

Add a toggle to cluster profiles via a new `strict_node_list` class attr
that, if True, will skip this addition. This toggle is default `False`
to preserve existing behavior, and is now enabled for `pacemaker`
specifically.

Related: RHBZ#2065821

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/collector/__init__.py | 3 ++-
 sos/collector/clusters/__init__.py | 4 ++++
 sos/collector/clusters/pacemaker.py | 1 +
 3 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
index a8bb0064..d898ca34 100644
--- a/sos/collector/__init__.py
+++ b/sos/collector/__init__.py
@@ -1073,7 +1073,8 @@ class SoSCollector(SoSComponent):
         for node in self.node_list:
             if host == node.split('.')[0]:
                 self.node_list.remove(node)
-        self.node_list.append(self.hostname)
+        if not self.cluster.strict_node_list:
+            self.node_list.append(self.hostname)
         self.reduce_node_list()
         try:
             _node_max = len(max(self.node_list, key=len))
diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py
index f3f550ad..f00677b8 100644
--- a/sos/collector/clusters/__init__.py
+++ b/sos/collector/clusters/__init__.py
@@ -57,6 +57,10 @@ class Cluster():
     sos_plugin_options = {}
     sos_preset = ''
     cluster_name = None
+    # set this to True if the local host running collect should *not* be
+    # forcibly added to the node list. This can be helpful in situations where
+    # the host's fqdn and the name the cluster uses are different
+    strict_node_list = False

     def __init__(self, commons):
         self.primary = None
diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py
index 49d0ce51..bebcb265 100644
--- a/sos/collector/clusters/pacemaker.py
+++ b/sos/collector/clusters/pacemaker.py
@@ -20,6 +20,7 @@ class pacemaker(Cluster):
     cluster_name = 'Pacemaker High Availability Cluster Manager'
     sos_plugins = ['pacemaker']
     packages = ('pacemaker',)
+    strict_node_list = True
     option_list = [
         ('online', True, 'Collect nodes listed as online'),
         ('offline', True, 'Collect nodes listed as offline'),
--
2.34.3

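A small standalone sketch of the crm_mon XML handling introduced in the pacemaker patch above; the XML string below is a trimmed, hand-written stand-in for real `crm_mon --one-shot --inactive --output-as=xml` output.

from xml.etree import ElementTree

# Hypothetical sample of the <nodes> section crm_mon emits
sample = """<pacemaker-result>
  <nodes>
    <node name="node1.example.com" online="true"/>
    <node name="node2.example.com" online="false"/>
  </nodes>
</pacemaker-result>"""

_xml = ElementTree.fromstring(sample)
for node in _xml.find('nodes'):
    attrs = node.attrib
    status = 'online' if attrs['online'] == 'true' else 'offline'
    print(attrs['name'], status)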
@@ -35,5 +35,5 @@ index cc5cb65b..336b4d22 100644
             _timeout = own_timeout
         else:
 --
-2.34.1
+2.34.3
 
68	SOURCES/sos-bz2079484-list-plugins-ignore-options.patch	Normal file
@@ -0,0 +1,68 @@
From f3dc8cd574614572d441f76c02453fd85d0c57e2 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Wed, 27 Apr 2022 10:40:55 -0400
Subject: [PATCH] [report] --list-plugins should report used, not default,
 option values

When using `--list-plugins`, sos should report the values that will be
used in a given command, or with a given config file, not what the
default values are.

By reporting the set value, users can be sure their configuration or
commandline settings are being honored correctly before executing a
report collection.

Closes: #2921

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/report/__init__.py | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/sos/report/__init__.py b/sos/report/__init__.py
index 74c7973a..8735c903 100644
--- a/sos/report/__init__.py
+++ b/sos/report/__init__.py
@@ -868,24 +868,32 @@ class SoSReport(SoSComponent):
         _defaults = self.loaded_plugins[0][1].get_default_plugin_opts()
         for _opt in _defaults:
             opt = _defaults[_opt]
-            val = opt.default
-            if opt.default == -1:
-                val = TIMEOUT_DEFAULT
+            val = opt.value
+            if opt.value == -1:
+                if _opt == 'timeout':
+                    val = self.opts.plugin_timeout or TIMEOUT_DEFAULT
+                elif _opt == 'cmd-timeout':
+                    val = self.opts.cmd_timeout or TIMEOUT_DEFAULT
+                else:
+                    val = TIMEOUT_DEFAULT
+            if opt.name == 'postproc':
+                val = not self.opts.no_postproc
             self.ui_log.info(" %-25s %-15s %s" % (opt.name, val, opt.desc))
         self.ui_log.info("")

         self.ui_log.info(_("The following plugin options are available:"))
         for opt in self.all_options:
             if opt.name in ('timeout', 'postproc', 'cmd-timeout'):
-                continue
+                if opt.value == opt.default:
+                    continue
             # format option value based on its type (int or bool)
-            if isinstance(opt.default, bool):
-                if opt.default is True:
+            if isinstance(opt.value, bool):
+                if opt.value is True:
                     tmpopt = "on"
                 else:
                     tmpopt = "off"
             else:
-                tmpopt = opt.default
+                tmpopt = opt.value

             if tmpopt is None:
                 tmpopt = 0
--
2.34.3

34	SOURCES/sos-bz2079485-plugopts-valtype-str.patch	Normal file
@@ -0,0 +1,34 @@
From 9b10abcdd4aaa41e2549438d5bc52ece86dcb21f Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Sat, 7 May 2022 14:23:04 +0200
Subject: [PATCH] [plugins] Allow 'str' PlugOpt type to accept any value

For PlugOpt type 'str', we should allow any content including e.g.
numbers, and interpret it as a string.

Resolves: #2922
Closes: #2935

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
 sos/report/plugins/__init__.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index d6be42b9..2a42e6b0 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -452,6 +452,10 @@ class PluginOpt():
         return self.__str__()

     def set_value(self, val):
+        # 'str' type accepts any value, incl. numbers
+        if type('') in self.val_type:
+            self.value = str(val)
+            return
         if not any([type(val) == _t for _t in self.val_type]):
             valid = []
             for t in self.val_type:
--
2.34.3

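A toy sketch of the coercion added in the patch above: a 'str'-typed option stores whatever it is given back as a string. This is a simplified stand-in class, not the real sos PluginOpt.

class PluginOpt:
    def __init__(self, val_type=(str,)):
        self.val_type = val_type
        self.value = None

    def set_value(self, val):
        # accept any value for 'str'-typed options and store it as a string
        if str in self.val_type:
            self.value = str(val)
            return
        raise TypeError("unsupported value type: %r" % type(val))

opt = PluginOpt()
opt.set_value(8080)
print(repr(opt.value))   # '8080' - stored as a string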
31	SOURCES/sos-bz2079486-timeouted-exec-cmd-exception.patch	Normal file
@@ -0,0 +1,31 @@
From 5e27b92a8a9f066af4c41ddd0bedc7c69187ff52 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Mon, 2 May 2022 22:13:34 +0200
Subject: [PATCH] [utilities] Close file only when storing to file

Call _output.close() only when to_file=true.

Closes: #2925

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
 sos/utilities.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/sos/utilities.py b/sos/utilities.py
index d2f73d86..1075d1d4 100644
--- a/sos/utilities.py
+++ b/sos/utilities.py
@@ -212,7 +212,8 @@ def sos_get_command_output(command, timeout=TIMEOUT_DEFAULT, stderr=False,
         p.wait(timeout if timeout else None)
     except Exception:
         p.terminate()
-        _output.close()
+        if to_file:
+            _output.close()
         # until we separate timeouts from the `timeout` command
         # handle per-cmd timeouts via Plugin status checks
         return {'status': 124, 'output': reader.get_contents(),
--
2.34.3

@@ -69,5 +69,5 @@ index 55082d07..4cae1ecc 100644
 
     def fmt_container_cmd(self, container, cmd, quotecmd):
 --
-2.27.0
+2.34.3
 
@@ -17,10 +17,10 @@ Signed-off-by: Juan Orti <jortialc@redhat.com>
 1 file changed, 3 insertions(+), 1 deletion(-)
 
 diff --git a/sos/report/plugins/vdsm.py b/sos/report/plugins/vdsm.py
-index ee5befbb..146d223c 100644
+index ee5befbb1..146d223c2 100644
 --- a/sos/report/plugins/vdsm.py
 +++ b/sos/report/plugins/vdsm.py
-@@ -29,7 +29,8 @@ import re
+@@ -29,7 +29,8 @@
 # use_lvmetad is set to 0 in order not to show cached, old lvm metadata.
 # use_lvmetad=0
 #
@@ -30,7 +30,7 @@ index ee5befbb..146d223c 100644
 # preferred_names=[ '^/dev/mapper/' ]
 # filter=[ 'a|^/dev/mapper/.*|', 'r|.*|' ]
 LVM_CONFIG = """
-@@ -43,6 +44,7 @@ devices {
+@@ -43,6 +44,7 @@
     ignore_suspended_devices=1
     write_cache_state=0
     disable_after_error_count=3
@@ -38,6 +38,3 @@ index ee5befbb..146d223c 100644
 filter=["a|^/dev/disk/by-id/dm-uuid-mpath-|", "r|.+|"]
 }
 """
---
-2.27.0
-
@@ -62,5 +62,5 @@ index 09647bf1..3b1bb29b 100644
 
 # aaa profiles contain passwords
 --
-2.27.0
+2.34.3
 
@@ -32,7 +32,7 @@ Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
  4 files changed, 13 insertions(+), 17 deletions(-)
 
 diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index 2a42e6b0..ba1397a8 100644
+index 2a42e6b0a..ba1397a8a 100644
 --- a/sos/report/plugins/__init__.py
 +++ b/sos/report/plugins/__init__.py
 @@ -46,11 +46,6 @@ def _mangle_command(command, name_max):
@@ -47,7 +47,7 @@ index 2a42e6b0..ba1397a8 100644
  def _node_type(st):
      """ return a string indicating the type of special node represented by
      the stat buffer st (block, character, fifo, socket).
-@@ -1407,7 +1402,9 @@ class Plugin():
+@@ -1407,7 +1402,9 @@ def _get_dest_for_srcpath(self, srcpath):
          return None
 
      def _is_forbidden_path(self, path):
@@ -58,7 +58,7 @@ index 2a42e6b0..ba1397a8 100644
 
      def _is_policy_forbidden_path(self, path):
          return any([
-@@ -1495,14 +1492,12 @@ class Plugin():
+@@ -1495,14 +1492,12 @@ def _do_copy_path(self, srcpath, dest=None):
              'symlink': "no"
          })
 
@@ -74,7 +74,7 @@ index 2a42e6b0..ba1397a8 100644
          """
          if isinstance(forbidden, str):
              forbidden = [forbidden]
-@@ -1244,8 +1244,11 @@
+@@ -1512,8 +1507,11 @@ def add_forbidden_path(self, forbidden, recursive=False):
 
          for forbid in forbidden:
              self._log_info("adding forbidden path '%s'" % forbid)
@@ -86,27 +86,13 @@ index 2a42e6b0..ba1397a8 100644
 +            forbid = fnmatch.translate(forbid)
 +            self.forbidden_paths.append(forbid)
 
-     def get_all_options(self):
-         """return a list of all options selected"""
+     def set_option(self, optionname, value):
+         """Set the named option to value. Ensure the original type of the
-diff --git a/sos/report/plugins/cgroups.py b/sos/report/plugins/cgroups.py
-index 6e2a6918..20d299cf 100644
---- a/sos/report/plugins/cgroups.py
-+++ b/sos/report/plugins/cgroups.py
-@@ -30,6 +30,9 @@
-         ])
-
-         self.add_cmd_output("systemd-cgls")
-+        self.add_forbidden_path(
-+            "/sys/fs/cgroup/memory/**/memory.kmem.slabinfo"
-+        )
-
-         return
-
 diff --git a/sos/report/plugins/pulpcore.py b/sos/report/plugins/pulpcore.py
-index 6c4237ca..f6bc194c 100644
+index 6c4237cae..f6bc194c7 100644
 --- a/sos/report/plugins/pulpcore.py
 +++ b/sos/report/plugins/pulpcore.py
-@@ -89,7 +89,7 @@
+@@ -89,7 +89,7 @@ class PulpCore(Plugin, IndependentPlugin
              "/etc/pki/pulp/*"
          ])
          # skip collecting certificate keys
@@ -116,10 +102,10 @@ index 6c4237ca..f6bc194c 100644
          self.add_cmd_output("rq info -u redis://localhost:6379/8",
                              env={"LC_ALL": "en_US.UTF-8"},
 diff --git a/sos/report/plugins/rhui.py b/sos/report/plugins/rhui.py
-index add02461..8063fd51 100644
+index add024613..8063fd51c 100644
 --- a/sos/report/plugins/rhui.py
 +++ b/sos/report/plugins/rhui.py
-@@ -30,7 +30,7 @@ class Rhui(Plugin, RedHatPlugin):
+@@ -30,7 +30,7 @@ def setup(self):
              "/var/log/rhui/*",
          ])
          # skip collecting certificate keys
@@ -128,6 +114,3 @@ index add02461..8063fd51 100644
 
          # call rhui-manager commands with 1m timeout and
          # with an env. variable ensuring that "RHUI Username:"
---
-2.27.0
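For reference, the add_forbidden_path() change carried in the hunks above stops expanding globs against the filesystem and instead stores each pattern translated into a regex via fnmatch.translate(), so the forbidden-path test presumably becomes a cheap regex match. A small self-contained sketch of that technique (the ForbiddenPaths class is illustrative, not the sos Plugin API):

```python
# Sketch only: store forbidden globs as compiled regexes and match paths
# against them, instead of globbing the filesystem for every pattern.
import fnmatch
import re

class ForbiddenPaths:
    def __init__(self):
        self.forbidden_paths = []

    def add_forbidden_path(self, forbidden):
        if isinstance(forbidden, str):
            forbidden = [forbidden]
        for forbid in forbidden:
            # e.g. fnmatch.translate('/etc/pki/pulp/*') -> roughly
            # r'(?s:/etc/pki/pulp/.*)\Z'
            self.forbidden_paths.append(re.compile(fnmatch.translate(forbid)))

    def is_forbidden(self, path):
        return any(p.match(path) for p in self.forbidden_paths)

fp = ForbiddenPaths()
fp.add_forbidden_path(["/etc/pki/pulp/*", "/var/log/rhui/*.key"])
assert fp.is_forbidden("/etc/pki/pulp/ca.key")
assert not fp.is_forbidden("/var/log/rhui/rhui.log")
```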
62
SOURCES/sos-bz2120617-ocp-add-labels-to-namespace.patch
Normal file
@@ -0,0 +1,62 @@
From 765f5f283bdb4747b0069f2f5d3381134b4b9a95 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Thu, 15 Sep 2022 12:36:42 -0400
Subject: [PATCH] [ocp] Add newly required labels to temp OCP namespace

Newer OCP versions have a more restrictive default deployment
configuration. As such, add the required labels to the temporary
namespace/project we use for collections.

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/collector/clusters/ocp.py | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
index 06301536f..92c4e04a2 100644
--- a/sos/collector/clusters/ocp.py
+++ b/sos/collector/clusters/ocp.py
@@ -114,12 +114,32 @@ class ocp(Cluster):
         self.log_info("Creating new temporary project '%s'" % self.project)
         ret = self.exec_primary_cmd("oc new-project %s" % self.project)
         if ret['status'] == 0:
+            self._label_sos_project()
             return True

         self.log_debug("Failed to create project: %s" % ret['output'])
         raise Exception("Failed to create temporary project for collection. "
                         "\nAborting...")

+    def _label_sos_project(self):
+        """Add pertinent labels to the temporary project we've created so that
+        our privileged containers can properly run.
+        """
+        labels = [
+            "security.openshift.io/scc.podSecurityLabelSync=false",
+            "pod-security.kubernetes.io/enforce=privileged"
+        ]
+        for label in labels:
+            ret = self.exec_primary_cmd(
+                self.fmt_oc_cmd(
+                    f"label namespace {self.project} {label} --overwrite"
+                )
+            )
+            if not ret['status'] == 0:
+                raise Exception(
+                    f"Error applying namespace labels: {ret['output']}"
+                )
+
     def cleanup(self):
         """Remove the project we created to execute within
         """
@@ -231,8 +251,9 @@ def get_nodes(self):
         for node_name, node in self.node_dict.items():
             if roles:
                 for role in roles:
-                    if role == node['roles']:
+                    if role in node['roles']:
                         nodes.append(node_name)
+                        break
                 else:
                     nodes.append(node_name)
             else:
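Outside of sos collect, the _label_sos_project() helper added by this patch boils down to running `oc label namespace ... --overwrite` for two pod-security labels. A hedged standalone sketch follows; the label_namespace() function and the example project name are illustrative, and only the two label strings come from the patch:

```python
# Illustrative equivalent of the patch's labeling loop, using plain subprocess
# instead of sos's exec_primary_cmd()/fmt_oc_cmd() helpers.
import subprocess

LABELS = [
    "security.openshift.io/scc.podSecurityLabelSync=false",
    "pod-security.kubernetes.io/enforce=privileged",
]

def label_namespace(project):
    for label in LABELS:
        result = subprocess.run(
            ["oc", "label", "namespace", project, label, "--overwrite"],
            capture_output=True, text=True,
        )
        if result.returncode != 0:
            raise RuntimeError(
                f"Error applying namespace labels: {result.stderr.strip()}"
            )

# label_namespace("sos-collect-tmp")  # example project name; needs a cluster login
```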
@@ -1,30 +0,0 @@
From f827192424f2a4b9b390816c10b08dff658e0d74 Mon Sep 17 00:00:00 2001
From: Rodolfo Olivieri <rolivier@redhat.com>
Date: Mon, 25 Oct 2021 09:04:06 -0300
Subject: [PATCH] [convert2rhel] Add archived log collection

Convert2RHEL now archives old logs for the sake of simplicity, so we
include the archive directory in the collection as well.

Signed-off-by: Rodolfo Olivieri <rolivier@redhat.com>
---
 sos/report/plugins/convert2rhel.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/sos/report/plugins/convert2rhel.py b/sos/report/plugins/convert2rhel.py
index 74d6d40e..a786f3c2 100644
--- a/sos/report/plugins/convert2rhel.py
+++ b/sos/report/plugins/convert2rhel.py
@@ -21,7 +21,8 @@ class convert2rhel(Plugin, RedHatPlugin):

         self.add_copy_spec([
             "/var/log/convert2rhel/convert2rhel.log",
-            "/var/log/convert2rhel/rpm_va.log"
+            "/var/log/convert2rhel/archive/convert2rhel-*.log",
+            "/var/log/convert2rhel/rpm_va.log",
         ])


--
2.31.1
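This downstream patch is dropped here, presumably because the change is already contained in the sos 4.3 rebase. The copy spec it describes is just a set of glob patterns; a quick illustrative snippet (not part of sos) shows what those patterns would expand to on a host that has run convert2rhel:

```python
# Expand the copy specs locally to see which files sos would collect.
import glob

copy_specs = [
    "/var/log/convert2rhel/convert2rhel.log",
    "/var/log/convert2rhel/archive/convert2rhel-*.log",
    "/var/log/convert2rhel/rpm_va.log",
]

for spec in copy_specs:
    print(spec, "->", glob.glob(spec) or "no matches on this host")
```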
154
SPECS/sos.spec
@@ -4,8 +4,8 @@
 Summary: A set of tools to gather troubleshooting information from a system
 Name: sos
-Version: 4.2
-Release: 22%{?dist}
+Version: 4.3
+Release: 5%{?dist}
 Group: Applications/System
 Source0: https://github.com/sosreport/sos/archive/%{version}/sos-%{version}.tar.gz
 Source1: sos-audit-%{auditversion}.tgz
@@ -21,35 +21,20 @@ Conflicts: vdsm < 4.40
 Obsoletes: sos-collector
 Recommends: python3-pexpect
 Recommends: python3-requests
-Patch1: sos-bz2011413-cpuX-individual-sizelimits.patch
-Patch2: sos-bz1998521-unpackaged-recursive-symlink.patch
-Patch3: sos-bz1998433-opacapture-under-allow-system-changes.patch
-Patch4: sos-bz2002145-kernel-psi.patch
-Patch5: sos-bz2001096-iptables-save-under-nf_tables-kmod.patch
-Patch6: sos-bz1873185-estimate-only-option.patch
-Patch7: sos-bz2005195-iptables-based-on-ntf.patch
-Patch8: sos-bz2011506-foreman-puma-status.patch
-Patch9: sos-bz2012856-dryrun-uncaught-exception.patch
-Patch10: sos-bz2004929-openvswitch-offline-analysis.patch
-Patch11: sos-bz2012857-plugin-timeout-unhandled-exception.patch
-Patch12: sos-bz2018033-plugin-timeouts-proper-handling.patch
-Patch13: sos-bz2020777-filter-namespace-per-pattern.patch
-Patch14: sos-bz2023867-cleaner-hostnames-improvements.patch
-Patch15: sos-bz2025610-RHTS-api-change.patch
-Patch16: sos-bz2025403-nvidia-GPU-info.patch
-Patch17: sos-bz2030741-rhui-logs.patch
-Patch18: sos-bz2036697-ocp-backports.patch
-Patch19: sos-bz2043102-foreman-tasks-msgpack.patch
-Patch20: sos-bz2041488-virsh-in-foreground.patch
-Patch21: sos-bz2042966-ovn-proper-package-enablement.patch
-Patch22: sos-bz2054882-plugopt-logging-effective-opts.patch
-Patch23: sos-bz2055547-honour-plugins-timeout-hardcoded.patch
-Patch24: sos-bz2071825-merged-8.6.z.patch
-Patch25: sos-bz2098639-ovirt-obfuscation_answer_file.patch
-Patch26: sos-bz2098643-crio-output-to-json.patch
-Patch27: sos-bz2121774-vdsm-set-use_devicesfile-0.patch
-Patch28: sos-bz2125656-plugin-make-forbidden-path-checks-more-efficient.patch
-Patch29: sos-bz2129105-conver2rhel-add-archived-log-collection.patch
+Patch1: sos-bz2055002-rebase-sos-add-sos-help.patch
+Patch2: sos-bz2095263-ovirt-answer-files-passwords.patch
+Patch3: sos-bz2079485-plugopts-valtype-str.patch
+Patch4: sos-bz2062908-tigervnc-update-collections.patch
+Patch5: sos-bz2065805-collect-pacemaker-cluster.patch
+Patch6: sos-bz2079187-honor-default-plugin-timeout.patch
+Patch7: sos-bz2079484-list-plugins-ignore-options.patch
+Patch8: sos-bz2079486-timeouted-exec-cmd-exception.patch
+Patch9: sos-bz2058279-ocp-backports.patch
+Patch10: sos-bz2092969-openshift-ovn-disabled.patch
+Patch11: sos-bz2093993-vdsm-set-use-devicesfile-zero.patch
+Patch12: sos-bz2099598-forbidden-path-efficient.patch
+Patch13: sos-bz2120617-ocp-add-labels-to-namespace.patch
 
 %description
 Sos is a set of tools that gathers information about system
@@ -73,22 +58,7 @@ support technicians and developers.
 %patch11 -p1
 %patch12 -p1
 %patch13 -p1
-%patch14 -p1
-%patch15 -p1
-%patch16 -p1
-%patch17 -p1
-%patch18 -p1
-%patch19 -p1
-%patch20 -p1
-%patch21 -p1
-%patch22 -p1
-%patch23 -p1
-%patch24 -p1
-%patch25 -p1
-%patch26 -p1
-%patch27 -p1
-%patch28 -p1
-%patch29 -p1
 
 %build
 %py3_build
@@ -118,6 +88,7 @@ mkdir -p %{buildroot}%{_sysconfdir}/sos/{cleaner,presets.d,extras.d,groups.d}
 %{_mandir}/man1/sos-clean.1.gz
 %{_mandir}/man1/sos-collect.1.gz
 %{_mandir}/man1/sos-collector.1.gz
+%{_mandir}/man1/sos-help.1.gz
 %{_mandir}/man1/sos-mask.1.gz
 %{_mandir}/man1/sos-report.1.gz
 %{_mandir}/man1/sos.1.gz
@@ -155,52 +126,57 @@ of the system. Currently storage and filesystem commands are audited.
 %ghost /etc/audit/rules.d/40-sos-storage.rules
 
 %changelog
-* Thu Sep 22 2022 Jan Jansky <jjansky@redhat.com> = 4.2-22
-- [vdsm] Set LVM option use_devicesfile=0
-  Resolves: bz2121774
-- [Plugin] Make forbidden path checks more efficient
-  Resolves: bz2125656
-- [convert2rhel] Add archived log collection
-  Resolves: bz2129105
-
-* Mon Sep 19 2022 Jan Jansky <jjansky@redhat.com> = 4.2-21
-- [vdsm] Set LVM option use_devicesfile=0
-  Resolves: bz2121774
-- [Plugin] Make forbidden path checks more efficient
-  Resolves: bz2125656
-
-* Fri Jul 22 2022 Jan Jansky <jjansky@redhat.com> = 4.2-20
-- [ovirt] obfuscate answer file
-  Resolves: bz2098639
-- [crio] from output to json
-  Resolves: bz2098643
-
-* Mon May 09 2022 Jan Jansky <jjansky@redhat.com> = 4.2-19
-- OCP backport
-  Resolves: bz2071824
-- [pacemaker] Update collect cluster profile for pacemaker
-  Resolves: bz2071695
-- [Plugin] oom excessive memory usage
-  Resolves: bz2071825
-
-* Fri Apr 22 2022 Jan Jansky <jjansky@redhat.com> = 4.2-18
-- OCP backport
-  Resolves: bz2071824
-- [pacemaker] Update collect cluster profile for pacemaker
-  Resolves: bz2071695
-- [Plugin] oom excessive memory usage
-  Resolves: bz2071825
-
-* Wed Apr 20 2022 Jan Jansky <jjansky@redhat.com> = 4.2-17
-- increased release version
-
-* Wed Apr 13 2022 Jan Jansky <jjansky@redhat.com> = 4.2-16
-- OCP backport
-  Resolves: bz2071824
-- [pacemaker] Update collect cluster profile for pacemaker
-  Resolves: bz2071695
-- [Plugin] oom excessive memory usage
-  Resolves: bz2071825
-
+* Mon Oct 03 2022 Pavel Moravec <pmoravec@redhat.com> = 4.3-5
+- [ovn_central] Rename container responsable of Red Hat
+  Resolves: bz2042966
+- [PATCH] [host] Skip entire /etc/sos/cleaner directory
+  Resolves: bz2023867
+
+* Thu Sep 29 2022 Pavel Moravec <pmoravec@redhat.com> = 4.3-4
+- [ocp] Add newly required labels to temp OCP namespace
+  Resolves: bz2120617
+
+* Mon Aug 29 2022 Pavel Moravec <pmoravec@redhat.com> = 4.3-3
+- [vdsm] Set LVM option use_devicesfile=0
+  Resolves: bz2093993
+- [Plugin] Make forbidden path checks more efficient
+  Resolves: bz2099598
+
+* Thu Jun 16 2022 Pavel Moravec <pmoravec@redhat.com> = 4.3-2
+- [ovirt] answer files: Filter out all password keys
+  Resolves: bz2095263
+- [plugins] Allow 'str' PlugOpt type to accept any value
+  Resolves: bz2079485
+- [tigervnc] Update collections for newer versions of TigerVNC
+  Resolves: bz2062908
+- [pacemaker] Redesign node enumeration logic
+  Resolves: bz2065805
+- crio: switch from parsing output in table format to json
+  Resolves: bz2092969
+- [report] Honor plugins' hardcoded plugin_timeout
+  Resolves: bz2079187
+- [report] --list-plugins should report used, not default,
+  Resolves: bz2079484
+- [utilities] Close file only when storing to file
+  Resolves: bz2079486
+- [presets] Adjust OCP preset options, more OCP backports
+  Resolves: bz2058279
+
+* Mon Apr 04 2022 Pavel Moravec <pmoravec@redhat.com> = 4.3-1
+- Rebase on upstream 4.3
+  Resolves: bz2055002
+- [sapnw] Fix IndexError exception
+  Resolves: bz1992938
+- [Plugin, utilities] Allow writing command output directly to disk
+  Resolves: bz1726023
+- [Ceph] Add support for containerized Ceph setup
+  Resolves: bz1882544
+- [unbound] Add new plugin for Unbound DNS resolver
+  Resolves: bz2018228
+- [discovery] Add new discovery plugin
+  Resolves: bz2018549
+- [vdsm] Exclude /var/lib/vdsm/storage/transient_disks
+  Resolves: bz2029154
+
 * Wed Feb 23 2022 Pavel Moravec <pmoravec@redhat.com> = 4.2-15
 - [sosnode] Handle downstream versioning for runtime option